Searched refs:rt (Results 1 - 200 of 316) sorted by relevance


/linux-4.1.27/arch/mips/ralink/
timer.c
38 static inline void rt_timer_w32(struct rt_timer *rt, u8 reg, u32 val) rt_timer_w32() argument
40 __raw_writel(val, rt->membase + reg); rt_timer_w32()
43 static inline u32 rt_timer_r32(struct rt_timer *rt, u8 reg) rt_timer_r32() argument
45 return __raw_readl(rt->membase + reg); rt_timer_r32()
50 struct rt_timer *rt = (struct rt_timer *) _rt; rt_timer_irq() local
52 rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div); rt_timer_irq()
53 rt_timer_w32(rt, TIMER_REG_TMRSTAT, TMRSTAT_TMR0INT); rt_timer_irq()
59 static int rt_timer_request(struct rt_timer *rt) rt_timer_request() argument
61 int err = request_irq(rt->irq, rt_timer_irq, 0, rt_timer_request()
62 dev_name(rt->dev), rt); rt_timer_request()
64 dev_err(rt->dev, "failed to request irq\n"); rt_timer_request()
67 rt_timer_w32(rt, TIMER_REG_TMR0CTL, t); rt_timer_request()
72 static void rt_timer_free(struct rt_timer *rt) rt_timer_free() argument
74 free_irq(rt->irq, rt); rt_timer_free()
77 static int rt_timer_config(struct rt_timer *rt, unsigned long divisor) rt_timer_config() argument
79 if (rt->timer_freq < divisor) rt_timer_config()
80 rt->timer_div = rt->timer_freq; rt_timer_config()
82 rt->timer_div = divisor; rt_timer_config()
84 rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div); rt_timer_config()
89 static int rt_timer_enable(struct rt_timer *rt) rt_timer_enable() argument
93 rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div); rt_timer_enable()
95 t = rt_timer_r32(rt, TIMER_REG_TMR0CTL); rt_timer_enable()
97 rt_timer_w32(rt, TIMER_REG_TMR0CTL, t); rt_timer_enable()
102 static void rt_timer_disable(struct rt_timer *rt) rt_timer_disable() argument
106 t = rt_timer_r32(rt, TIMER_REG_TMR0CTL); rt_timer_disable()
108 rt_timer_w32(rt, TIMER_REG_TMR0CTL, t); rt_timer_disable()
114 struct rt_timer *rt; rt_timer_probe() local
117 rt = devm_kzalloc(&pdev->dev, sizeof(*rt), GFP_KERNEL); rt_timer_probe()
118 if (!rt) { rt_timer_probe()
123 rt->irq = platform_get_irq(pdev, 0); rt_timer_probe()
124 if (!rt->irq) { rt_timer_probe()
129 rt->membase = devm_ioremap_resource(&pdev->dev, res); rt_timer_probe()
130 if (IS_ERR(rt->membase)) rt_timer_probe()
131 return PTR_ERR(rt->membase); rt_timer_probe()
139 rt->timer_freq = clk_get_rate(clk) / TMR0CTL_PRESCALE_DIV; rt_timer_probe()
140 if (!rt->timer_freq) rt_timer_probe()
143 rt->dev = &pdev->dev; rt_timer_probe()
144 platform_set_drvdata(pdev, rt); rt_timer_probe()
146 rt_timer_request(rt); rt_timer_probe()
147 rt_timer_config(rt, 2); rt_timer_probe()
148 rt_timer_enable(rt); rt_timer_probe()
150 dev_info(&pdev->dev, "maximum frequency is %luHz\n", rt->timer_freq); rt_timer_probe()
157 struct rt_timer *rt = platform_get_drvdata(pdev); rt_timer_remove() local
159 rt_timer_disable(rt); rt_timer_remove()
160 rt_timer_free(rt); rt_timer_remove()
175 .name = "rt-timer",
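
The fragments above show the driver's accessor pattern: struct rt_timer keeps the ioremapped base in membase (from devm_ioremap_resource), the irq from platform_get_irq and the prescaled clock rate, and every register access goes through a pair of thin __raw_writel/__raw_readl wrappers. A minimal sketch assembled from those fragments, with the struct trimmed to the members visible above:

#include <linux/io.h>
#include <linux/types.h>

struct rt_timer {
	struct device	*dev;
	void __iomem	*membase;	/* from devm_ioremap_resource() */
	int		irq;		/* from platform_get_irq() */
	unsigned long	timer_freq;	/* clk rate / TMR0CTL_PRESCALE_DIV */
	unsigned long	timer_div;
};

static inline void rt_timer_w32(struct rt_timer *rt, u8 reg, u32 val)
{
	__raw_writel(val, rt->membase + reg);	/* MMIO write at membase + reg */
}

static inline u32 rt_timer_r32(struct rt_timer *rt, u8 reg)
{
	return __raw_readl(rt->membase + reg);	/* MMIO read at membase + reg */
}

The interrupt handler above uses the same wrappers to reload TMR0LOAD and acknowledge TMRSTAT_TMR0INT on each tick.
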
/linux-4.1.27/sound/usb/6fire/
midi.c
28 struct midi_runtime *rt = urb->context; usb6fire_midi_out_handler() local
32 spin_lock_irqsave(&rt->out_lock, flags); usb6fire_midi_out_handler()
34 if (rt->out) { usb6fire_midi_out_handler()
35 ret = snd_rawmidi_transmit(rt->out, rt->out_buffer + 4, usb6fire_midi_out_handler()
38 rt->out_buffer[1] = ret + 2; usb6fire_midi_out_handler()
39 rt->out_buffer[3] = rt->out_serial++; usb6fire_midi_out_handler()
48 rt->out = NULL; usb6fire_midi_out_handler()
50 spin_unlock_irqrestore(&rt->out_lock, flags); usb6fire_midi_out_handler()
54 struct midi_runtime *rt, u8 *data, int length) usb6fire_midi_in_received()
58 spin_lock_irqsave(&rt->in_lock, flags); usb6fire_midi_in_received()
59 if (rt->in) usb6fire_midi_in_received()
60 snd_rawmidi_receive(rt->in, data, length); usb6fire_midi_in_received()
61 spin_unlock_irqrestore(&rt->in_lock, flags); usb6fire_midi_in_received()
77 struct midi_runtime *rt = alsa_sub->rmidi->private_data; usb6fire_midi_out_trigger() local
78 struct urb *urb = &rt->out_urb; usb6fire_midi_out_trigger()
82 spin_lock_irqsave(&rt->out_lock, flags); usb6fire_midi_out_trigger()
84 if (rt->out) { /* we are already transmitting so just return */ usb6fire_midi_out_trigger()
85 spin_unlock_irqrestore(&rt->out_lock, flags); usb6fire_midi_out_trigger()
89 ret = snd_rawmidi_transmit(alsa_sub, rt->out_buffer + 4, usb6fire_midi_out_trigger()
92 rt->out_buffer[1] = ret + 2; usb6fire_midi_out_trigger()
93 rt->out_buffer[3] = rt->out_serial++; usb6fire_midi_out_trigger()
102 rt->out = alsa_sub; usb6fire_midi_out_trigger()
104 } else if (rt->out == alsa_sub) usb6fire_midi_out_trigger()
105 rt->out = NULL; usb6fire_midi_out_trigger()
106 spin_unlock_irqrestore(&rt->out_lock, flags); usb6fire_midi_out_trigger()
111 struct midi_runtime *rt = alsa_sub->rmidi->private_data; usb6fire_midi_out_drain() local
114 while (rt->out && retry++ < 100) usb6fire_midi_out_drain()
131 struct midi_runtime *rt = alsa_sub->rmidi->private_data; usb6fire_midi_in_trigger() local
134 spin_lock_irqsave(&rt->in_lock, flags); usb6fire_midi_in_trigger()
136 rt->in = alsa_sub; usb6fire_midi_in_trigger()
138 rt->in = NULL; usb6fire_midi_in_trigger()
139 spin_unlock_irqrestore(&rt->in_lock, flags); usb6fire_midi_in_trigger()
158 struct midi_runtime *rt = kzalloc(sizeof(struct midi_runtime), usb6fire_midi_init() local
162 if (!rt) usb6fire_midi_init()
165 rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL); usb6fire_midi_init()
166 if (!rt->out_buffer) { usb6fire_midi_init()
167 kfree(rt); usb6fire_midi_init()
171 rt->chip = chip; usb6fire_midi_init()
172 rt->in_received = usb6fire_midi_in_received; usb6fire_midi_init()
173 rt->out_buffer[0] = 0x80; /* 'send midi' command */ usb6fire_midi_init()
174 rt->out_buffer[1] = 0x00; /* size of data */ usb6fire_midi_init()
175 rt->out_buffer[2] = 0x00; /* always 0 */ usb6fire_midi_init()
176 spin_lock_init(&rt->in_lock); usb6fire_midi_init()
177 spin_lock_init(&rt->out_lock); usb6fire_midi_init()
179 comm_rt->init_urb(comm_rt, &rt->out_urb, rt->out_buffer, rt, usb6fire_midi_init()
182 ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); usb6fire_midi_init()
184 kfree(rt->out_buffer); usb6fire_midi_init()
185 kfree(rt); usb6fire_midi_init()
189 rt->instance->private_data = rt; usb6fire_midi_init()
190 strcpy(rt->instance->name, "DMX6FireUSB MIDI"); usb6fire_midi_init()
191 rt->instance->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT | usb6fire_midi_init()
194 snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_OUTPUT, usb6fire_midi_init()
196 snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_INPUT, usb6fire_midi_init()
199 chip->midi = rt; usb6fire_midi_init()
205 struct midi_runtime *rt = chip->midi; usb6fire_midi_abort() local
207 if (rt) usb6fire_midi_abort()
208 usb_poison_urb(&rt->out_urb); usb6fire_midi_abort()
213 struct midi_runtime *rt = chip->midi; usb6fire_midi_destroy() local
215 kfree(rt->out_buffer); usb6fire_midi_destroy()
216 kfree(rt); usb6fire_midi_destroy()
53 usb6fire_midi_in_received( struct midi_runtime *rt, u8 *data, int length) usb6fire_midi_in_received() argument
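
The out path above always works on a 4-byte packet header followed by the raw MIDI payload: byte 0 is the 0x80 'send midi' command, byte 1 a length field (payload + 2), byte 2 is always zero, byte 3 a running serial, and snd_rawmidi_transmit() fills the buffer from offset 4. A hedged sketch of that framing; the helper name and the exact meaning of the +2 are assumptions, not from the excerpt:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: packs one outgoing packet the way the fragments
 * above fill rt->out_buffer before submitting the out URB. */
static int sketch_pack_midi_out(u8 *buf, const u8 *payload, u8 len, u8 *serial)
{
	buf[0] = 0x80;		/* 'send midi' command */
	buf[1] = len + 2;	/* size of data (the +2 is assumed to cover
				 * the remaining header bytes) */
	buf[2] = 0x00;		/* always 0 */
	buf[3] = (*serial)++;	/* out_serial, incremented per packet */
	memcpy(buf + 4, payload, len);	/* payload starts at offset 4 */
	return len + 4;
}
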
control.c
64 static void usb6fire_control_output_vol_update(struct control_runtime *rt) usb6fire_control_output_vol_update() argument
66 struct comm_runtime *comm_rt = rt->chip->comm; usb6fire_control_output_vol_update()
71 if (!(rt->ovol_updated & (1 << i))) { usb6fire_control_output_vol_update()
73 180 - rt->output_vol[i]); usb6fire_control_output_vol_update()
74 rt->ovol_updated |= 1 << i; usb6fire_control_output_vol_update()
78 static void usb6fire_control_output_mute_update(struct control_runtime *rt) usb6fire_control_output_mute_update() argument
80 struct comm_runtime *comm_rt = rt->chip->comm; usb6fire_control_output_mute_update()
83 comm_rt->write8(comm_rt, 0x12, 0x0e, ~rt->output_mute); usb6fire_control_output_mute_update()
86 static void usb6fire_control_input_vol_update(struct control_runtime *rt) usb6fire_control_input_vol_update() argument
88 struct comm_runtime *comm_rt = rt->chip->comm; usb6fire_control_input_vol_update()
93 if (!(rt->ivol_updated & (1 << i))) { usb6fire_control_input_vol_update()
95 rt->input_vol[i] & 0x3f); usb6fire_control_input_vol_update()
96 rt->ivol_updated |= 1 << i; usb6fire_control_input_vol_update()
100 static void usb6fire_control_line_phono_update(struct control_runtime *rt) usb6fire_control_line_phono_update() argument
102 struct comm_runtime *comm_rt = rt->chip->comm; usb6fire_control_line_phono_update()
104 comm_rt->write8(comm_rt, 0x22, 0x02, rt->line_phono_switch); usb6fire_control_line_phono_update()
105 comm_rt->write8(comm_rt, 0x21, 0x02, rt->line_phono_switch); usb6fire_control_line_phono_update()
109 static void usb6fire_control_opt_coax_update(struct control_runtime *rt) usb6fire_control_opt_coax_update() argument
111 struct comm_runtime *comm_rt = rt->chip->comm; usb6fire_control_opt_coax_update()
113 comm_rt->write8(comm_rt, 0x22, 0x00, rt->opt_coax_switch); usb6fire_control_opt_coax_update()
114 comm_rt->write8(comm_rt, 0x21, 0x00, rt->opt_coax_switch); usb6fire_control_opt_coax_update()
118 static int usb6fire_control_set_rate(struct control_runtime *rt, int rate) usb6fire_control_set_rate() argument
121 struct usb_device *device = rt->chip->dev; usb6fire_control_set_rate()
122 struct comm_runtime *comm_rt = rt->chip->comm; usb6fire_control_set_rate()
141 struct control_runtime *rt, int n_analog_out, usb6fire_control_set_channels()
145 struct comm_runtime *comm_rt = rt->chip->comm; usb6fire_control_set_channels()
164 static int usb6fire_control_streaming_update(struct control_runtime *rt) usb6fire_control_streaming_update() argument
166 struct comm_runtime *comm_rt = rt->chip->comm; usb6fire_control_streaming_update()
169 if (!rt->usb_streaming && rt->digital_thru_switch) usb6fire_control_streaming_update()
170 usb6fire_control_set_rate(rt, usb6fire_control_streaming_update()
173 (rt->usb_streaming ? 0x01 : 0x00) | usb6fire_control_streaming_update()
174 (rt->digital_thru_switch ? 0x08 : 0x00)); usb6fire_control_streaming_update()
192 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_output_vol_put() local
197 dev_err(&rt->chip->dev->dev, usb6fire_control_output_vol_put()
202 if (rt->output_vol[ch] != ucontrol->value.integer.value[0]) { usb6fire_control_output_vol_put()
203 rt->output_vol[ch] = ucontrol->value.integer.value[0]; usb6fire_control_output_vol_put()
204 rt->ovol_updated &= ~(1 << ch); usb6fire_control_output_vol_put()
207 if (rt->output_vol[ch + 1] != ucontrol->value.integer.value[1]) { usb6fire_control_output_vol_put()
208 rt->output_vol[ch + 1] = ucontrol->value.integer.value[1]; usb6fire_control_output_vol_put()
209 rt->ovol_updated &= ~(2 << ch); usb6fire_control_output_vol_put()
214 usb6fire_control_output_vol_update(rt); usb6fire_control_output_vol_put()
222 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_output_vol_get() local
226 dev_err(&rt->chip->dev->dev, usb6fire_control_output_vol_get()
231 ucontrol->value.integer.value[0] = rt->output_vol[ch]; usb6fire_control_output_vol_get()
232 ucontrol->value.integer.value[1] = rt->output_vol[ch + 1]; usb6fire_control_output_vol_get()
239 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_output_mute_put() local
241 u8 old = rt->output_mute; usb6fire_control_output_mute_put()
245 dev_err(&rt->chip->dev->dev, usb6fire_control_output_mute_put()
250 rt->output_mute &= ~(3 << ch); usb6fire_control_output_mute_put()
255 rt->output_mute |= value << ch; usb6fire_control_output_mute_put()
257 if (rt->output_mute != old) usb6fire_control_output_mute_put()
258 usb6fire_control_output_mute_update(rt); usb6fire_control_output_mute_put()
260 return rt->output_mute != old; usb6fire_control_output_mute_put()
266 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_output_mute_get() local
268 u8 value = rt->output_mute >> ch; usb6fire_control_output_mute_get()
271 dev_err(&rt->chip->dev->dev, usb6fire_control_output_mute_get()
296 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_input_vol_put() local
299 if (rt->input_vol[0] != ucontrol->value.integer.value[0]) { usb6fire_control_input_vol_put()
300 rt->input_vol[0] = ucontrol->value.integer.value[0] - 15; usb6fire_control_input_vol_put()
301 rt->ivol_updated &= ~(1 << 0); usb6fire_control_input_vol_put()
304 if (rt->input_vol[1] != ucontrol->value.integer.value[1]) { usb6fire_control_input_vol_put()
305 rt->input_vol[1] = ucontrol->value.integer.value[1] - 15; usb6fire_control_input_vol_put()
306 rt->ivol_updated &= ~(1 << 1); usb6fire_control_input_vol_put()
311 usb6fire_control_input_vol_update(rt); usb6fire_control_input_vol_put()
319 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_input_vol_get() local
321 ucontrol->value.integer.value[0] = rt->input_vol[0] + 15; usb6fire_control_input_vol_get()
322 ucontrol->value.integer.value[1] = rt->input_vol[1] + 15; usb6fire_control_input_vol_get()
336 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_line_phono_put() local
338 if (rt->line_phono_switch != ucontrol->value.integer.value[0]) { usb6fire_control_line_phono_put()
339 rt->line_phono_switch = ucontrol->value.integer.value[0]; usb6fire_control_line_phono_put()
340 usb6fire_control_line_phono_update(rt); usb6fire_control_line_phono_put()
349 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_line_phono_get() local
350 ucontrol->value.integer.value[0] = rt->line_phono_switch; usb6fire_control_line_phono_get()
363 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_opt_coax_put() local
366 if (rt->opt_coax_switch != ucontrol->value.enumerated.item[0]) { usb6fire_control_opt_coax_put()
367 rt->opt_coax_switch = ucontrol->value.enumerated.item[0]; usb6fire_control_opt_coax_put()
368 usb6fire_control_opt_coax_update(rt); usb6fire_control_opt_coax_put()
377 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_opt_coax_get() local
378 ucontrol->value.enumerated.item[0] = rt->opt_coax_switch; usb6fire_control_opt_coax_get()
385 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_digital_thru_put() local
388 if (rt->digital_thru_switch != ucontrol->value.integer.value[0]) { usb6fire_control_digital_thru_put()
389 rt->digital_thru_switch = ucontrol->value.integer.value[0]; usb6fire_control_digital_thru_put()
390 usb6fire_control_streaming_update(rt); usb6fire_control_digital_thru_put()
399 struct control_runtime *rt = snd_kcontrol_chip(kcontrol); usb6fire_control_digital_thru_get() local
400 ucontrol->value.integer.value[0] = rt->digital_thru_switch; usb6fire_control_digital_thru_get()
521 struct control_runtime *rt, usb6fire_control_add_virtual()
540 control = snd_ctl_new1(&elems[i], rt); usb6fire_control_add_virtual()
558 struct control_runtime *rt = kzalloc(sizeof(struct control_runtime), usb6fire_control_init() local
562 if (!rt) usb6fire_control_init()
565 rt->chip = chip; usb6fire_control_init()
566 rt->update_streaming = usb6fire_control_streaming_update; usb6fire_control_init()
567 rt->set_rate = usb6fire_control_set_rate; usb6fire_control_init()
568 rt->set_channels = usb6fire_control_set_channels; usb6fire_control_init()
577 usb6fire_control_opt_coax_update(rt); usb6fire_control_init()
578 usb6fire_control_line_phono_update(rt); usb6fire_control_init()
579 usb6fire_control_output_vol_update(rt); usb6fire_control_init()
580 usb6fire_control_output_mute_update(rt); usb6fire_control_init()
581 usb6fire_control_input_vol_update(rt); usb6fire_control_init()
582 usb6fire_control_streaming_update(rt); usb6fire_control_init()
584 ret = usb6fire_control_add_virtual(rt, chip->card, usb6fire_control_init()
588 kfree(rt); usb6fire_control_init()
591 ret = usb6fire_control_add_virtual(rt, chip->card, usb6fire_control_init()
595 kfree(rt); usb6fire_control_init()
601 ret = snd_ctl_add(chip->card, snd_ctl_new1(&elements[i], rt)); usb6fire_control_init()
603 kfree(rt); usb6fire_control_init()
610 chip->control = rt; usb6fire_control_init()
140 usb6fire_control_set_channels( struct control_runtime *rt, int n_analog_out, int n_analog_in, bool spdif_out, bool spdif_in) usb6fire_control_set_channels() argument
520 usb6fire_control_add_virtual( struct control_runtime *rt, struct snd_card *card, char *name, struct snd_kcontrol_new *elems) usb6fire_control_add_virtual() argument
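
control.c caches the mixer state (output_vol[], output_mute, input_vol[] and the switches) and pushes it to the device lazily: the *_put callbacks only update the cache and clear a per-channel bit in ovol_updated/ivol_updated, and the *_update routines write out just the channels whose bit is still clear, setting it again afterwards. A reduced sketch of that dirty-bit scheme; the channel count and the write8 register numbers are not taken from the excerpt:

#include <linux/types.h>

struct vol_cache_sketch {
	u8 output_vol[6];	/* 6 channels assumed */
	u8 ovol_updated;	/* bit n set => channel n already written */
};

/* kcontrol .put side: remember the new value, mark the channel dirty */
static int sketch_vol_put(struct vol_cache_sketch *c, int ch, u8 val)
{
	if (c->output_vol[ch] == val)
		return 0;			/* nothing changed */
	c->output_vol[ch] = val;
	c->ovol_updated &= ~(1 << ch);		/* force a re-send */
	return 1;
}

/* update side: push only channels still marked dirty */
static void sketch_vol_update(struct vol_cache_sketch *c)
{
	int i;

	for (i = 0; i < 6; i++) {
		if (c->ovol_updated & (1 << i))
			continue;
		/* device write of (180 - output_vol[i]) goes here; the
		 * write8 register numbers are not visible above */
		c->ovol_updated |= 1 << i;	/* channel now in sync */
	}
}
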
pcm.c
74 static int usb6fire_pcm_set_rate(struct pcm_runtime *rt) usb6fire_pcm_set_rate() argument
77 struct control_runtime *ctrl_rt = rt->chip->control; usb6fire_pcm_set_rate()
82 dev_err(&rt->chip->dev->dev, usb6fire_pcm_set_rate()
84 rates[rt->rate]); usb6fire_pcm_set_rate()
88 ret = ctrl_rt->set_rate(ctrl_rt, rt->rate); usb6fire_pcm_set_rate()
90 dev_err(&rt->chip->dev->dev, usb6fire_pcm_set_rate()
92 rates[rt->rate]); usb6fire_pcm_set_rate()
99 dev_err(&rt->chip->dev->dev, usb6fire_pcm_set_rate()
101 rates[rt->rate]); usb6fire_pcm_set_rate()
108 dev_err(&rt->chip->dev->dev, usb6fire_pcm_set_rate()
110 rates[rt->rate]); usb6fire_pcm_set_rate()
114 rt->in_n_analog = IN_N_CHANNELS; usb6fire_pcm_set_rate()
115 rt->out_n_analog = OUT_N_CHANNELS; usb6fire_pcm_set_rate()
116 rt->in_packet_size = rates_in_packet_size[rt->rate]; usb6fire_pcm_set_rate()
117 rt->out_packet_size = rates_out_packet_size[rt->rate]; usb6fire_pcm_set_rate()
124 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); usb6fire_pcm_get_substream() local
127 return &rt->playback; usb6fire_pcm_get_substream()
129 return &rt->capture; usb6fire_pcm_get_substream()
130 dev_err(&rt->chip->dev->dev, "error getting pcm substream slot.\n"); usb6fire_pcm_get_substream()
135 static void usb6fire_pcm_stream_stop(struct pcm_runtime *rt) usb6fire_pcm_stream_stop() argument
138 struct control_runtime *ctrl_rt = rt->chip->control; usb6fire_pcm_stream_stop()
140 if (rt->stream_state != STREAM_DISABLED) { usb6fire_pcm_stream_stop()
142 rt->stream_state = STREAM_STOPPING; usb6fire_pcm_stream_stop()
145 usb_kill_urb(&rt->in_urbs[i].instance); usb6fire_pcm_stream_stop()
146 usb_kill_urb(&rt->out_urbs[i].instance); usb6fire_pcm_stream_stop()
150 rt->stream_state = STREAM_DISABLED; usb6fire_pcm_stream_stop()
155 static int usb6fire_pcm_stream_start(struct pcm_runtime *rt) usb6fire_pcm_stream_start() argument
162 if (rt->stream_state == STREAM_DISABLED) { usb6fire_pcm_stream_start()
164 rt->stream_wait_cond = false; usb6fire_pcm_stream_start()
165 rt->stream_state = STREAM_STARTING; usb6fire_pcm_stream_start()
168 packet = &rt->in_urbs[i].packets[k]; usb6fire_pcm_stream_start()
169 packet->offset = k * rt->in_packet_size; usb6fire_pcm_stream_start()
170 packet->length = rt->in_packet_size; usb6fire_pcm_stream_start()
174 ret = usb_submit_urb(&rt->in_urbs[i].instance, usb6fire_pcm_stream_start()
177 usb6fire_pcm_stream_stop(rt); usb6fire_pcm_stream_start()
183 wait_event_timeout(rt->stream_wait_queue, rt->stream_wait_cond, usb6fire_pcm_stream_start()
185 if (rt->stream_wait_cond) usb6fire_pcm_stream_start()
186 rt->stream_state = STREAM_RUNNING; usb6fire_pcm_stream_start()
188 usb6fire_pcm_stream_stop(rt); usb6fire_pcm_stream_start()
202 struct pcm_runtime *rt = snd_pcm_substream_chip(sub->instance); usb6fire_pcm_capture() local
216 / (rt->in_n_analog << 2); usb6fire_pcm_capture()
231 src += rt->in_n_analog; usb6fire_pcm_capture()
249 struct pcm_runtime *rt = snd_pcm_substream_chip(sub->instance); usb6fire_pcm_playback() local
263 dev_err(&rt->chip->dev->dev, "Unknown sample format."); usb6fire_pcm_playback()
272 / (rt->out_n_analog << 2); usb6fire_pcm_playback()
279 dest += rt->out_n_analog; usb6fire_pcm_playback()
294 struct pcm_runtime *rt = in_urb->chip->pcm; usb6fire_pcm_in_urb_handler() local
304 if (usb_urb->status || rt->panic || rt->stream_state == STREAM_STOPPING) usb6fire_pcm_in_urb_handler()
308 rt->panic = true; usb6fire_pcm_in_urb_handler()
312 if (rt->stream_state == STREAM_DISABLED) { usb6fire_pcm_in_urb_handler()
313 dev_err(&rt->chip->dev->dev, usb6fire_pcm_in_urb_handler()
319 sub = &rt->capture; usb6fire_pcm_in_urb_handler()
336 - 4) / (rt->in_n_analog << 2) usb6fire_pcm_in_urb_handler()
337 * (rt->out_n_analog << 2) + 4; usb6fire_pcm_in_urb_handler()
344 sub = &rt->playback; usb6fire_pcm_in_urb_handler()
362 / (rt->out_n_analog << 2); usb6fire_pcm_in_urb_handler()
369 channel < rt->out_n_analog; usb6fire_pcm_in_urb_handler()
382 struct pcm_runtime *rt = urb->chip->pcm; usb6fire_pcm_out_urb_handler() local
384 if (rt->stream_state == STREAM_STARTING) { usb6fire_pcm_out_urb_handler()
385 rt->stream_wait_cond = true; usb6fire_pcm_out_urb_handler()
386 wake_up(&rt->stream_wait_queue); usb6fire_pcm_out_urb_handler()
392 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); usb6fire_pcm_open() local
396 if (rt->panic) usb6fire_pcm_open()
399 mutex_lock(&rt->stream_mutex); usb6fire_pcm_open()
403 if (rt->rate < ARRAY_SIZE(rates)) usb6fire_pcm_open()
404 alsa_rt->hw.rates = rates_alsaid[rt->rate]; usb6fire_pcm_open()
406 sub = &rt->playback; usb6fire_pcm_open()
408 if (rt->rate < ARRAY_SIZE(rates)) usb6fire_pcm_open()
409 alsa_rt->hw.rates = rates_alsaid[rt->rate]; usb6fire_pcm_open()
411 sub = &rt->capture; usb6fire_pcm_open()
415 mutex_unlock(&rt->stream_mutex); usb6fire_pcm_open()
416 dev_err(&rt->chip->dev->dev, "invalid stream type.\n"); usb6fire_pcm_open()
422 mutex_unlock(&rt->stream_mutex); usb6fire_pcm_open()
428 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); usb6fire_pcm_close() local
432 if (rt->panic) usb6fire_pcm_close()
435 mutex_lock(&rt->stream_mutex); usb6fire_pcm_close()
444 if (!rt->playback.instance && !rt->capture.instance) { usb6fire_pcm_close()
445 usb6fire_pcm_stream_stop(rt); usb6fire_pcm_close()
446 rt->rate = ARRAY_SIZE(rates); usb6fire_pcm_close()
449 mutex_unlock(&rt->stream_mutex); usb6fire_pcm_close()
467 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); usb6fire_pcm_prepare() local
472 if (rt->panic) usb6fire_pcm_prepare()
477 mutex_lock(&rt->stream_mutex); usb6fire_pcm_prepare()
481 if (rt->stream_state == STREAM_DISABLED) { usb6fire_pcm_prepare()
482 for (rt->rate = 0; rt->rate < ARRAY_SIZE(rates); rt->rate++) usb6fire_pcm_prepare()
483 if (alsa_rt->rate == rates[rt->rate]) usb6fire_pcm_prepare()
485 if (rt->rate == ARRAY_SIZE(rates)) { usb6fire_pcm_prepare()
486 mutex_unlock(&rt->stream_mutex); usb6fire_pcm_prepare()
487 dev_err(&rt->chip->dev->dev, usb6fire_pcm_prepare()
493 ret = usb6fire_pcm_set_rate(rt); usb6fire_pcm_prepare()
495 mutex_unlock(&rt->stream_mutex); usb6fire_pcm_prepare()
498 ret = usb6fire_pcm_stream_start(rt); usb6fire_pcm_prepare()
500 mutex_unlock(&rt->stream_mutex); usb6fire_pcm_prepare()
501 dev_err(&rt->chip->dev->dev, usb6fire_pcm_prepare()
506 mutex_unlock(&rt->stream_mutex); usb6fire_pcm_prepare()
513 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); usb6fire_pcm_trigger() local
516 if (rt->panic) usb6fire_pcm_trigger()
545 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); usb6fire_pcm_pointer() local
549 if (rt->panic || !sub) usb6fire_pcm_pointer()
589 static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt) usb6fire_pcm_buffers_init() argument
594 rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB usb6fire_pcm_buffers_init()
596 if (!rt->out_urbs[i].buffer) usb6fire_pcm_buffers_init()
598 rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB usb6fire_pcm_buffers_init()
600 if (!rt->in_urbs[i].buffer) usb6fire_pcm_buffers_init()
606 static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt) usb6fire_pcm_buffers_destroy() argument
611 kfree(rt->out_urbs[i].buffer); usb6fire_pcm_buffers_destroy()
612 kfree(rt->in_urbs[i].buffer); usb6fire_pcm_buffers_destroy()
621 struct pcm_runtime *rt = usb6fire_pcm_init() local
624 if (!rt) usb6fire_pcm_init()
627 ret = usb6fire_pcm_buffers_init(rt); usb6fire_pcm_init()
629 usb6fire_pcm_buffers_destroy(rt); usb6fire_pcm_init()
630 kfree(rt); usb6fire_pcm_init()
634 rt->chip = chip; usb6fire_pcm_init()
635 rt->stream_state = STREAM_DISABLED; usb6fire_pcm_init()
636 rt->rate = ARRAY_SIZE(rates); usb6fire_pcm_init()
637 init_waitqueue_head(&rt->stream_wait_queue); usb6fire_pcm_init()
638 mutex_init(&rt->stream_mutex); usb6fire_pcm_init()
640 spin_lock_init(&rt->playback.lock); usb6fire_pcm_init()
641 spin_lock_init(&rt->capture.lock); usb6fire_pcm_init()
644 usb6fire_pcm_init_urb(&rt->in_urbs[i], chip, true, IN_EP, usb6fire_pcm_init()
646 usb6fire_pcm_init_urb(&rt->out_urbs[i], chip, false, OUT_EP, usb6fire_pcm_init()
649 rt->in_urbs[i].peer = &rt->out_urbs[i]; usb6fire_pcm_init()
650 rt->out_urbs[i].peer = &rt->in_urbs[i]; usb6fire_pcm_init()
655 usb6fire_pcm_buffers_destroy(rt); usb6fire_pcm_init()
656 kfree(rt); usb6fire_pcm_init()
661 pcm->private_data = rt; usb6fire_pcm_init()
667 usb6fire_pcm_buffers_destroy(rt); usb6fire_pcm_init()
668 kfree(rt); usb6fire_pcm_init()
673 rt->instance = pcm; usb6fire_pcm_init()
675 chip->pcm = rt; usb6fire_pcm_init()
681 struct pcm_runtime *rt = chip->pcm; usb6fire_pcm_abort() local
684 if (rt) { usb6fire_pcm_abort()
685 rt->panic = true; usb6fire_pcm_abort()
687 if (rt->playback.instance) usb6fire_pcm_abort()
688 snd_pcm_stop_xrun(rt->playback.instance); usb6fire_pcm_abort()
690 if (rt->capture.instance) usb6fire_pcm_abort()
691 snd_pcm_stop_xrun(rt->capture.instance); usb6fire_pcm_abort()
694 usb_poison_urb(&rt->in_urbs[i].instance); usb6fire_pcm_abort()
695 usb_poison_urb(&rt->out_urbs[i].instance); usb6fire_pcm_abort()
703 struct pcm_runtime *rt = chip->pcm; usb6fire_pcm_destroy() local
705 usb6fire_pcm_buffers_destroy(rt); usb6fire_pcm_destroy()
706 kfree(rt); usb6fire_pcm_destroy()
comm.c
25 static void usb6fire_comm_init_urb(struct comm_runtime *rt, struct urb *urb, usb6fire_comm_init_urb() argument
30 urb->pipe = usb_sndintpipe(rt->chip->dev, COMM_EP); usb6fire_comm_init_urb()
34 urb->dev = rt->chip->dev; usb6fire_comm_init_urb()
39 struct comm_runtime *rt = urb->context; usb6fire_comm_receiver_handler() local
40 struct midi_runtime *midi_rt = rt->chip->midi; usb6fire_comm_receiver_handler()
43 if (rt->receiver_buffer[0] == 0x10) /* midi in event */ usb6fire_comm_receiver_handler()
46 rt->receiver_buffer + 2, usb6fire_comm_receiver_handler()
47 rt->receiver_buffer[1]); usb6fire_comm_receiver_handler()
50 if (!rt->chip->shutdown) { usb6fire_comm_receiver_handler()
110 static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request, usb6fire_comm_write8() argument
122 ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev); usb6fire_comm_write8()
128 static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request, usb6fire_comm_write16() argument
140 ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev); usb6fire_comm_write16()
148 struct comm_runtime *rt = kzalloc(sizeof(struct comm_runtime), usb6fire_comm_init() local
153 if (!rt) usb6fire_comm_init()
156 rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL); usb6fire_comm_init()
157 if (!rt->receiver_buffer) { usb6fire_comm_init()
158 kfree(rt); usb6fire_comm_init()
162 urb = &rt->receiver; usb6fire_comm_init()
163 rt->serial = 1; usb6fire_comm_init()
164 rt->chip = chip; usb6fire_comm_init()
166 rt->init_urb = usb6fire_comm_init_urb; usb6fire_comm_init()
167 rt->write8 = usb6fire_comm_write8; usb6fire_comm_init()
168 rt->write16 = usb6fire_comm_write16; usb6fire_comm_init()
171 urb->transfer_buffer = rt->receiver_buffer; usb6fire_comm_init()
176 urb->context = rt; usb6fire_comm_init()
180 kfree(rt->receiver_buffer); usb6fire_comm_init()
181 kfree(rt); usb6fire_comm_init()
185 chip->comm = rt; usb6fire_comm_init()
191 struct comm_runtime *rt = chip->comm; usb6fire_comm_abort() local
193 if (rt) usb6fire_comm_abort()
194 usb_poison_urb(&rt->receiver); usb6fire_comm_abort()
199 struct comm_runtime *rt = chip->comm; usb6fire_comm_destroy() local
201 kfree(rt->receiver_buffer); usb6fire_comm_destroy()
202 kfree(rt); usb6fire_comm_destroy()
comm.h
31 void (*init_urb)(struct comm_runtime *rt, struct urb *urb, u8 *buffer,
34 int (*write8)(struct comm_runtime *rt, u8 request, u8 reg, u8 value);
35 int (*write16)(struct comm_runtime *rt, u8 request, u8 reg,
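
comm.h exposes the USB command channel purely as function pointers on struct comm_runtime; usb6fire_comm_init() above installs usb6fire_comm_write8/usb6fire_comm_write16, and the control, MIDI and PCM modules only ever call through the pointers (for example comm_rt->write8(comm_rt, 0x12, 0x0e, ~rt->output_mute) in control.c). A stripped-down sketch of that ops-table pattern, with types reduced to what the excerpts show:

#include <linux/types.h>

struct comm_ops_sketch {
	int (*write8)(struct comm_ops_sketch *rt, u8 request, u8 reg, u8 value);
};

static int sketch_comm_write8(struct comm_ops_sketch *rt, u8 request,
			      u8 reg, u8 value)
{
	/* the real implementation builds a command buffer and sends it
	 * over the COMM endpoint */
	return 0;
}

static void sketch_comm_init(struct comm_ops_sketch *rt)
{
	rt->write8 = sketch_comm_write8;	/* as in usb6fire_comm_init() */
}

static void sketch_mute_update(struct comm_ops_sketch *comm_rt, u8 output_mute)
{
	comm_rt->write8(comm_rt, 0x12, 0x0e, ~output_mute);	/* as in control.c */
}
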
control.h
34 int (*update_streaming)(struct control_runtime *rt);
35 int (*set_rate)(struct control_runtime *rt, int rate);
36 int (*set_channels)(struct control_runtime *rt, int n_analog_out,
midi.h
34 void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
/linux-4.1.27/sound/aoa/core/
gpio-pmf.c
15 static void pmf_gpio_set_##name(struct gpio_runtime *rt, int on)\
20 if (unlikely(!rt)) return; \
21 rc = pmf_call_function(rt->node, #name "-mute", &args); \
25 rt->implementation_private &= ~(1<<bit); \
26 rt->implementation_private |= (!!on << bit); \
28 static int pmf_gpio_get_##name(struct gpio_runtime *rt) \
30 if (unlikely(!rt)) return 0; \
31 return (rt->implementation_private>>bit)&1; \
38 static void pmf_gpio_set_hw_reset(struct gpio_runtime *rt, int on) pmf_gpio_set_hw_reset() argument
43 if (unlikely(!rt)) return; pmf_gpio_set_hw_reset()
44 rc = pmf_call_function(rt->node, "hw-reset", &args); pmf_gpio_set_hw_reset()
50 static void pmf_gpio_all_amps_off(struct gpio_runtime *rt) pmf_gpio_all_amps_off() argument
54 if (unlikely(!rt)) return; pmf_gpio_all_amps_off()
55 saved = rt->implementation_private; pmf_gpio_all_amps_off()
56 pmf_gpio_set_headphone(rt, 0); pmf_gpio_all_amps_off()
57 pmf_gpio_set_amp(rt, 0); pmf_gpio_all_amps_off()
58 pmf_gpio_set_lineout(rt, 0); pmf_gpio_all_amps_off()
59 rt->implementation_private = saved; pmf_gpio_all_amps_off()
62 static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt) pmf_gpio_all_amps_restore() argument
66 if (unlikely(!rt)) return; pmf_gpio_all_amps_restore()
67 s = rt->implementation_private; pmf_gpio_all_amps_restore()
68 pmf_gpio_set_headphone(rt, (s>>0)&1); pmf_gpio_all_amps_restore()
69 pmf_gpio_set_amp(rt, (s>>1)&1); pmf_gpio_all_amps_restore()
70 pmf_gpio_set_lineout(rt, (s>>2)&1); pmf_gpio_all_amps_restore()
84 static void pmf_gpio_init(struct gpio_runtime *rt) pmf_gpio_init() argument
86 pmf_gpio_all_amps_off(rt); pmf_gpio_init()
87 rt->implementation_private = 0; pmf_gpio_init()
88 INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify); pmf_gpio_init()
89 INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify); pmf_gpio_init()
90 INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify); pmf_gpio_init()
91 mutex_init(&rt->headphone_notify.mutex); pmf_gpio_init()
92 mutex_init(&rt->line_in_notify.mutex); pmf_gpio_init()
93 mutex_init(&rt->line_out_notify.mutex); pmf_gpio_init()
96 static void pmf_gpio_exit(struct gpio_runtime *rt) pmf_gpio_exit() argument
98 pmf_gpio_all_amps_off(rt); pmf_gpio_exit()
99 rt->implementation_private = 0; pmf_gpio_exit()
101 if (rt->headphone_notify.gpio_private) pmf_gpio_exit()
102 pmf_unregister_irq_client(rt->headphone_notify.gpio_private); pmf_gpio_exit()
103 if (rt->line_in_notify.gpio_private) pmf_gpio_exit()
104 pmf_unregister_irq_client(rt->line_in_notify.gpio_private); pmf_gpio_exit()
105 if (rt->line_out_notify.gpio_private) pmf_gpio_exit()
106 pmf_unregister_irq_client(rt->line_out_notify.gpio_private); pmf_gpio_exit()
110 cancel_delayed_work_sync(&rt->headphone_notify.work); pmf_gpio_exit()
111 cancel_delayed_work_sync(&rt->line_in_notify.work); pmf_gpio_exit()
112 cancel_delayed_work_sync(&rt->line_out_notify.work); pmf_gpio_exit()
114 mutex_destroy(&rt->headphone_notify.mutex); pmf_gpio_exit()
115 mutex_destroy(&rt->line_in_notify.mutex); pmf_gpio_exit()
116 mutex_destroy(&rt->line_out_notify.mutex); pmf_gpio_exit()
118 kfree(rt->headphone_notify.gpio_private); pmf_gpio_exit()
119 kfree(rt->line_in_notify.gpio_private); pmf_gpio_exit()
120 kfree(rt->line_out_notify.gpio_private); pmf_gpio_exit()
130 static int pmf_set_notify(struct gpio_runtime *rt, pmf_set_notify() argument
143 notif = &rt->headphone_notify; pmf_set_notify()
147 notif = &rt->line_in_notify; pmf_set_notify()
151 notif = &rt->line_out_notify; pmf_set_notify()
189 err = pmf_register_irq_client(rt->node, pmf_set_notify()
209 static int pmf_get_detect(struct gpio_runtime *rt, pmf_get_detect() argument
230 err = pmf_call_function(rt->node, name, &args); pmf_get_detect()
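
Each pmf_gpio_set_* helper above mirrors its on/off state into one bit of rt->implementation_private (headphone, amp and lineout in bits 0..2), which is why all_amps_off() saves that word, drives everything low, then puts the saved value back so all_amps_restore() can replay the wanted state later. Reassembled from the fragments above as a sketch:

static void sketch_all_amps_off(struct gpio_runtime *rt)
{
	int saved;

	if (unlikely(!rt))
		return;
	saved = rt->implementation_private;	/* remember the desired state */
	pmf_gpio_set_headphone(rt, 0);		/* each setter rewrites its bit */
	pmf_gpio_set_amp(rt, 0);
	pmf_gpio_set_lineout(rt, 0);
	rt->implementation_private = saved;	/* keep it for the restore path */
}

static void sketch_all_amps_restore(struct gpio_runtime *rt)
{
	int s;

	if (unlikely(!rt))
		return;
	s = rt->implementation_private;
	pmf_gpio_set_headphone(rt, (s >> 0) & 1);
	pmf_gpio_set_amp(rt, (s >> 1) & 1);
	pmf_gpio_set_lineout(rt, (s >> 2) & 1);
}
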
gpio-feature.c
131 static void ftr_gpio_set_##name(struct gpio_runtime *rt, int on)\
135 if (unlikely(!rt)) return; \
150 rt->implementation_private &= ~(1<<bit); \
151 rt->implementation_private |= (!!on << bit); \
153 static int ftr_gpio_get_##name(struct gpio_runtime *rt) \
155 if (unlikely(!rt)) return 0; \
156 return (rt->implementation_private>>bit)&1; \
164 static void ftr_gpio_set_hw_reset(struct gpio_runtime *rt, int on) ftr_gpio_set_hw_reset() argument
168 if (unlikely(!rt)) return; ftr_gpio_set_hw_reset()
181 static void ftr_gpio_all_amps_off(struct gpio_runtime *rt) ftr_gpio_all_amps_off() argument
185 if (unlikely(!rt)) return; ftr_gpio_all_amps_off()
186 saved = rt->implementation_private; ftr_gpio_all_amps_off()
187 ftr_gpio_set_headphone(rt, 0); ftr_gpio_all_amps_off()
188 ftr_gpio_set_amp(rt, 0); ftr_gpio_all_amps_off()
189 ftr_gpio_set_lineout(rt, 0); ftr_gpio_all_amps_off()
191 ftr_gpio_set_master(rt, 0); ftr_gpio_all_amps_off()
192 rt->implementation_private = saved; ftr_gpio_all_amps_off()
195 static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt) ftr_gpio_all_amps_restore() argument
199 if (unlikely(!rt)) return; ftr_gpio_all_amps_restore()
200 s = rt->implementation_private; ftr_gpio_all_amps_restore()
201 ftr_gpio_set_headphone(rt, (s>>0)&1); ftr_gpio_all_amps_restore()
202 ftr_gpio_set_amp(rt, (s>>1)&1); ftr_gpio_all_amps_restore()
203 ftr_gpio_set_lineout(rt, (s>>2)&1); ftr_gpio_all_amps_restore()
205 ftr_gpio_set_master(rt, (s>>3)&1); ftr_gpio_all_amps_restore()
230 static void ftr_gpio_init(struct gpio_runtime *rt) ftr_gpio_init() argument
271 ftr_gpio_all_amps_off(rt); ftr_gpio_init()
272 rt->implementation_private = 0; ftr_gpio_init()
273 INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify); ftr_gpio_init()
274 INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify); ftr_gpio_init()
275 INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify); ftr_gpio_init()
276 mutex_init(&rt->headphone_notify.mutex); ftr_gpio_init()
277 mutex_init(&rt->line_in_notify.mutex); ftr_gpio_init()
278 mutex_init(&rt->line_out_notify.mutex); ftr_gpio_init()
281 static void ftr_gpio_exit(struct gpio_runtime *rt) ftr_gpio_exit() argument
283 ftr_gpio_all_amps_off(rt); ftr_gpio_exit()
284 rt->implementation_private = 0; ftr_gpio_exit()
285 if (rt->headphone_notify.notify) ftr_gpio_exit()
286 free_irq(headphone_detect_irq, &rt->headphone_notify); ftr_gpio_exit()
287 if (rt->line_in_notify.gpio_private) ftr_gpio_exit()
288 free_irq(linein_detect_irq, &rt->line_in_notify); ftr_gpio_exit()
289 if (rt->line_out_notify.gpio_private) ftr_gpio_exit()
290 free_irq(lineout_detect_irq, &rt->line_out_notify); ftr_gpio_exit()
291 cancel_delayed_work_sync(&rt->headphone_notify.work); ftr_gpio_exit()
292 cancel_delayed_work_sync(&rt->line_in_notify.work); ftr_gpio_exit()
293 cancel_delayed_work_sync(&rt->line_out_notify.work); ftr_gpio_exit()
294 mutex_destroy(&rt->headphone_notify.mutex); ftr_gpio_exit()
295 mutex_destroy(&rt->line_in_notify.mutex); ftr_gpio_exit()
296 mutex_destroy(&rt->line_out_notify.mutex); ftr_gpio_exit()
308 static int ftr_set_notify(struct gpio_runtime *rt, ftr_set_notify() argument
321 notif = &rt->headphone_notify; ftr_set_notify()
326 notif = &rt->line_in_notify; ftr_set_notify()
331 notif = &rt->line_out_notify; ftr_set_notify()
375 static int ftr_get_detect(struct gpio_runtime *rt, ftr_get_detect() argument
/linux-4.1.27/net/x25/
x25_route.c
34 struct x25_route *rt; x25_add_route() local
41 rt = list_entry(entry, struct x25_route, node); x25_add_route()
43 if (!memcmp(&rt->address, address, sigdigits) && x25_add_route()
44 rt->sigdigits == sigdigits) x25_add_route()
48 rt = kmalloc(sizeof(*rt), GFP_ATOMIC); x25_add_route()
50 if (!rt) x25_add_route()
53 strcpy(rt->address.x25_addr, "000000000000000"); x25_add_route()
54 memcpy(rt->address.x25_addr, address->x25_addr, sigdigits); x25_add_route()
56 rt->sigdigits = sigdigits; x25_add_route()
57 rt->dev = dev; x25_add_route()
58 atomic_set(&rt->refcnt, 1); x25_add_route()
60 list_add(&rt->node, &x25_route_list); x25_add_route()
69 * @rt: route to remove
74 static void __x25_remove_route(struct x25_route *rt) __x25_remove_route() argument
76 if (rt->node.next) { __x25_remove_route()
77 list_del(&rt->node); __x25_remove_route()
78 x25_route_put(rt); __x25_remove_route()
85 struct x25_route *rt; x25_del_route() local
92 rt = list_entry(entry, struct x25_route, node); x25_del_route()
94 if (!memcmp(&rt->address, address, sigdigits) && x25_del_route()
95 rt->sigdigits == sigdigits && rt->dev == dev) { x25_del_route()
96 __x25_remove_route(rt); x25_del_route()
111 struct x25_route *rt; x25_route_device_down() local
117 rt = list_entry(entry, struct x25_route, node); x25_route_device_down()
119 if (rt->dev == dev) x25_route_device_down()
120 __x25_remove_route(rt); x25_route_device_down()
156 struct x25_route *rt, *use = NULL; x25_get_route() local
162 rt = list_entry(entry, struct x25_route, node); x25_get_route()
164 if (!memcmp(&rt->address, addr, rt->sigdigits)) { x25_get_route()
166 use = rt; x25_get_route()
167 else if (rt->sigdigits > use->sigdigits) x25_get_route()
168 use = rt; x25_get_route()
184 struct x25_route_struct rt; x25_route_ioctl() local
192 if (copy_from_user(&rt, arg, sizeof(rt))) x25_route_ioctl()
196 if (rt.sigdigits > 15) x25_route_ioctl()
199 dev = x25_dev_get(rt.device); x25_route_ioctl()
204 rc = x25_add_route(&rt.address, rt.sigdigits, dev); x25_route_ioctl()
206 rc = x25_del_route(&rt.address, rt.sigdigits, dev); x25_route_ioctl()
217 struct x25_route *rt; x25_route_free() local
222 rt = list_entry(entry, struct x25_route, node); x25_route_free()
223 __x25_remove_route(rt); x25_route_free()
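
x25_get_route() above does a longest-prefix match over x25_route_list: every entry whose significant digits prefix-match the destination is a candidate, and the candidate with the most significant digits wins. A sketch of that selection loop, reassembled from the fragments; the if (!use) branch is inferred from context:

static struct x25_route *sketch_longest_match(const struct x25_address *addr)
{
	struct x25_route *rt, *use = NULL;
	struct list_head *entry;

	list_for_each(entry, &x25_route_list) {
		rt = list_entry(entry, struct x25_route, node);

		if (!memcmp(&rt->address, addr, rt->sigdigits)) {
			if (!use)
				use = rt;			/* first match */
			else if (rt->sigdigits > use->sigdigits)
				use = rt;			/* longer prefix wins */
		}
	}
	return use;
}
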
x25_forward.c
25 struct x25_route *rt; x25_forward_call() local
33 if ((rt = x25_get_route(dest_addr)) == NULL) x25_forward_call()
36 if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) { x25_forward_call()
46 if (rt->dev == from->dev) { x25_forward_call()
71 new_frwd->dev1 = rt->dev; x25_forward_call()
90 x25_route_put(rt); x25_forward_call()
x25_proc.c
50 struct x25_route *rt = list_entry(v, struct x25_route, node); x25_seq_route_show() local
57 rt = v; x25_seq_route_show()
59 rt->address.x25_addr, rt->sigdigits, x25_seq_route_show()
60 rt->dev ? rt->dev->name : "???"); x25_seq_route_show()
/linux-4.1.27/arch/mips/cavium-octeon/crypto/
H A Docteon-crypto.h34 "dmtc2 %[rt],0x0048+" STR(index) \
36 : [rt] "d" (cpu_to_be64(value))); \
47 "dmfc2 %[rt],0x0048+" STR(index) \
48 : [rt] "=d" (__value) \
60 "dmtc2 %[rt],0x0040+" STR(index) \
62 : [rt] "d" (cpu_to_be64(value))); \
71 "dmtc2 %[rt],0x4047" \
73 : [rt] "d" (cpu_to_be64(value))); \
82 "dmtc2 %[rt],0x4057" \
84 : [rt] "d" (value)); \
93 "dmtc2 %[rt],0x404f" \
95 : [rt] "d" (value)); \
108 "dmtc2 %[rt],0x0250+" STR(index) \
110 : [rt] "d" (value)); \
121 "dmfc2 %[rt],0x0250+" STR(index) \
122 : [rt] "=d" (__value) \
134 "dmtc2 %[rt],0x0240+" STR(index) \
136 : [rt] "d" (value)); \
145 "dmtc2 %[rt],0x424f" \
147 : [rt] "d" (value)); \
156 "dmtc2 %[rt],0x4057" \
158 : [rt] "d" (value)); \
167 "dmtc2 %[rt],0x404f" \
169 : [rt] "d" (value)); \
182 "dmtc2 %[rt],0x0250+" STR(index) \
184 : [rt] "d" (value)); \
195 "dmfc2 %[rt],0x0250+" STR(index) \
196 : [rt] "=d" (__value) \
208 "dmtc2 %[rt],0x0240+" STR(index) \
210 : [rt] "d" (value)); \
219 "dmtc2 %[rt],0x424f" \
221 : [rt] "d" (value)); \
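
The fragments above are the bodies of a family of octeon-crypto.h macros that move 64-bit values to and from the crypto coprocessor with dmtc2/dmfc2, one CP2 register index per macro. A hedged reconstruction of one writer, assuming the usual shape of these macros; the name sketch_write_hash_dword is illustrative and STR() is taken to be the header's stringify helper:

#define sketch_write_hash_dword(value, index)			\
do {								\
	__asm__ __volatile__ (					\
	"dmtc2 %[rt],0x0048+" STR(index)			\
	:							\
	: [rt] "d" (cpu_to_be64(value)));			\
} while (0)
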
/linux-4.1.27/scripts/rt-tester/
check-all.sh
6 ./rt-tester.py $1 | grep Pass
9 testit t2-l1-2rt-sameprio.tst
12 #testit t2-l2-2rt-deadlock.tst
13 testit t3-l1-pi-1rt.tst
14 testit t3-l1-pi-2rt.tst
15 testit t3-l1-pi-3rt.tst
rt-tester.py
3 # rt-mutex tester
63 print "rt-tester.py <-c -h -q -t> <testfile>"
/linux-4.1.27/sound/usb/hiface/
pcm.c
116 static int hiface_pcm_set_rate(struct pcm_runtime *rt, unsigned int rate) hiface_pcm_set_rate() argument
118 struct usb_device *device = rt->chip->dev; hiface_pcm_set_rate()
178 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); hiface_pcm_get_substream() local
179 struct device *device = &rt->chip->dev->dev; hiface_pcm_get_substream()
182 return &rt->playback; hiface_pcm_get_substream()
189 static void hiface_pcm_stream_stop(struct pcm_runtime *rt) hiface_pcm_stream_stop() argument
193 if (rt->stream_state != STREAM_DISABLED) { hiface_pcm_stream_stop()
194 rt->stream_state = STREAM_STOPPING; hiface_pcm_stream_stop()
198 &rt->out_urbs[i].submitted, 100); hiface_pcm_stream_stop()
201 &rt->out_urbs[i].submitted); hiface_pcm_stream_stop()
202 usb_kill_urb(&rt->out_urbs[i].instance); hiface_pcm_stream_stop()
205 rt->stream_state = STREAM_DISABLED; hiface_pcm_stream_stop()
210 static int hiface_pcm_stream_start(struct pcm_runtime *rt) hiface_pcm_stream_start() argument
215 if (rt->stream_state == STREAM_DISABLED) { hiface_pcm_stream_start()
218 rt->panic = false; hiface_pcm_stream_start()
221 rt->stream_state = STREAM_STARTING; hiface_pcm_stream_start()
223 memset(rt->out_urbs[i].buffer, 0, PCM_PACKET_SIZE); hiface_pcm_stream_start()
224 usb_anchor_urb(&rt->out_urbs[i].instance, hiface_pcm_stream_start()
225 &rt->out_urbs[i].submitted); hiface_pcm_stream_start()
226 ret = usb_submit_urb(&rt->out_urbs[i].instance, hiface_pcm_stream_start()
229 hiface_pcm_stream_stop(rt); hiface_pcm_stream_start()
235 wait_event_timeout(rt->stream_wait_queue, rt->stream_wait_cond, hiface_pcm_stream_start()
237 if (rt->stream_wait_cond) { hiface_pcm_stream_start()
238 struct device *device = &rt->chip->dev->dev; hiface_pcm_stream_start()
241 rt->stream_state = STREAM_RUNNING; hiface_pcm_stream_start()
243 hiface_pcm_stream_stop(rt); hiface_pcm_stream_start()
311 struct pcm_runtime *rt = out_urb->chip->pcm; hiface_pcm_out_urb_handler() local
317 if (rt->panic || rt->stream_state == STREAM_STOPPING) hiface_pcm_out_urb_handler()
327 if (rt->stream_state == STREAM_STARTING) { hiface_pcm_out_urb_handler()
328 rt->stream_wait_cond = true; hiface_pcm_out_urb_handler()
329 wake_up(&rt->stream_wait_queue); hiface_pcm_out_urb_handler()
333 sub = &rt->playback; hiface_pcm_out_urb_handler()
352 rt->panic = true; hiface_pcm_out_urb_handler()
357 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); hiface_pcm_open() local
362 if (rt->panic) hiface_pcm_open()
365 mutex_lock(&rt->stream_mutex); hiface_pcm_open()
369 sub = &rt->playback; hiface_pcm_open()
372 struct device *device = &rt->chip->dev->dev; hiface_pcm_open()
373 mutex_unlock(&rt->stream_mutex); hiface_pcm_open()
378 if (rt->extra_freq) { hiface_pcm_open()
387 mutex_unlock(&rt->stream_mutex); hiface_pcm_open()
394 mutex_unlock(&rt->stream_mutex); hiface_pcm_open()
400 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); hiface_pcm_close() local
404 if (rt->panic) hiface_pcm_close()
407 mutex_lock(&rt->stream_mutex); hiface_pcm_close()
409 hiface_pcm_stream_stop(rt); hiface_pcm_close()
418 mutex_unlock(&rt->stream_mutex); hiface_pcm_close()
436 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); hiface_pcm_prepare() local
441 if (rt->panic) hiface_pcm_prepare()
446 mutex_lock(&rt->stream_mutex); hiface_pcm_prepare()
451 if (rt->stream_state == STREAM_DISABLED) { hiface_pcm_prepare()
453 ret = hiface_pcm_set_rate(rt, alsa_rt->rate); hiface_pcm_prepare()
455 mutex_unlock(&rt->stream_mutex); hiface_pcm_prepare()
458 ret = hiface_pcm_stream_start(rt); hiface_pcm_prepare()
460 mutex_unlock(&rt->stream_mutex); hiface_pcm_prepare()
464 mutex_unlock(&rt->stream_mutex); hiface_pcm_prepare()
471 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); hiface_pcm_trigger() local
473 if (rt->panic) hiface_pcm_trigger()
501 struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); hiface_pcm_pointer() local
505 if (rt->panic || !sub) hiface_pcm_pointer()
549 struct pcm_runtime *rt = chip->pcm; hiface_pcm_abort() local
551 if (rt) { hiface_pcm_abort()
552 rt->panic = true; hiface_pcm_abort()
554 mutex_lock(&rt->stream_mutex); hiface_pcm_abort()
555 hiface_pcm_stream_stop(rt); hiface_pcm_abort()
556 mutex_unlock(&rt->stream_mutex); hiface_pcm_abort()
562 struct pcm_runtime *rt = chip->pcm; hiface_pcm_destroy() local
566 kfree(rt->out_urbs[i].buffer); hiface_pcm_destroy()
574 struct pcm_runtime *rt = pcm->private_data; hiface_pcm_free() local
576 if (rt) hiface_pcm_free()
577 hiface_pcm_destroy(rt->chip); hiface_pcm_free()
585 struct pcm_runtime *rt; hiface_pcm_init() local
587 rt = kzalloc(sizeof(*rt), GFP_KERNEL); hiface_pcm_init()
588 if (!rt) hiface_pcm_init()
591 rt->chip = chip; hiface_pcm_init()
592 rt->stream_state = STREAM_DISABLED; hiface_pcm_init()
594 rt->extra_freq = 1; hiface_pcm_init()
596 init_waitqueue_head(&rt->stream_wait_queue); hiface_pcm_init()
597 mutex_init(&rt->stream_mutex); hiface_pcm_init()
598 spin_lock_init(&rt->playback.lock); hiface_pcm_init()
601 hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP, hiface_pcm_init()
606 kfree(rt); hiface_pcm_init()
611 pcm->private_data = rt; hiface_pcm_init()
617 rt->instance = pcm; hiface_pcm_init()
619 chip->pcm = rt; hiface_pcm_init()
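
hiface's stream start is a small handshake with the URB completion handler: mark the stream STARTING, zero and anchor the out URBs, submit them, then sleep on stream_wait_queue until the first completion sets stream_wait_cond (hiface_pcm_out_urb_handler above). A sketch of that sequence; the URB count, GFP flag, timeout and error code are assumptions, not from the excerpt:

static int sketch_stream_start(struct pcm_runtime *rt)
{
	int ret, i;

	if (rt->stream_state != STREAM_DISABLED)
		return 0;

	rt->panic = false;
	rt->stream_state = STREAM_STARTING;
	rt->stream_wait_cond = false;

	for (i = 0; i < PCM_N_URBS; i++) {		/* count assumed */
		memset(rt->out_urbs[i].buffer, 0, PCM_PACKET_SIZE);
		usb_anchor_urb(&rt->out_urbs[i].instance,
			       &rt->out_urbs[i].submitted);
		ret = usb_submit_urb(&rt->out_urbs[i].instance, GFP_ATOMIC);
		if (ret) {
			hiface_pcm_stream_stop(rt);	/* kills anchored URBs */
			return ret;
		}
	}

	/* the first out-URB completion sets stream_wait_cond and wakes us */
	wait_event_timeout(rt->stream_wait_queue, rt->stream_wait_cond, HZ);
	if (!rt->stream_wait_cond) {
		hiface_pcm_stream_stop(rt);
		return -EIO;
	}
	rt->stream_state = STREAM_RUNNING;
	return 0;
}
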
/linux-4.1.27/arch/mips/include/asm/
uasm.h
200 # define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val)
201 # define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd)
202 # define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
203 # define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off)
204 # define UASM_i_LWX(buf, rs, rt, rd) uasm_i_ldx(buf, rs, rt, rd)
205 # define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
206 # define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
207 # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh)
208 # define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
209 # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
210 # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
211 # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
212 # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh)
213 # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd)
214 # define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off)
216 # define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
217 # define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
218 # define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
219 # define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
220 # define UASM_i_LWX(buf, rs, rt, rd) uasm_i_lwx(buf, rs, rt, rd)
221 # define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
222 # define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
223 # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh)
224 # define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
225 # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
226 # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
227 # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
228 # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
229 # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
230 # define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
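
The two runs of UASM_i_* definitions above are the 64-bit and 32-bit halves of one conditional block: the same macro name resolves to the doubleword micro-assembler op (uasm_i_ld, uasm_i_daddu, ...) on 64-bit kernels and to the word op otherwise, so callers emit pointer-width instructions without spelling the width. A sketch of the surrounding structure; the guard is presumably CONFIG_64BIT, which is not visible in the excerpt:

#ifdef CONFIG_64BIT
# define UASM_i_LW(buf, rs, rt, off)	uasm_i_ld(buf, rs, rt, off)
# define UASM_i_SW(buf, rs, rt, off)	uasm_i_sd(buf, rs, rt, off)
#else
# define UASM_i_LW(buf, rs, rt, off)	uasm_i_lw(buf, rs, rt, off)
# define UASM_i_SW(buf, rs, rt, off)	uasm_i_sw(buf, rs, rt, off)
#endif
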
processor.h
167 /* DMFC2 rt, 0x0201 */
169 /* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
171 /* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
173 /* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
175 /* DMFC2 rt, 0x0084 */
177 /* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
179 /* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
181 /* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
183 /* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
185 /* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
186 * rt, 0x0107 */
188 /* DMFC2 rt, 0x0110 */
190 /* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
192 /* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
193 * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
194 * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
195 * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
196 * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
198 /* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
199 * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
200 * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
202 /* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
204 /* DMFC2 rt, 0x025E - Pass2 */
206 /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
208 /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
asm.h
178 #define MOVN(rd, rs, rt) \
181 beqz rt, 9f; \
185 #define MOVZ(rd, rs, rt) \
188 bnez rt, 9f; \
194 #define MOVN(rd, rs, rt) \
197 bnezl rt, 9f; \
201 #define MOVZ(rd, rs, rt) \
204 beqzl rt, 9f; \
211 #define MOVN(rd, rs, rt) \
212 movn rd, rs, rt
213 #define MOVZ(rd, rs, rt) \
214 movz rd, rs, rt
mipsmtregs.h
284 #define mftc0(rt,sel) \
292 " # mftc0 $1, $" #rt ", " #sel " \n" \
293 " .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \
301 #define mftgpr(rt) \
309 " # mftgpr $1," #rt " \n" \
310 " .word 0x41000820 | (" #rt " << 16) \n" \
318 #define mftr(rt, u, sel) \
323 " mftr %0, " #rt ", " #u ", " #sel " \n" \
asmmacro.h
205 .macro MFTR rt=0, rd=0, u=0, sel=0
206 .word 0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
209 .macro MTTR rt=0, rd=0, u=0, sel=0
210 .word 0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
/linux-4.1.27/sound/aoa/
aoa-gpio.h
26 void (*init)(struct gpio_runtime *rt);
27 void (*exit)(struct gpio_runtime *rt);
30 void (*all_amps_off)(struct gpio_runtime *rt);
32 void (*all_amps_restore)(struct gpio_runtime *rt);
34 void (*set_headphone)(struct gpio_runtime *rt, int on);
35 void (*set_speakers)(struct gpio_runtime *rt, int on);
36 void (*set_lineout)(struct gpio_runtime *rt, int on);
37 void (*set_master)(struct gpio_runtime *rt, int on);
39 int (*get_headphone)(struct gpio_runtime *rt);
40 int (*get_speakers)(struct gpio_runtime *rt);
41 int (*get_lineout)(struct gpio_runtime *rt);
42 int (*get_master)(struct gpio_runtime *rt);
44 void (*set_hw_reset)(struct gpio_runtime *rt, int on);
53 int (*set_notify)(struct gpio_runtime *rt,
59 int (*get_detect)(struct gpio_runtime *rt,
/linux-4.1.27/include/linux/sunrpc/
timer.h
22 extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo);
23 extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m);
24 extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer);
26 static inline void rpc_set_timeo(struct rpc_rtt *rt, int timer, int ntimeo) rpc_set_timeo() argument
31 t = &rt->ntimeouts[timer-1]; rpc_set_timeo()
42 static inline int rpc_ntimeo(struct rpc_rtt *rt, int timer) rpc_ntimeo() argument
46 return rt->ntimeouts[timer-1]; rpc_ntimeo()
/linux-4.1.27/net/ipv6/
route.c
95 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
107 static void rt6_bind_peer(struct rt6_info *rt, int create) rt6_bind_peer() argument
112 base = inetpeer_base_ptr(rt->_rt6i_peer); rt6_bind_peer()
116 peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create); rt6_bind_peer()
118 if (!rt6_set_peer(rt, peer)) rt6_bind_peer()
123 static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create) __rt6_get_peer() argument
125 if (rt6_has_peer(rt)) __rt6_get_peer()
126 return rt6_peer_ptr(rt); __rt6_get_peer()
128 rt6_bind_peer(rt, create); __rt6_get_peer()
129 return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL); __rt6_get_peer()
132 static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt) rt6_get_peer_create() argument
134 return __rt6_get_peer(rt, 1); rt6_get_peer_create()
139 struct rt6_info *rt = (struct rt6_info *) dst; ipv6_cow_metrics() local
143 if (!(rt->dst.flags & DST_HOST)) ipv6_cow_metrics()
146 peer = rt6_get_peer_create(rt); ipv6_cow_metrics()
168 static inline const void *choose_neigh_daddr(struct rt6_info *rt, choose_neigh_daddr() argument
172 struct in6_addr *p = &rt->rt6i_gateway; choose_neigh_daddr()
185 struct rt6_info *rt = (struct rt6_info *) dst; ip6_neigh_lookup() local
188 daddr = choose_neigh_daddr(rt, skb, daddr); ip6_neigh_lookup()
307 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, ip6_dst_alloc() local
310 if (rt) { ip6_dst_alloc()
311 struct dst_entry *dst = &rt->dst; ip6_dst_alloc()
313 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); ip6_dst_alloc()
314 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); ip6_dst_alloc()
315 INIT_LIST_HEAD(&rt->rt6i_siblings); ip6_dst_alloc()
317 return rt; ip6_dst_alloc()
322 struct rt6_info *rt = (struct rt6_info *)dst; ip6_dst_destroy() local
323 struct inet6_dev *idev = rt->rt6i_idev; ip6_dst_destroy()
326 if (!(rt->dst.flags & DST_HOST)) ip6_dst_destroy()
330 rt->rt6i_idev = NULL; ip6_dst_destroy()
337 if (rt6_has_peer(rt)) { ip6_dst_destroy()
338 struct inet_peer *peer = rt6_peer_ptr(rt); ip6_dst_destroy()
346 struct rt6_info *rt = (struct rt6_info *)dst; ip6_dst_ifdown() local
347 struct inet6_dev *idev = rt->rt6i_idev; ip6_dst_ifdown()
356 rt->rt6i_idev = loopback_idev; ip6_dst_ifdown()
363 static bool rt6_check_expired(const struct rt6_info *rt) rt6_check_expired() argument
365 if (rt->rt6i_flags & RTF_EXPIRES) { rt6_check_expired()
366 if (time_after(jiffies, rt->dst.expires)) rt6_check_expired()
368 } else if (rt->dst.from) { rt6_check_expired()
369 return rt6_check_expired((struct rt6_info *) rt->dst.from); rt6_check_expired()
438 struct rt6_info *rt, rt6_device_match()
449 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) { rt6_device_match()
481 return rt; rt6_device_match()
503 static void rt6_probe(struct rt6_info *rt) rt6_probe() argument
514 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY)) rt6_probe()
517 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); rt6_probe()
525 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { rt6_probe()
538 work->target = rt->rt6i_gateway; rt6_probe()
539 dev_hold(rt->dst.dev); rt6_probe()
540 work->dev = rt->dst.dev; rt6_probe()
550 static inline void rt6_probe(struct rt6_info *rt) rt6_probe() argument
558 static inline int rt6_check_dev(struct rt6_info *rt, int oif) rt6_check_dev() argument
560 struct net_device *dev = rt->dst.dev; rt6_check_dev()
564 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif) rt6_check_dev()
569 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt) rt6_check_neigh() argument
574 if (rt->rt6i_flags & RTF_NONEXTHOP || rt6_check_neigh()
575 !(rt->rt6i_flags & RTF_GATEWAY)) rt6_check_neigh()
579 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); rt6_check_neigh()
600 static int rt6_score_route(struct rt6_info *rt, int oif, rt6_score_route() argument
605 m = rt6_check_dev(rt, oif); rt6_score_route()
609 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2; rt6_score_route()
612 int n = rt6_check_neigh(rt); rt6_score_route()
619 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict, find_match() argument
626 if (rt6_check_expired(rt)) find_match()
629 m = rt6_score_route(rt, oif, strict); find_match()
638 rt6_probe(rt); find_match()
644 match = rt; find_match()
655 struct rt6_info *rt, *match; find_rr_leaf() local
659 for (rt = rr_head; rt && rt->rt6i_metric == metric; find_rr_leaf()
660 rt = rt->dst.rt6_next) find_rr_leaf()
661 match = find_match(rt, oif, strict, &mpri, match, do_rr); find_rr_leaf()
662 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric; find_rr_leaf()
663 rt = rt->dst.rt6_next) find_rr_leaf()
664 match = find_match(rt, oif, strict, &mpri, match, do_rr); find_rr_leaf()
706 struct rt6_info *rt; rt6_route_rcv() local
744 rt = rt6_get_dflt_router(gwaddr, dev); rt6_route_rcv()
746 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, rt6_route_rcv()
749 if (rt && !lifetime) { rt6_route_rcv()
750 ip6_del_rt(rt); rt6_route_rcv()
751 rt = NULL; rt6_route_rcv()
754 if (!rt && lifetime) rt6_route_rcv()
755 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex, rt6_route_rcv()
757 else if (rt) rt6_route_rcv()
758 rt->rt6i_flags = RTF_ROUTEINFO | rt6_route_rcv()
759 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); rt6_route_rcv()
761 if (rt) { rt6_route_rcv()
763 rt6_clean_expires(rt); rt6_route_rcv()
765 rt6_set_expires(rt, jiffies + HZ * lifetime); rt6_route_rcv()
767 ip6_rt_put(rt); rt6_route_rcv()
795 struct rt6_info *rt; ip6_pol_route_lookup() local
800 rt = fn->leaf; ip6_pol_route_lookup()
801 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); ip6_pol_route_lookup()
802 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) ip6_pol_route_lookup()
803 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags); ip6_pol_route_lookup()
804 if (rt == net->ipv6.ip6_null_entry) { ip6_pol_route_lookup()
809 dst_use(&rt->dst, jiffies); ip6_pol_route_lookup()
811 return rt; ip6_pol_route_lookup()
853 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info, __ip6_ins_rt() argument
859 table = rt->rt6i_table; __ip6_ins_rt()
861 err = fib6_add(&table->tb6_root, rt, info, mxc); __ip6_ins_rt()
867 int ip6_ins_rt(struct rt6_info *rt) ip6_ins_rt() argument
869 struct nl_info info = { .nl_net = dev_net(rt->dst.dev), }; ip6_ins_rt()
872 return __ip6_ins_rt(rt, &info, &mxc); ip6_ins_rt()
879 struct rt6_info *rt; rt6_alloc_cow() local
885 rt = ip6_rt_copy(ort, daddr); rt6_alloc_cow()
887 if (rt) { rt6_alloc_cow()
890 rt->rt6i_flags |= RTF_ANYCAST; rt6_alloc_cow()
892 rt->rt6i_flags |= RTF_CACHE; rt6_alloc_cow()
895 if (rt->rt6i_src.plen && saddr) { rt6_alloc_cow()
896 rt->rt6i_src.addr = *saddr; rt6_alloc_cow()
897 rt->rt6i_src.plen = 128; rt6_alloc_cow()
902 return rt; rt6_alloc_cow()
908 struct rt6_info *rt = ip6_rt_copy(ort, daddr); rt6_alloc_clone() local
910 if (rt) rt6_alloc_clone()
911 rt->rt6i_flags |= RTF_CACHE; rt6_alloc_clone()
912 return rt; rt6_alloc_clone()
919 struct rt6_info *rt, *nrt; ip6_pol_route() local
935 rt = rt6_select(fn, oif, strict); ip6_pol_route()
936 if (rt->rt6i_nsiblings) ip6_pol_route()
937 rt = rt6_multipath_select(rt, fl6, oif, strict); ip6_pol_route()
938 if (rt == net->ipv6.ip6_null_entry) { ip6_pol_route()
948 dst_hold(&rt->dst); ip6_pol_route()
954 dst_hold(&rt->dst); ip6_pol_route()
957 if (rt->rt6i_flags & RTF_CACHE) ip6_pol_route()
960 if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY))) ip6_pol_route()
961 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); ip6_pol_route()
962 else if (!(rt->dst.flags & DST_HOST)) ip6_pol_route()
963 nrt = rt6_alloc_clone(rt, &fl6->daddr); ip6_pol_route()
967 ip6_rt_put(rt); ip6_pol_route()
968 rt = nrt ? : net->ipv6.ip6_null_entry; ip6_pol_route()
970 dst_hold(&rt->dst); ip6_pol_route()
984 ip6_rt_put(rt); ip6_pol_route()
988 rt->dst.lastuse = jiffies; ip6_pol_route()
989 rt->dst.__use++; ip6_pol_route()
991 return rt; ip6_pol_route()
1052 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig; ip6_blackhole_route() local
1055 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0); ip6_blackhole_route()
1056 if (rt) { ip6_blackhole_route()
1057 new = &rt->dst; ip6_blackhole_route()
1059 memset(new + 1, 0, sizeof(*rt) - sizeof(*new)); ip6_blackhole_route()
1060 rt6_init_peer(rt, net->ipv6.peers); ip6_blackhole_route()
1070 rt->rt6i_idev = ort->rt6i_idev; ip6_blackhole_route()
1071 if (rt->rt6i_idev) ip6_blackhole_route()
1072 in6_dev_hold(rt->rt6i_idev); ip6_blackhole_route()
1074 rt->rt6i_gateway = ort->rt6i_gateway; ip6_blackhole_route()
1075 rt->rt6i_flags = ort->rt6i_flags; ip6_blackhole_route()
1076 rt->rt6i_metric = 0; ip6_blackhole_route()
1078 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); ip6_blackhole_route()
1080 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key)); ip6_blackhole_route()
1096 struct rt6_info *rt; ip6_dst_check() local
1098 rt = (struct rt6_info *) dst; ip6_dst_check()
1104 if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie)) ip6_dst_check()
1107 if (rt6_check_expired(rt)) ip6_dst_check()
1115 struct rt6_info *rt = (struct rt6_info *) dst; ip6_negative_advice() local
1117 if (rt) { ip6_negative_advice()
1118 if (rt->rt6i_flags & RTF_CACHE) { ip6_negative_advice()
1119 if (rt6_check_expired(rt)) { ip6_negative_advice()
1120 ip6_del_rt(rt); ip6_negative_advice()
1133 struct rt6_info *rt; ip6_link_failure() local
1137 rt = (struct rt6_info *) skb_dst(skb); ip6_link_failure()
1138 if (rt) { ip6_link_failure()
1139 if (rt->rt6i_flags & RTF_CACHE) { ip6_link_failure()
1140 dst_hold(&rt->dst); ip6_link_failure()
1141 if (ip6_del_rt(rt)) ip6_link_failure()
1142 dst_free(&rt->dst); ip6_link_failure()
1143 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { ip6_link_failure()
1144 rt->rt6i_node->fn_sernum = -1; ip6_link_failure()
1207 struct rt6_info *rt; __ip6_route_redirect() local
1223 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { __ip6_route_redirect()
1224 if (rt6_check_expired(rt)) __ip6_route_redirect()
1226 if (rt->dst.error) __ip6_route_redirect()
1228 if (!(rt->rt6i_flags & RTF_GATEWAY)) __ip6_route_redirect()
1230 if (fl6->flowi6_oif != rt->dst.dev->ifindex) __ip6_route_redirect()
1232 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) __ip6_route_redirect()
1237 if (!rt) __ip6_route_redirect()
1238 rt = net->ipv6.ip6_null_entry; __ip6_route_redirect()
1239 else if (rt->dst.error) { __ip6_route_redirect()
1240 rt = net->ipv6.ip6_null_entry; __ip6_route_redirect()
1244 if (rt == net->ipv6.ip6_null_entry) { __ip6_route_redirect()
1251 dst_hold(&rt->dst); __ip6_route_redirect()
1255 return rt; __ip6_route_redirect()
1367 struct rt6_info *rt; icmp6_dst_alloc() local
1374 rt = ip6_dst_alloc(net, dev, 0, NULL); icmp6_dst_alloc()
1375 if (unlikely(!rt)) { icmp6_dst_alloc()
1381 rt->dst.flags |= DST_HOST; icmp6_dst_alloc()
1382 rt->dst.output = ip6_output; icmp6_dst_alloc()
1383 atomic_set(&rt->dst.__refcnt, 1); icmp6_dst_alloc()
1384 rt->rt6i_gateway = fl6->daddr; icmp6_dst_alloc()
1385 rt->rt6i_dst.addr = fl6->daddr; icmp6_dst_alloc()
1386 rt->rt6i_dst.plen = 128; icmp6_dst_alloc()
1387 rt->rt6i_idev = idev; icmp6_dst_alloc()
1388 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0); icmp6_dst_alloc()
1391 rt->dst.next = icmp6_dst_gc_list; icmp6_dst_alloc()
1392 icmp6_dst_gc_list = &rt->dst; icmp6_dst_alloc()
1397 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0); icmp6_dst_alloc()
1426 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg), icmp6_clean_all() argument
1434 struct rt6_info *rt = (struct rt6_info *) dst; icmp6_clean_all() local
1435 if (func(rt, arg)) { icmp6_clean_all()
1520 struct rt6_info *rt = NULL; ip6_route_info_create() local
1560 rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table); ip6_route_info_create()
1562 if (!rt) { ip6_route_info_create()
1568 rt6_set_expires(rt, jiffies + ip6_route_info_create()
1571 rt6_clean_expires(rt); ip6_route_info_create()
1575 rt->rt6i_protocol = cfg->fc_protocol; ip6_route_info_create()
1580 rt->dst.input = ip6_mc_input; ip6_route_info_create()
1582 rt->dst.input = ip6_input; ip6_route_info_create()
1584 rt->dst.input = ip6_forward; ip6_route_info_create()
1586 rt->dst.output = ip6_output; ip6_route_info_create()
1588 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); ip6_route_info_create()
1589 rt->rt6i_dst.plen = cfg->fc_dst_len; ip6_route_info_create()
1590 if (rt->rt6i_dst.plen == 128) { ip6_route_info_create()
1591 rt->dst.flags |= DST_HOST; ip6_route_info_create()
1592 dst_metrics_set_force_overwrite(&rt->dst); ip6_route_info_create()
1596 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); ip6_route_info_create()
1597 rt->rt6i_src.plen = cfg->fc_src_len; ip6_route_info_create()
1600 rt->rt6i_metric = cfg->fc_metric; ip6_route_info_create()
1623 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; ip6_route_info_create()
1626 rt->dst.error = -EINVAL; ip6_route_info_create()
1627 rt->dst.output = dst_discard_sk; ip6_route_info_create()
1628 rt->dst.input = dst_discard; ip6_route_info_create()
1631 rt->dst.error = -EACCES; ip6_route_info_create()
1632 rt->dst.output = ip6_pkt_prohibit_out; ip6_route_info_create()
1633 rt->dst.input = ip6_pkt_prohibit; ip6_route_info_create()
1637 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN ip6_route_info_create()
1639 rt->dst.output = ip6_pkt_discard_out; ip6_route_info_create()
1640 rt->dst.input = ip6_pkt_discard; ip6_route_info_create()
1651 rt->rt6i_gateway = *gw_addr; ip6_route_info_create()
1705 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc; ip6_route_info_create()
1706 rt->rt6i_prefsrc.plen = 128; ip6_route_info_create()
1708 rt->rt6i_prefsrc.plen = 0; ip6_route_info_create()
1710 rt->rt6i_flags = cfg->fc_flags; ip6_route_info_create()
1713 rt->dst.dev = dev; ip6_route_info_create()
1714 rt->rt6i_idev = idev; ip6_route_info_create()
1715 rt->rt6i_table = table; ip6_route_info_create()
1719 *rt_ret = rt; ip6_route_info_create()
1727 if (rt) ip6_route_info_create()
1728 dst_free(&rt->dst); ip6_route_info_create()
1738 struct rt6_info *rt = NULL; ip6_route_add() local
1741 err = ip6_route_info_create(cfg, &rt); ip6_route_add()
1749 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc); ip6_route_add()
1755 if (rt) ip6_route_add()
1756 dst_free(&rt->dst); ip6_route_add()
1761 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) __ip6_del_rt() argument
1765 struct net *net = dev_net(rt->dst.dev); __ip6_del_rt()
1767 if (rt == net->ipv6.ip6_null_entry) { __ip6_del_rt()
1772 table = rt->rt6i_table; __ip6_del_rt()
1774 err = fib6_del(rt, info); __ip6_del_rt()
1778 ip6_rt_put(rt); __ip6_del_rt()
1782 int ip6_del_rt(struct rt6_info *rt) ip6_del_rt() argument
1785 .nl_net = dev_net(rt->dst.dev), ip6_del_rt()
1787 return __ip6_del_rt(rt, &info); ip6_del_rt()
1794 struct rt6_info *rt; ip6_route_del() local
1808 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { ip6_route_del()
1810 (!rt->dst.dev || ip6_route_del()
1811 rt->dst.dev->ifindex != cfg->fc_ifindex)) ip6_route_del()
1814 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) ip6_route_del()
1816 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) ip6_route_del()
1818 dst_hold(&rt->dst); ip6_route_del()
1821 return __ip6_del_rt(rt, &cfg->fc_nlinfo); ip6_route_del()
1833 struct rt6_info *rt, *nrt = NULL; rt6_do_redirect() local
1891 rt = (struct rt6_info *) dst; rt6_do_redirect()
1892 if (rt == net->ipv6.ip6_null_entry) { rt6_do_redirect()
1901 dst_confirm(&rt->dst); rt6_do_redirect()
1918 nrt = ip6_rt_copy(rt, &msg->dest); rt6_do_redirect()
1931 netevent.old = &rt->dst; rt6_do_redirect()
1937 if (rt->rt6i_flags & RTF_CACHE) { rt6_do_redirect()
1938 rt = (struct rt6_info *) dst_clone(&rt->dst); rt6_do_redirect()
1939 ip6_del_rt(rt); rt6_do_redirect()
1954 struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0, ip6_rt_copy() local
1957 if (rt) { ip6_rt_copy()
1958 rt->dst.input = ort->dst.input; ip6_rt_copy()
1959 rt->dst.output = ort->dst.output; ip6_rt_copy()
1960 rt->dst.flags |= DST_HOST; ip6_rt_copy()
1962 rt->rt6i_dst.addr = *dest; ip6_rt_copy()
1963 rt->rt6i_dst.plen = 128; ip6_rt_copy()
1964 dst_copy_metrics(&rt->dst, &ort->dst); ip6_rt_copy()
1965 rt->dst.error = ort->dst.error; ip6_rt_copy()
1966 rt->rt6i_idev = ort->rt6i_idev; ip6_rt_copy()
1967 if (rt->rt6i_idev) ip6_rt_copy()
1968 in6_dev_hold(rt->rt6i_idev); ip6_rt_copy()
1969 rt->dst.lastuse = jiffies; ip6_rt_copy()
1972 rt->rt6i_gateway = ort->rt6i_gateway; ip6_rt_copy()
1974 rt->rt6i_gateway = *dest; ip6_rt_copy()
1975 rt->rt6i_flags = ort->rt6i_flags; ip6_rt_copy()
1976 rt6_set_from(rt, ort); ip6_rt_copy()
1977 rt->rt6i_metric = 0; ip6_rt_copy()
1980 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key)); ip6_rt_copy()
1982 memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key)); ip6_rt_copy()
1983 rt->rt6i_table = ort->rt6i_table; ip6_rt_copy()
1985 return rt; ip6_rt_copy()
1994 struct rt6_info *rt = NULL; rt6_get_route_info() local
2006 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { rt6_get_route_info()
2007 if (rt->dst.dev->ifindex != ifindex) rt6_get_route_info()
2009 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) rt6_get_route_info()
2011 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr)) rt6_get_route_info()
2013 dst_hold(&rt->dst); rt6_get_route_info()
2018 return rt; rt6_get_route_info()
2053 struct rt6_info *rt; rt6_get_dflt_router() local
2061 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { rt6_get_dflt_router()
2062 if (dev == rt->dst.dev && rt6_get_dflt_router()
2063 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && rt6_get_dflt_router()
2064 ipv6_addr_equal(&rt->rt6i_gateway, addr)) rt6_get_dflt_router()
2067 if (rt) rt6_get_dflt_router()
2068 dst_hold(&rt->dst); rt6_get_dflt_router()
2070 return rt; rt6_get_dflt_router()
2097 struct rt6_info *rt; rt6_purge_dflt_routers() local
2107 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { rt6_purge_dflt_routers()
2108 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) && rt6_purge_dflt_routers()
2109 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) { rt6_purge_dflt_routers()
2110 dst_hold(&rt->dst); rt6_purge_dflt_routers()
2112 ip6_del_rt(rt); rt6_purge_dflt_routers()
2235 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, addrconf_dst_alloc() local
2237 if (!rt) addrconf_dst_alloc()
2242 rt->dst.flags |= DST_HOST; addrconf_dst_alloc()
2243 rt->dst.input = ip6_input; addrconf_dst_alloc()
2244 rt->dst.output = ip6_output; addrconf_dst_alloc()
2245 rt->rt6i_idev = idev; addrconf_dst_alloc()
2247 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; addrconf_dst_alloc()
2249 rt->rt6i_flags |= RTF_ANYCAST; addrconf_dst_alloc()
2251 rt->rt6i_flags |= RTF_LOCAL; addrconf_dst_alloc()
2253 rt->rt6i_gateway = *addr; addrconf_dst_alloc()
2254 rt->rt6i_dst.addr = *addr; addrconf_dst_alloc()
2255 rt->rt6i_dst.plen = 128; addrconf_dst_alloc()
2256 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); addrconf_dst_alloc()
2258 atomic_set(&rt->dst.__refcnt, 1); addrconf_dst_alloc()
2260 return rt; addrconf_dst_alloc()
2264 struct rt6_info *rt, ip6_route_get_saddr()
2270 rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL; ip6_route_get_saddr()
2272 if (rt && rt->rt6i_prefsrc.plen) ip6_route_get_saddr()
2273 *saddr = rt->rt6i_prefsrc.addr; ip6_route_get_saddr()
2287 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg) fib6_remove_prefsrc() argument
2293 if (((void *)rt->dst.dev == dev || !dev) && fib6_remove_prefsrc()
2294 rt != net->ipv6.ip6_null_entry && fib6_remove_prefsrc()
2295 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) { fib6_remove_prefsrc()
2297 rt->rt6i_prefsrc.plen = 0; fib6_remove_prefsrc()
2317 static int fib6_clean_tohost(struct rt6_info *rt, void *arg) fib6_clean_tohost() argument
2321 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) || fib6_clean_tohost()
2322 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) && fib6_clean_tohost()
2323 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) { fib6_clean_tohost()
2339 static int fib6_ifdown(struct rt6_info *rt, void *arg) fib6_ifdown() argument
2344 if ((rt->dst.dev == dev || !dev) && fib6_ifdown()
2345 rt != adn->net->ipv6.ip6_null_entry) fib6_ifdown()
2367 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) rt6_mtu_change_route() argument
2396 if (rt->dst.dev == arg->dev && rt6_mtu_change_route()
2397 !dst_metric_locked(&rt->dst, RTAX_MTU) && rt6_mtu_change_route()
2398 (dst_mtu(&rt->dst) >= arg->mtu || rt6_mtu_change_route()
2399 (dst_mtu(&rt->dst) < arg->mtu && rt6_mtu_change_route()
2400 dst_mtu(&rt->dst) == idev->cnf.mtu6))) { rt6_mtu_change_route()
2401 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); rt6_mtu_change_route()
2539 struct rt6_info *rt, struct fib6_config *r_cfg) ip6_route_info_append()
2549 if (rtnh->dst.dev == rt->dst.dev && list_for_each_entry()
2550 rtnh->rt6i_idev == rt->rt6i_idev && list_for_each_entry()
2552 &rt->rt6i_gateway)) list_for_each_entry()
2559 nh->rt6_info = rt;
2575 struct rt6_info *rt; ip6_route_multipath_add() local
2608 err = ip6_route_info_create(&r_cfg, &rt); ip6_route_multipath_add()
2612 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); ip6_route_multipath_add()
2614 dst_free(&rt->dst); ip6_route_multipath_add()
2753 struct sk_buff *skb, struct rt6_info *rt, rt6_fill_node()
2764 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { rt6_fill_node()
2776 rtm->rtm_dst_len = rt->rt6i_dst.plen; rt6_fill_node()
2777 rtm->rtm_src_len = rt->rt6i_src.plen; rt6_fill_node()
2779 if (rt->rt6i_table) rt6_fill_node()
2780 table = rt->rt6i_table->tb6_id; rt6_fill_node()
2786 if (rt->rt6i_flags & RTF_REJECT) { rt6_fill_node()
2787 switch (rt->dst.error) { rt6_fill_node()
2802 else if (rt->rt6i_flags & RTF_LOCAL) rt6_fill_node()
2804 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) rt6_fill_node()
2810 rtm->rtm_protocol = rt->rt6i_protocol; rt6_fill_node()
2811 if (rt->rt6i_flags & RTF_DYNAMIC) rt6_fill_node()
2813 else if (rt->rt6i_flags & RTF_ADDRCONF) { rt6_fill_node()
2814 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO)) rt6_fill_node()
2820 if (rt->rt6i_flags & RTF_CACHE) rt6_fill_node()
2828 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr)) rt6_fill_node()
2836 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr)) rt6_fill_node()
2841 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { rt6_fill_node()
2859 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 && rt6_fill_node()
2864 if (rt->rt6i_prefsrc.plen) { rt6_fill_node()
2866 saddr_buf = rt->rt6i_prefsrc.addr; rt6_fill_node()
2871 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) rt6_fill_node()
2874 if (rt->rt6i_flags & RTF_GATEWAY) { rt6_fill_node()
2875 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0) rt6_fill_node()
2879 if (rt->dst.dev && rt6_fill_node()
2880 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) rt6_fill_node()
2882 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric)) rt6_fill_node()
2885 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0; rt6_fill_node()
2887 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0) rt6_fill_node()
2890 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) rt6_fill_node()
2901 int rt6_dump_route(struct rt6_info *rt, void *p_arg) rt6_dump_route() argument
2913 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, rt6_dump_route()
2922 struct rt6_info *rt; inet6_rtm_getroute() local
2973 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6, inet6_rtm_getroute()
2978 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); inet6_rtm_getroute()
2983 ip6_rt_put(rt); inet6_rtm_getroute()
2994 skb_dst_set(skb, &rt->dst); inet6_rtm_getroute()
2996 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, inet6_rtm_getroute()
3009 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) inet6_rt_notify() argument
3023 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, inet6_rt_notify()
437 rt6_device_match(struct net *net, struct rt6_info *rt, const struct in6_addr *saddr, int oif, int flags) rt6_device_match() argument
2263 ip6_route_get_saddr(struct net *net, struct rt6_info *rt, const struct in6_addr *daddr, unsigned int prefs, struct in6_addr *saddr) ip6_route_get_saddr() argument
2538 ip6_route_info_append(struct list_head *rt6_nh_list, struct rt6_info *rt, struct fib6_config *r_cfg) ip6_route_info_append() argument
2752 rt6_fill_node(struct net *net, struct sk_buff *skb, struct rt6_info *rt, struct in6_addr *dst, struct in6_addr *src, int iif, int type, u32 portid, u32 seq, int prefix, int nowait, unsigned int flags) rt6_fill_node() argument
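
Note on the route.c hits above: nearly every lookup helper (rt6_get_route_info, rt6_get_dflt_router, ip6_pol_route_lookup, ...) takes a reference with dst_hold(&rt->dst) before returning, and each caller balances it with ip6_rt_put(). A minimal caller-side sketch of that contract, modeled on ipv6_sock_ac_join() and ip6ip6_err() in the matches below; the example_* name is hypothetical:

    #include <net/ip6_route.h>

    /* Illustrative only: mirrors the lookup/release pattern used by
     * ipv6_sock_ac_join() and ip6ip6_err() in this listing. */
    static int example_route_ifindex(struct net *net, const struct in6_addr *daddr)
    {
            struct rt6_info *rt;
            int ifindex = 0;

            rt = rt6_lookup(net, daddr, NULL, 0, 0);        /* returns a held entry or NULL */
            if (rt) {
                    if (rt->dst.dev)
                            ifindex = rt->dst.dev->ifindex;
                    ip6_rt_put(rt);                         /* balance the reference */
            }
            return ifindex;
    }
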
H A Dip6_fib.c157 static void rt6_release(struct rt6_info *rt) rt6_release() argument
159 if (atomic_dec_and_test(&rt->rt6i_ref)) rt6_release()
160 dst_free(&rt->dst); rt6_release()
271 struct rt6_info *rt; fib6_dump_node() local
273 for (rt = w->leaf; rt; rt = rt->dst.rt6_next) { fib6_dump_node()
274 res = rt6_dump_route(rt, w->args); fib6_dump_node()
277 w->leaf = rt; fib6_dump_node()
626 static bool rt6_qualify_for_ecmp(struct rt6_info *rt) rt6_qualify_for_ecmp() argument
628 return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == rt6_qualify_for_ecmp()
664 static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn, fib6_purge_rt() argument
667 if (atomic_read(&rt->rt6i_ref) != 1) { fib6_purge_rt()
675 if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { fib6_purge_rt()
678 rt6_release(rt); fib6_purge_rt()
683 BUG_ON(atomic_read(&rt->rt6i_ref) != 1); fib6_purge_rt()
691 static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, fib6_add_rt2node() argument
702 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt); fib6_add_rt2node()
712 if (iter->rt6i_metric == rt->rt6i_metric) { fib6_add_rt2node()
729 if (iter->dst.dev == rt->dst.dev && fib6_add_rt2node()
730 iter->rt6i_idev == rt->rt6i_idev && fib6_add_rt2node()
732 &rt->rt6i_gateway)) { fib6_add_rt2node()
733 if (rt->rt6i_nsiblings) fib6_add_rt2node()
734 rt->rt6i_nsiblings = 0; fib6_add_rt2node()
737 if (!(rt->rt6i_flags & RTF_EXPIRES)) fib6_add_rt2node()
740 rt6_set_expires(iter, rt->dst.expires); fib6_add_rt2node()
756 rt->rt6i_nsiblings++; fib6_add_rt2node()
759 if (iter->rt6i_metric > rt->rt6i_metric) fib6_add_rt2node()
778 if (rt->rt6i_nsiblings) { fib6_add_rt2node()
785 if (sibling->rt6i_metric == rt->rt6i_metric && fib6_add_rt2node()
787 list_add_tail(&rt->rt6i_siblings, fib6_add_rt2node()
799 &rt->rt6i_siblings, rt6i_siblings) { fib6_add_rt2node()
801 BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings); fib6_add_rt2node()
804 BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings); fib6_add_rt2node()
815 err = fib6_commit_metrics(&rt->dst, mxc); fib6_add_rt2node()
819 rt->dst.rt6_next = iter; fib6_add_rt2node()
820 *ins = rt; fib6_add_rt2node()
821 rt->rt6i_node = fn; fib6_add_rt2node()
822 atomic_inc(&rt->rt6i_ref); fib6_add_rt2node()
823 inet6_rt_notify(RTM_NEWROUTE, rt, info); fib6_add_rt2node()
841 err = fib6_commit_metrics(&rt->dst, mxc); fib6_add_rt2node()
845 *ins = rt; fib6_add_rt2node()
846 rt->rt6i_node = fn; fib6_add_rt2node()
847 rt->dst.rt6_next = iter->dst.rt6_next; fib6_add_rt2node()
848 atomic_inc(&rt->rt6i_ref); fib6_add_rt2node()
849 inet6_rt_notify(RTM_NEWROUTE, rt, info); fib6_add_rt2node()
860 ins = &rt->dst.rt6_next; fib6_add_rt2node()
880 static void fib6_start_gc(struct net *net, struct rt6_info *rt) fib6_start_gc() argument
883 (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE))) fib6_start_gc()
901 int fib6_add(struct fib6_node *root, struct rt6_info *rt, fib6_add() argument
919 fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen, fib6_add()
931 if (rt->rt6i_src.plen) { fib6_add()
959 sn = fib6_add_1(sfn, &rt->rt6i_src.addr, fib6_add()
960 rt->rt6i_src.plen, fib6_add()
978 sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr, fib6_add()
979 rt->rt6i_src.plen, fib6_add()
990 fn->leaf = rt; fib6_add()
991 atomic_inc(&rt->rt6i_ref); fib6_add()
997 err = fib6_add_rt2node(fn, rt, info, mxc); fib6_add()
999 fib6_start_gc(info->nl_net, rt); fib6_add()
1000 if (!(rt->rt6i_flags & RTF_CACHE)) fib6_add()
1011 if (pn != fn && pn->leaf == rt) { fib6_add()
1013 atomic_dec(&rt->rt6i_ref); fib6_add()
1026 dst_free(&rt->dst); fib6_add()
1037 dst_free(&rt->dst); fib6_add()
1338 struct rt6_info *rt = *rtp; fib6_del_route() local
1344 *rtp = rt->dst.rt6_next; fib6_del_route()
1345 rt->rt6i_node = NULL; fib6_del_route()
1350 if (fn->rr_ptr == rt) fib6_del_route()
1354 if (rt->rt6i_nsiblings) { fib6_del_route()
1358 &rt->rt6i_siblings, rt6i_siblings) fib6_del_route()
1360 rt->rt6i_nsiblings = 0; fib6_del_route()
1361 list_del_init(&rt->rt6i_siblings); fib6_del_route()
1367 if (w->state == FWS_C && w->leaf == rt) { FOR_WALKERS()
1369 w->leaf = rt->dst.rt6_next; FOR_WALKERS()
1376 rt->dst.rt6_next = NULL;
1385 fib6_purge_rt(rt, fn, net);
1387 inet6_rt_notify(RTM_DELROUTE, rt, info);
1388 rt6_release(rt);
1391 int fib6_del(struct rt6_info *rt, struct nl_info *info) fib6_del() argument
1394 struct fib6_node *fn = rt->rt6i_node; fib6_del()
1398 if (rt->dst.obsolete > 0) { fib6_del()
1403 if (!fn || rt == net->ipv6.ip6_null_entry) fib6_del()
1408 if (!(rt->rt6i_flags & RTF_CACHE)) { fib6_del()
1412 if (rt->rt6i_src.plen) { fib6_del()
1426 if (*rtp == rt) { fib6_del()
1559 struct rt6_info *rt; fib6_clean_node() local
1575 for (rt = w->leaf; rt; rt = rt->dst.rt6_next) { fib6_clean_node()
1576 res = c->func(rt, c->arg); fib6_clean_node()
1578 w->leaf = rt; fib6_clean_node()
1579 res = fib6_del(rt, &info); fib6_clean_node()
1582 pr_debug("%s: del failed: rt=%p@%p err=%d\n", fib6_clean_node()
1583 __func__, rt, rt->rt6i_node, res); fib6_clean_node()
1591 w->leaf = rt; fib6_clean_node()
1652 static int fib6_prune_clone(struct rt6_info *rt, void *arg) fib6_prune_clone() argument
1654 if (rt->rt6i_flags & RTF_CACHE) { fib6_prune_clone()
1655 RT6_TRACE("pruning clone %p\n", rt); fib6_prune_clone()
1685 static int fib6_age(struct rt6_info *rt, void *arg) fib6_age() argument
1697 if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) { fib6_age()
1698 if (time_after(now, rt->dst.expires)) { fib6_age()
1699 RT6_TRACE("expiring %p\n", rt); fib6_age()
1703 } else if (rt->rt6i_flags & RTF_CACHE) { fib6_age()
1704 if (atomic_read(&rt->dst.__refcnt) == 0 && fib6_age()
1705 time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) { fib6_age()
1706 RT6_TRACE("aging clone %p\n", rt); fib6_age()
1708 } else if (rt->rt6i_flags & RTF_GATEWAY) { fib6_age()
1712 neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway); fib6_age()
1719 rt); fib6_age()
1888 struct rt6_info *rt = v; ipv6_route_seq_show() local
1891 seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen); ipv6_route_seq_show()
1894 seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen); ipv6_route_seq_show()
1898 if (rt->rt6i_flags & RTF_GATEWAY) ipv6_route_seq_show()
1899 seq_printf(seq, "%pi6", &rt->rt6i_gateway); ipv6_route_seq_show()
1904 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), ipv6_route_seq_show()
1905 rt->dst.__use, rt->rt6i_flags, ipv6_route_seq_show()
1906 rt->dst.dev ? rt->dst.dev->name : ""); ipv6_route_seq_show()
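
The ip6_fib.c hits above include the tree-walk cleanup path: fib6_clean_node() runs a caller-supplied func(rt, arg) over every leaf and calls fib6_del() whenever the callback returns non-zero (fib6_prune_clone, fib6_ifdown and fib6_age are the callbacks shown). A hedged sketch of such a callback, in the same style:

    #include <linux/jiffies.h>
    #include <net/ip6_fib.h>

    /* Illustrative callback after fib6_prune_clone()/fib6_age() above:
     * non-zero asks fib6_clean_node() to fib6_del() the entry, zero keeps it. */
    static int example_prune_expired(struct rt6_info *rt, void *arg)
    {
            if ((rt->rt6i_flags & RTF_EXPIRES) && rt->dst.expires &&
                time_after(jiffies, rt->dst.expires))
                    return -1;      /* delete: lifetime has run out */
            return 0;               /* keep everything else */
    }

Such a callback would be handed to one of the fib6 walkers (fib6_clean_all or a per-table variant), as the fib6_ifdown()/fib6_clean_tohost() hits suggest; the exact entry point is an assumption of this sketch.
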
H A Dfib6_rules.c54 struct rt6_info *rt = NULL; fib6_rule_action() local
65 rt = net->ipv6.ip6_null_entry; fib6_rule_action()
70 rt = net->ipv6.ip6_blk_hole_entry; fib6_rule_action()
74 rt = net->ipv6.ip6_prohibit_entry; fib6_rule_action()
84 rt = lookup(net, table, flp6, flags); fib6_rule_action()
85 if (rt != net->ipv6.ip6_null_entry) { fib6_rule_action()
97 ip6_dst_idev(&rt->dst)->dev, fib6_rule_action()
107 err = rt->dst.error; fib6_rule_action()
111 ip6_rt_put(rt); fib6_rule_action()
113 rt = NULL; fib6_rule_action()
117 dst_hold(&rt->dst); fib6_rule_action()
119 arg->result = rt; fib6_rule_action()
125 struct rt6_info *rt = (struct rt6_info *) arg->result; fib6_rule_suppress() local
128 if (rt->rt6i_idev) fib6_rule_suppress()
129 dev = rt->rt6i_idev->dev; fib6_rule_suppress()
134 if (rt->rt6i_dst.plen <= rule->suppress_prefixlen) fib6_rule_suppress()
146 ip6_rt_put(rt); fib6_rule_suppress()
H A Dip6_output.c452 struct rt6_info *rt; ip6_forward() local
459 rt = (struct rt6_info *) dst; ip6_forward()
460 if (rt->rt6i_flags & RTF_GATEWAY) ip6_forward()
461 target = &rt->rt6i_gateway; ip6_forward()
465 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); ip6_forward()
550 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); ip6_fragment() local
638 ipv6_select_ident(net, fh, rt);
650 dst_hold(&rt->dst);
678 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
692 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
694 ip6_rt_put(rt);
700 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
702 ip6_rt_put(rt);
728 hroom = LL_RESERVED_SPACE(rt->dst.dev);
729 troom = rt->dst.dev->needed_tailroom;
785 ipv6_select_ident(net, fh, rt);
841 struct rt6_info *rt; ip6_sk_dst_check() local
851 rt = (struct rt6_info *)dst; ip6_sk_dst_check()
869 if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) || ip6_sk_dst_check()
871 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) || ip6_sk_dst_check()
888 struct rt6_info *rt; ip6_dst_lookup_tail() local
903 struct rt6_info *rt; ip6_dst_lookup_tail() local
908 rt = (*dst)->error ? NULL : (struct rt6_info *)*dst; ip6_dst_lookup_tail()
909 err = ip6_route_get_saddr(net, rt, &fl6->daddr, ip6_dst_lookup_tail()
944 rt = (struct rt6_info *) *dst; ip6_dst_lookup_tail()
946 n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt)); ip6_dst_lookup_tail()
1070 struct rt6_info *rt) ip6_ufo_append_data()
1116 ipv6_select_ident(sock_net(sk), &fhdr, rt); ip6_ufo_append_data() local
1140 struct rt6_info *rt, ip6_append_data_mtu()
1143 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { ip6_append_data_mtu()
1146 *mtu = orig_mtu - rt->dst.header_len; ip6_append_data_mtu()
1163 struct rt6_info *rt, struct flowi6 *fl6) ip6_setup_cork()
1205 dst_hold(&rt->dst); ip6_setup_cork()
1206 cork->base.dst = &rt->dst; ip6_setup_cork()
1210 if (rt->dst.flags & DST_XFRM_TUNNEL) ip6_setup_cork()
1212 rt->dst.dev->mtu : dst_mtu(&rt->dst); ip6_setup_cork()
1215 rt->dst.dev->mtu : dst_mtu(rt->dst.path); ip6_setup_cork()
1221 if (dst_allfrag(rt->dst.path)) ip6_setup_cork()
1249 struct rt6_info *rt = (struct rt6_info *)cork->dst; __ip6_append_data() local
1256 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len; __ip6_append_data()
1262 hh_len = LL_RESERVED_SPACE(rt->dst.dev); __ip6_append_data()
1264 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + __ip6_append_data()
1274 (dst_allfrag(&rt->dst) ? __ip6_append_data()
1276 rt->rt6i_nfheader_len; __ip6_append_data()
1315 rt->dst.dev->features & NETIF_F_V6_CSUM && __ip6_append_data()
1338 (rt->dst.dev->features & NETIF_F_UFO) && __ip6_append_data()
1342 transhdrlen, mtu, flags, rt); __ip6_append_data()
1372 fragheaderlen, skb, rt, __ip6_append_data()
1384 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len; __ip6_append_data()
1386 !(rt->dst.dev->features&NETIF_F_SG)) __ip6_append_data()
1398 datalen += rt->dst.trailer_len; __ip6_append_data()
1401 alloclen += rt->dst.trailer_len; __ip6_append_data()
1488 if (!(rt->dst.dev->features&NETIF_F_SG)) { __ip6_append_data()
1539 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); __ip6_append_data()
1548 struct rt6_info *rt, unsigned int flags, int dontfrag) ip6_append_data()
1562 tclass, opt, rt, fl6); ip6_append_data()
1612 struct rt6_info *rt = (struct rt6_info *)cork->base.dst; __ip6_make_skb() local
1660 skb_dst_set(skb, dst_clone(&rt->dst)); __ip6_make_skb()
1661 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); __ip6_make_skb()
1677 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); ip6_send_skb() local
1685 IP6_INC_STATS(net, rt->rt6i_idev, ip6_send_skb()
1734 struct rt6_info *rt, unsigned int flags, ip6_make_skb()
1752 err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6); ip6_make_skb()
1064 ip6_ufo_append_data(struct sock *sk, struct sk_buff_head *queue, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, int transhdrlen, int mtu, unsigned int flags, struct rt6_info *rt) ip6_ufo_append_data() argument
1136 ip6_append_data_mtu(unsigned int *mtu, int *maxfraglen, unsigned int fragheaderlen, struct sk_buff *skb, struct rt6_info *rt, unsigned int orig_mtu) ip6_append_data_mtu() argument
1160 ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, struct inet6_cork *v6_cork, int hlimit, int tclass, struct ipv6_txoptions *opt, struct rt6_info *rt, struct flowi6 *fl6) ip6_setup_cork() argument
1543 ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, int dontfrag) ip6_append_data() argument
1728 ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, int dontfrag) ip6_make_skb() argument
H A Dxfrm6_policy.c76 struct rt6_info *rt = (struct rt6_info *)xdst; xfrm6_init_dst() local
78 rt6_init_peer(rt, net->ipv6.peers); xfrm6_init_dst()
85 struct rt6_info *rt = (struct rt6_info *)dst; xfrm6_init_path() local
86 if (rt->rt6i_node) xfrm6_init_path()
87 path->path_cookie = rt->rt6i_node->fn_sernum; xfrm6_init_path()
98 struct rt6_info *rt = (struct rt6_info *)xdst->route; xfrm6_fill_dst() local
109 rt6_transfer_peer(&xdst->u.rt6, rt); xfrm6_fill_dst()
113 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | xfrm6_fill_dst()
115 xdst->u.rt6.rt6i_metric = rt->rt6i_metric; xfrm6_fill_dst()
116 xdst->u.rt6.rt6i_node = rt->rt6i_node; xfrm6_fill_dst()
117 if (rt->rt6i_node) xfrm6_fill_dst()
118 xdst->route_cookie = rt->rt6i_node->fn_sernum; xfrm6_fill_dst()
119 xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; xfrm6_fill_dst()
120 xdst->u.rt6.rt6i_dst = rt->rt6i_dst; xfrm6_fill_dst()
121 xdst->u.rt6.rt6i_src = rt->rt6i_src; xfrm6_fill_dst()
H A Danycast.c79 struct rt6_info *rt; ipv6_sock_ac_join() local
81 rt = rt6_lookup(net, addr, NULL, 0, 0); ipv6_sock_ac_join()
82 if (rt) { ipv6_sock_ac_join()
83 dev = rt->dst.dev; ipv6_sock_ac_join()
84 ip6_rt_put(rt); ipv6_sock_ac_join()
218 static struct ifacaddr6 *aca_alloc(struct rt6_info *rt, aca_alloc() argument
221 struct inet6_dev *idev = rt->rt6i_idev; aca_alloc()
231 aca->aca_rt = rt; aca_alloc()
246 struct rt6_info *rt; __ipv6_dev_ac_inc() local
265 rt = addrconf_dst_alloc(idev, addr, true); __ipv6_dev_ac_inc()
266 if (IS_ERR(rt)) { __ipv6_dev_ac_inc()
267 err = PTR_ERR(rt); __ipv6_dev_ac_inc()
270 aca = aca_alloc(rt, addr); __ipv6_dev_ac_inc()
272 ip6_rt_put(rt); __ipv6_dev_ac_inc()
286 ip6_ins_rt(rt); __ipv6_dev_ac_inc()
H A Doutput_core.c64 struct rt6_info *rt) ipv6_select_ident()
71 id = __ipv6_select_ident(net, ip6_idents_hashrnd, &rt->rt6i_dst.addr, ipv6_select_ident()
72 &rt->rt6i_src.addr); ipv6_select_ident()
63 ipv6_select_ident(struct net *net, struct frag_hdr *fhdr, struct rt6_info *rt) ipv6_select_ident() argument
H A Dip6_tunnel.c153 struct rt6_info *rt = (struct rt6_info *) dst; ip6_tnl_dst_store() local
154 t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; ip6_tnl_dst_store()
578 struct rtable *rt; ip4ip6_err() local
623 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, ip4ip6_err()
627 if (IS_ERR(rt)) ip4ip6_err()
630 skb2->dev = rt->dst.dev; ip4ip6_err()
633 if (rt->rt_flags & RTCF_LOCAL) { ip4ip6_err()
634 ip_rt_put(rt); ip4ip6_err()
635 rt = NULL; ip4ip6_err()
636 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, ip4ip6_err()
641 if (IS_ERR(rt) || ip4ip6_err()
642 rt->dst.dev->type != ARPHRD_TUNNEL) { ip4ip6_err()
643 if (!IS_ERR(rt)) ip4ip6_err()
644 ip_rt_put(rt); ip4ip6_err()
647 skb_dst_set(skb2, &rt->dst); ip4ip6_err()
649 ip_rt_put(rt); ip4ip6_err()
689 struct rt6_info *rt; ip6ip6_err() local
700 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, ip6ip6_err()
703 if (rt && rt->dst.dev) ip6ip6_err()
704 skb2->dev = rt->dst.dev; ip6ip6_err()
708 ip6_rt_put(rt); ip6ip6_err()
1271 struct rt6_info *rt = rt6_lookup(t->net, ip6_tnl_link_config() local
1275 if (!rt) ip6_tnl_link_config()
1278 if (rt->dst.dev) { ip6_tnl_link_config()
1279 dev->hard_header_len = rt->dst.dev->hard_header_len + ip6_tnl_link_config()
1282 dev->mtu = rt->dst.dev->mtu - sizeof(struct ipv6hdr); ip6_tnl_link_config()
1289 ip6_rt_put(rt); ip6_tnl_link_config()
H A Dndisc.c1072 struct rt6_info *rt = NULL; ndisc_router_discovery() local
1183 rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev); ndisc_router_discovery()
1185 if (rt) { ndisc_router_discovery()
1186 neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); ndisc_router_discovery()
1191 ip6_rt_put(rt); ndisc_router_discovery()
1195 if (rt && lifetime == 0) { ndisc_router_discovery()
1196 ip6_del_rt(rt); ndisc_router_discovery()
1197 rt = NULL; ndisc_router_discovery()
1200 ND_PRINTK(3, info, "RA: rt: %p lifetime: %d, for dev: %s\n", ndisc_router_discovery()
1201 rt, lifetime, skb->dev->name); ndisc_router_discovery()
1202 if (!rt && lifetime) { ndisc_router_discovery()
1205 rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref); ndisc_router_discovery()
1206 if (!rt) { ndisc_router_discovery()
1213 neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); ndisc_router_discovery()
1218 ip6_rt_put(rt); ndisc_router_discovery()
1222 } else if (rt) { ndisc_router_discovery()
1223 rt->rt6i_flags = (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); ndisc_router_discovery()
1226 if (rt) ndisc_router_discovery()
1227 rt6_set_expires(rt, jiffies + (HZ * lifetime)); ndisc_router_discovery()
1232 if (rt) ndisc_router_discovery()
1233 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, ndisc_router_discovery()
1378 if (rt) ndisc_router_discovery()
1379 dst_metric_set(&rt->dst, RTAX_MTU, mtu); ndisc_router_discovery()
1398 ip6_rt_put(rt); ndisc_router_discovery()
1468 struct rt6_info *rt; ndisc_send_redirect() local
1500 rt = (struct rt6_info *) dst; ndisc_send_redirect()
1502 if (rt->rt6i_flags & RTF_GATEWAY) { ndisc_send_redirect()
1507 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); ndisc_send_redirect()
H A Dsit.c488 struct rt6_info *rt; ipip6_err_gen_icmpv6_unreach() local
503 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); ipip6_err_gen_icmpv6_unreach()
505 if (rt && rt->dst.dev) ipip6_err_gen_icmpv6_unreach()
506 skb2->dev = rt->dst.dev; ipip6_err_gen_icmpv6_unreach()
510 if (rt) ipip6_err_gen_icmpv6_unreach()
511 ip6_rt_put(rt); ipip6_err_gen_icmpv6_unreach()
814 struct rtable *rt; /* Route to the other host */ ipip6_tunnel_xmit() local
893 rt = ip_route_output_ports(tunnel->net, &fl4, NULL, ipip6_tunnel_xmit()
898 if (IS_ERR(rt)) { ipip6_tunnel_xmit()
902 if (rt->rt_type != RTN_UNICAST) { ipip6_tunnel_xmit()
903 ip_rt_put(rt); ipip6_tunnel_xmit()
907 tdev = rt->dst.dev; ipip6_tunnel_xmit()
910 ip_rt_put(rt); ipip6_tunnel_xmit()
917 ip_rt_put(rt); ipip6_tunnel_xmit()
922 mtu = dst_mtu(&rt->dst) - t_hlen; ipip6_tunnel_xmit()
926 ip_rt_put(rt); ipip6_tunnel_xmit()
940 ip_rt_put(rt); ipip6_tunnel_xmit()
963 ip_rt_put(rt); ipip6_tunnel_xmit()
980 ip_rt_put(rt); ipip6_tunnel_xmit()
986 err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, ipip6_tunnel_xmit()
1053 struct rtable *rt = ip_route_output_ports(tunnel->net, &fl4, ipip6_tunnel_bind_dev() local
1061 if (!IS_ERR(rt)) { ipip6_tunnel_bind_dev()
1062 tdev = rt->dst.dev; ipip6_tunnel_bind_dev()
1063 ip_rt_put(rt); ipip6_tunnel_bind_dev()
H A Dping.c92 struct rt6_info *rt; ping_v6_sendmsg() local
150 rt = (struct rt6_info *) dst; ping_v6_sendmsg()
175 np->tclass, NULL, &fl6, rt, ping_v6_sendmsg()
179 ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, ping_v6_sendmsg()
H A Daddrconf.c795 ip6_rt_put(ifp->rt); inet6_ifa_finish_destroy()
833 struct rt6_info *rt; ipv6_add_addr() local
872 rt = addrconf_dst_alloc(idev, addr, false); ipv6_add_addr()
873 if (IS_ERR(rt)) { ipv6_add_addr()
874 err = PTR_ERR(rt); ipv6_add_addr()
895 ifa->rt = rt; ipv6_add_addr()
999 struct rt6_info *rt; cleanup_prefix_route() local
1001 rt = addrconf_get_prefix_route(&ifp->addr, cleanup_prefix_route()
1005 if (rt) { cleanup_prefix_route()
1007 ip6_del_rt(rt); cleanup_prefix_route()
1009 if (!(rt->rt6i_flags & RTF_EXPIRES)) cleanup_prefix_route()
1010 rt6_set_expires(rt, expires); cleanup_prefix_route()
1011 ip6_rt_put(rt); cleanup_prefix_route()
2122 struct rt6_info *rt = NULL; addrconf_get_prefix_route() local
2133 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { addrconf_get_prefix_route()
2134 if (rt->dst.dev->ifindex != dev->ifindex) addrconf_get_prefix_route()
2136 if ((rt->rt6i_flags & flags) != flags) addrconf_get_prefix_route()
2138 if ((rt->rt6i_flags & noflags) != 0) addrconf_get_prefix_route()
2140 dst_hold(&rt->dst); addrconf_get_prefix_route()
2145 return rt; addrconf_get_prefix_route()
2303 struct rt6_info *rt; addrconf_prefix_rcv() local
2319 rt = addrconf_get_prefix_route(&pinfo->prefix, addrconf_prefix_rcv()
2325 if (rt) { addrconf_prefix_rcv()
2328 ip6_del_rt(rt); addrconf_prefix_rcv()
2329 rt = NULL; addrconf_prefix_rcv()
2332 rt6_set_expires(rt, jiffies + rt_expires); addrconf_prefix_rcv()
2334 rt6_clean_expires(rt); addrconf_prefix_rcv()
2347 ip6_rt_put(rt); addrconf_prefix_rcv()
2844 if (sp_ifa->rt) { init_loopback()
2849 if (sp_ifa->rt->dst.obsolete > 0) { init_loopback()
2850 ip6_rt_put(sp_ifa->rt); init_loopback()
2851 sp_ifa->rt = NULL; init_loopback()
2861 sp_ifa->rt = sp_rt; init_loopback()
3467 ip6_ins_rt(ifp->rt); addrconf_dad_begin()
5045 if (!(ifp->rt->rt6i_node)) __ipv6_ifa_notify()
5046 ip6_ins_rt(ifp->rt); __ipv6_ifa_notify()
5058 struct rt6_info *rt; __ipv6_ifa_notify() local
5060 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, __ipv6_ifa_notify()
5062 if (rt && ip6_del_rt(rt)) __ipv6_ifa_notify()
5063 dst_free(&rt->dst); __ipv6_ifa_notify()
5065 dst_hold(&ifp->rt->dst); __ipv6_ifa_notify()
5067 if (ip6_del_rt(ifp->rt)) __ipv6_ifa_notify()
5068 dst_free(&ifp->rt->dst); __ipv6_ifa_notify()
H A Draw.c619 struct rt6_info *rt = (struct rt6_info *)*dstp; rawv6_send_hdrinc() local
620 int hlen = LL_RESERVED_SPACE(rt->dst.dev); rawv6_send_hdrinc()
621 int tlen = rt->dst.dev->needed_tailroom; rawv6_send_hdrinc()
623 if (length > rt->dst.dev->mtu) { rawv6_send_hdrinc()
624 ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu); rawv6_send_hdrinc()
640 skb_dst_set(skb, &rt->dst); rawv6_send_hdrinc()
654 IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); rawv6_send_hdrinc()
656 NULL, rt->dst.dev, dst_output_sk); rawv6_send_hdrinc()
668 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); rawv6_send_hdrinc()
H A Dmip6.c420 struct ipv6_rt_hdr *rt; mip6_rthdr_offset() local
421 rt = (struct ipv6_rt_hdr *)(nh + offset); mip6_rthdr_offset()
422 if (rt->type != 0) mip6_rthdr_offset()
H A Dicmp.c199 struct rt6_info *rt = (struct rt6_info *)dst; icmpv6_xrlim_allow() local
203 if (rt->rt6i_dst.plen < 128) icmpv6_xrlim_allow()
204 tmo >>= ((128 - rt->rt6i_dst.plen)>>5); icmpv6_xrlim_allow()
210 &rt->rt6i_dst.addr, 1); icmpv6_xrlim_allow()
/linux-4.1.27/net/decnet/
H A Ddn_route.c156 struct dn_route *rt = (struct dn_route *) dst; dn_dst_destroy() local
158 if (rt->n) dn_dst_destroy()
159 neigh_release(rt->n); dn_dst_destroy()
166 struct dn_route *rt = (struct dn_route *) dst; dn_dst_ifdown() local
167 struct neighbour *n = rt->n; dn_dst_ifdown()
186 static inline void dnrt_free(struct dn_route *rt) dnrt_free() argument
188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); dnrt_free()
191 static inline void dnrt_drop(struct dn_route *rt) dnrt_drop() argument
193 dst_release(&rt->dst); dnrt_drop()
194 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); dnrt_drop()
200 struct dn_route *rt; dn_dst_check_expire() local
209 while ((rt = rcu_dereference_protected(*rtp, dn_dst_check_expire()
211 if (atomic_read(&rt->dst.__refcnt) || dn_dst_check_expire()
212 (now - rt->dst.lastuse) < expire) { dn_dst_check_expire()
213 rtp = &rt->dst.dn_next; dn_dst_check_expire()
216 *rtp = rt->dst.dn_next; dn_dst_check_expire()
217 rt->dst.dn_next = NULL; dn_dst_check_expire()
218 dnrt_free(rt); dn_dst_check_expire()
231 struct dn_route *rt; dn_dst_gc() local
242 while ((rt = rcu_dereference_protected(*rtp, dn_dst_gc()
244 if (atomic_read(&rt->dst.__refcnt) || dn_dst_gc()
245 (now - rt->dst.lastuse) < expire) { dn_dst_gc()
246 rtp = &rt->dst.dn_next; dn_dst_gc()
249 *rtp = rt->dst.dn_next; dn_dst_gc()
250 rt->dst.dn_next = NULL; dn_dst_gc()
251 dnrt_drop(rt); dn_dst_gc()
273 struct dn_route *rt = (struct dn_route *) dst; dn_dst_update_pmtu() local
274 struct neighbour *n = rt->n; dn_dst_update_pmtu()
332 static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp) dn_insert_route() argument
343 if (compare_keys(&rth->fld, &rt->fld)) { dn_insert_route()
353 dnrt_drop(rt); dn_insert_route()
360 rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain); dn_insert_route()
361 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt); dn_insert_route()
363 dst_use(&rt->dst, now); dn_insert_route()
365 *rp = rt; dn_insert_route()
372 struct dn_route *rt, *next; dn_run_flush() local
377 if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL) dn_run_flush()
380 for(; rt; rt = next) { dn_run_flush()
381 next = rcu_dereference_raw(rt->dst.dn_next); dn_run_flush()
382 RCU_INIT_POINTER(rt->dst.dn_next, NULL); dn_run_flush()
383 dst_free((struct dst_entry *)rt); dn_run_flush()
750 struct dn_route *rt = (struct dn_route *)dst; dn_output() local
756 if (rt->n == NULL) dn_output()
761 cb->src = rt->rt_saddr; dn_output()
762 cb->dst = rt->rt_daddr; dn_output()
790 struct dn_route *rt; dn_forward() local
800 rt = (struct dn_route *)skb_dst(skb); dn_forward()
802 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len)) dn_forward()
811 skb->dev = rt->dst.dev; dn_forward()
819 if (rt->rt_flags & RTCF_DOREDIRECT) dn_forward()
878 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) dn_rt_set_next_hop() argument
881 struct net_device *dev = rt->dst.dev; dn_rt_set_next_hop()
888 rt->rt_gateway = DN_FIB_RES_GW(*res); dn_rt_set_next_hop()
889 dst_init_metrics(&rt->dst, fi->fib_metrics, true); dn_rt_set_next_hop()
891 rt->rt_type = res->type; dn_rt_set_next_hop()
893 if (dev != NULL && rt->n == NULL) { dn_rt_set_next_hop()
894 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); dn_rt_set_next_hop()
897 rt->n = n; dn_rt_set_next_hop()
900 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) dn_rt_set_next_hop()
901 dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu); dn_rt_set_next_hop()
902 mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS); dn_rt_set_next_hop()
904 unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst)); dn_rt_set_next_hop()
906 dst_metric_set(&rt->dst, RTAX_ADVMSS, mss); dn_rt_set_next_hop()
973 struct dn_route *rt = NULL; dn_route_output_slow() local
1185 rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); dn_route_output_slow()
1186 if (rt == NULL) dn_route_output_slow()
1189 memset(&rt->fld, 0, sizeof(rt->fld)); dn_route_output_slow()
1190 rt->fld.saddr = oldflp->saddr; dn_route_output_slow()
1191 rt->fld.daddr = oldflp->daddr; dn_route_output_slow()
1192 rt->fld.flowidn_oif = oldflp->flowidn_oif; dn_route_output_slow()
1193 rt->fld.flowidn_iif = 0; dn_route_output_slow()
1194 rt->fld.flowidn_mark = oldflp->flowidn_mark; dn_route_output_slow()
1196 rt->rt_saddr = fld.saddr; dn_route_output_slow()
1197 rt->rt_daddr = fld.daddr; dn_route_output_slow()
1198 rt->rt_gateway = gateway ? gateway : fld.daddr; dn_route_output_slow()
1199 rt->rt_local_src = fld.saddr; dn_route_output_slow()
1201 rt->rt_dst_map = fld.daddr; dn_route_output_slow()
1202 rt->rt_src_map = fld.saddr; dn_route_output_slow()
1204 rt->n = neigh; dn_route_output_slow()
1207 rt->dst.lastuse = jiffies; dn_route_output_slow()
1208 rt->dst.output = dn_output; dn_route_output_slow()
1209 rt->dst.input = dn_rt_bug; dn_route_output_slow()
1210 rt->rt_flags = flags; dn_route_output_slow()
1212 rt->dst.input = dn_nsp_rx; dn_route_output_slow()
1214 err = dn_rt_set_next_hop(rt, &res); dn_route_output_slow()
1218 hash = dn_hash(rt->fld.saddr, rt->fld.daddr); dn_route_output_slow()
1219 dn_insert_route(rt, hash, (struct dn_route **)pprt); dn_route_output_slow()
1241 dst_free(&rt->dst); dn_route_output_slow()
1252 struct dn_route *rt = NULL; __dn_route_output_key() local
1256 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt; __dn_route_output_key()
1257 rt = rcu_dereference_bh(rt->dst.dn_next)) { __dn_route_output_key()
1258 if ((flp->daddr == rt->fld.daddr) && __dn_route_output_key()
1259 (flp->saddr == rt->fld.saddr) && __dn_route_output_key()
1260 (flp->flowidn_mark == rt->fld.flowidn_mark) && __dn_route_output_key()
1261 dn_is_output_route(rt) && __dn_route_output_key()
1262 (rt->fld.flowidn_oif == flp->flowidn_oif)) { __dn_route_output_key()
1263 dst_use(&rt->dst, jiffies); __dn_route_output_key()
1265 *pprt = &rt->dst; __dn_route_output_key()
1309 struct dn_route *rt = NULL; dn_route_input_slow() local
1449 rt = dst_alloc(&dn_dst_ops, out_dev, 0, DST_OBSOLETE_NONE, DST_HOST); dn_route_input_slow()
1450 if (rt == NULL) dn_route_input_slow()
1453 memset(&rt->fld, 0, sizeof(rt->fld)); dn_route_input_slow()
1454 rt->rt_saddr = fld.saddr; dn_route_input_slow()
1455 rt->rt_daddr = fld.daddr; dn_route_input_slow()
1456 rt->rt_gateway = fld.daddr; dn_route_input_slow()
1458 rt->rt_gateway = gateway; dn_route_input_slow()
1459 rt->rt_local_src = local_src ? local_src : rt->rt_saddr; dn_route_input_slow()
1461 rt->rt_dst_map = fld.daddr; dn_route_input_slow()
1462 rt->rt_src_map = fld.saddr; dn_route_input_slow()
1464 rt->fld.saddr = cb->src; dn_route_input_slow()
1465 rt->fld.daddr = cb->dst; dn_route_input_slow()
1466 rt->fld.flowidn_oif = 0; dn_route_input_slow()
1467 rt->fld.flowidn_iif = in_dev->ifindex; dn_route_input_slow()
1468 rt->fld.flowidn_mark = fld.flowidn_mark; dn_route_input_slow()
1470 rt->n = neigh; dn_route_input_slow()
1471 rt->dst.lastuse = jiffies; dn_route_input_slow()
1472 rt->dst.output = dn_rt_bug_sk; dn_route_input_slow()
1475 rt->dst.input = dn_forward; dn_route_input_slow()
1478 rt->dst.output = dn_output; dn_route_input_slow()
1479 rt->dst.input = dn_nsp_rx; dn_route_input_slow()
1480 rt->dst.dev = in_dev; dn_route_input_slow()
1486 rt->dst.input = dst_discard; dn_route_input_slow()
1488 rt->rt_flags = flags; dn_route_input_slow()
1490 err = dn_rt_set_next_hop(rt, &res); dn_route_input_slow()
1494 hash = dn_hash(rt->fld.saddr, rt->fld.daddr); dn_route_input_slow()
1495 dn_insert_route(rt, hash, &rt); dn_route_input_slow()
1496 skb_dst_set(skb, &rt->dst); dn_route_input_slow()
1518 dst_free(&rt->dst); dn_route_input_slow()
1524 struct dn_route *rt; dn_route_input() local
1532 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL; dn_route_input()
1533 rt = rcu_dereference(rt->dst.dn_next)) { dn_route_input()
1534 if ((rt->fld.saddr == cb->src) && dn_route_input()
1535 (rt->fld.daddr == cb->dst) && dn_route_input()
1536 (rt->fld.flowidn_oif == 0) && dn_route_input()
1537 (rt->fld.flowidn_mark == skb->mark) && dn_route_input()
1538 (rt->fld.flowidn_iif == cb->iif)) { dn_route_input()
1539 dst_use(&rt->dst, jiffies); dn_route_input()
1541 skb_dst_set(skb, (struct dst_entry *)rt); dn_route_input()
1553 struct dn_route *rt = (struct dn_route *)skb_dst(skb); dn_rt_fill_info() local
1568 r->rtm_type = rt->rt_type; dn_rt_fill_info()
1569 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; dn_rt_fill_info()
1573 if (rt->rt_flags & RTCF_NOTIFY) dn_rt_fill_info()
1577 nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0) dn_rt_fill_info()
1580 if (rt->fld.saddr) { dn_rt_fill_info()
1582 if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0) dn_rt_fill_info()
1585 if (rt->dst.dev && dn_rt_fill_info()
1586 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0) dn_rt_fill_info()
1594 if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0) dn_rt_fill_info()
1597 if (rt->rt_daddr != rt->rt_gateway && dn_rt_fill_info()
1598 nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0) dn_rt_fill_info()
1601 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) dn_rt_fill_info()
1604 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; dn_rt_fill_info()
1605 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, dn_rt_fill_info()
1606 rt->dst.error) < 0) dn_rt_fill_info()
1609 if (dn_is_input_route(rt) && dn_rt_fill_info()
1610 nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0) dn_rt_fill_info()
1642 struct dn_route *rt = NULL; dn_cache_getroute() local
1689 rt = (struct dn_route *)skb_dst(skb); dn_cache_getroute()
1690 if (!err && -rt->dst.error) dn_cache_getroute()
1691 err = rt->dst.error; dn_cache_getroute()
1696 err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0); dn_cache_getroute()
1702 skb_dst_set(skb, &rt->dst); dn_cache_getroute()
1704 rt->rt_flags |= RTCF_NOTIFY; dn_cache_getroute()
1726 struct dn_route *rt; dn_cache_dump() local
1749 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0; dn_cache_dump()
1750 rt; dn_cache_dump()
1751 rt = rcu_dereference_bh(rt->dst.dn_next), idx++) { dn_cache_dump()
1754 skb_dst_set(skb, dst_clone(&rt->dst)); dn_cache_dump()
1780 struct dn_route *rt = NULL; dn_rt_cache_get_first() local
1785 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); dn_rt_cache_get_first()
1786 if (rt) dn_rt_cache_get_first()
1790 return rt; dn_rt_cache_get_first()
1793 static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt) dn_rt_cache_get_next() argument
1797 rt = rcu_dereference_bh(rt->dst.dn_next); dn_rt_cache_get_next()
1798 while (!rt) { dn_rt_cache_get_next()
1803 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); dn_rt_cache_get_next()
1805 return rt; dn_rt_cache_get_next()
1810 struct dn_route *rt = dn_rt_cache_get_first(seq); dn_rt_cache_seq_start() local
1812 if (rt) { dn_rt_cache_seq_start()
1813 while(*pos && (rt = dn_rt_cache_get_next(seq, rt))) dn_rt_cache_seq_start()
1816 return *pos ? NULL : rt; dn_rt_cache_seq_start()
1821 struct dn_route *rt = dn_rt_cache_get_next(seq, v); dn_rt_cache_seq_next() local
1823 return rt; dn_rt_cache_seq_next()
1834 struct dn_route *rt = v; dn_rt_cache_seq_show() local
1838 rt->dst.dev ? rt->dst.dev->name : "*", dn_rt_cache_seq_show()
1839 dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1), dn_rt_cache_seq_show()
1840 dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2), dn_rt_cache_seq_show()
1841 atomic_read(&rt->dst.__refcnt), dn_rt_cache_seq_show()
1842 rt->dst.__use, 0); dn_rt_cache_seq_show()
H A Ddn_neigh.c175 struct dn_route *rt = (struct dn_route *)dst; dn_neigh_output() local
181 dn_dn2eth(mac_addr, rt->rt_local_src); dn_neigh_output()
200 struct dn_route *rt = (struct dn_route *)dst; dn_neigh_output_packet() local
201 struct neighbour *neigh = rt->n; dn_neigh_output_packet()
337 struct dn_route *rt = (struct dn_route *) dst; dn_to_neigh_output() local
338 struct neighbour *neigh = rt->n; dn_to_neigh_output()
/linux-4.1.27/drivers/net/appletalk/
H A Dipddp.c55 static int ipddp_delete(struct ipddp_route *rt);
56 static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt);
122 struct ipddp_route *rt; ipddp_xmit() local
130 for(rt = ipddp_route_list; rt != NULL; rt = rt->next) ipddp_xmit()
132 if(rt->ip == paddr) ipddp_xmit()
135 if(rt == NULL) { ipddp_xmit()
140 our_addr = atalk_find_dev_addr(rt->dev); ipddp_xmit()
159 if(rt->dev->type == ARPHRD_LOCALTLK) ipddp_xmit()
166 ddp->deh_dnet = rt->at.s_net; /* FIXME more hops?? */ ipddp_xmit()
169 ddp->deh_dnode = rt->at.s_node; ipddp_xmit()
181 aarp_send_ddp(rt->dev, skb, &rt->at, NULL); ipddp_xmit()
194 struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL); ipddp_create() local
196 if (rt == NULL) ipddp_create()
199 rt->ip = new_rt->ip; ipddp_create()
200 rt->at = new_rt->at; ipddp_create()
201 rt->next = NULL; ipddp_create()
202 if ((rt->dev = atrtr_get_dev(&rt->at)) == NULL) { ipddp_create()
203 kfree(rt); ipddp_create()
208 if (__ipddp_find_route(rt)) { ipddp_create()
210 kfree(rt); ipddp_create()
214 rt->next = ipddp_route_list; ipddp_create()
215 ipddp_route_list = rt; ipddp_create()
226 static int ipddp_delete(struct ipddp_route *rt) ipddp_delete() argument
234 if(tmp->ip == rt->ip && ipddp_delete()
235 tmp->at.s_net == rt->at.s_net && ipddp_delete()
236 tmp->at.s_node == rt->at.s_node) ipddp_delete()
253 static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt) __ipddp_find_route() argument
259 if(f->ip == rt->ip && __ipddp_find_route()
260 f->at.s_net == rt->at.s_net && __ipddp_find_route()
261 f->at.s_node == rt->at.s_node) __ipddp_find_route()
270 struct ipddp_route __user *rt = ifr->ifr_data; ipddp_ioctl() local
276 if(copy_from_user(&rcp, rt, sizeof(rcp))) ipddp_ioctl()
292 if (copy_to_user(rt, &rcp2, ipddp_ioctl()
/linux-4.1.27/include/net/
H A Dip6_fib.h132 static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt) rt6_peer_ptr() argument
134 return inetpeer_ptr(rt->_rt6i_peer); rt6_peer_ptr()
137 static inline bool rt6_has_peer(struct rt6_info *rt) rt6_has_peer() argument
139 return inetpeer_ptr_is_peer(rt->_rt6i_peer); rt6_has_peer()
142 static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer) __rt6_set_peer() argument
144 __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer); __rt6_set_peer()
147 static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer) rt6_set_peer() argument
149 return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer); rt6_set_peer()
152 static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base) rt6_init_peer() argument
154 inetpeer_init_ptr(&rt->_rt6i_peer, base); rt6_init_peer()
157 static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort) rt6_transfer_peer() argument
159 inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer); rt6_transfer_peer()
167 static inline void rt6_clean_expires(struct rt6_info *rt) rt6_clean_expires() argument
169 rt->rt6i_flags &= ~RTF_EXPIRES; rt6_clean_expires()
170 rt->dst.expires = 0; rt6_clean_expires()
173 static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires) rt6_set_expires() argument
175 rt->dst.expires = expires; rt6_set_expires()
176 rt->rt6i_flags |= RTF_EXPIRES; rt6_set_expires()
181 struct rt6_info *rt; rt6_update_expires() local
183 for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt6_update_expires()
184 rt = (struct rt6_info *)rt->dst.from); rt6_update_expires()
185 if (rt && rt != rt0) rt6_update_expires()
186 rt0->dst.expires = rt->dst.expires; rt6_update_expires()
192 static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) rt6_set_from() argument
196 rt->rt6i_flags &= ~RTF_EXPIRES; rt6_set_from()
198 rt->dst.from = new; rt6_set_from()
201 static inline void ip6_rt_put(struct rt6_info *rt) ip6_rt_put() argument
207 dst_release(&rt->dst); ip6_rt_put()
236 __u32 fib_rt_entries; /* rt entries in table */
299 int fib6_add(struct fib6_node *root, struct rt6_info *rt,
301 int fib6_del(struct rt6_info *rt, struct nl_info *info);
303 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
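[editor's note] The rt6_set_expires()/rt6_clean_expires() inlines listed above toggle RTF_EXPIRES together with dst.expires, and ip6_rt_put() drops the reference. A minimal usage sketch, assuming kernel context and only the declarations shown above (the caller and the 30-second value are hypothetical):

    /* Illustrative only: mark a route as expiring in 30 seconds,
     * then clear the expiry again.  "rt" is assumed to be a valid,
     * referenced struct rt6_info obtained elsewhere.
     */
    static void example_toggle_expiry(struct rt6_info *rt)
    {
            rt6_set_expires(rt, jiffies + 30 * HZ); /* sets RTF_EXPIRES and dst.expires */

            if (rt->rt6i_flags & RTF_EXPIRES)
                    rt6_clean_expires(rt);          /* clears the flag, zeroes dst.expires */

            ip6_rt_put(rt);                         /* drop the caller's reference */
    }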
H A Dip6_route.h89 int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
134 int rt6_dump_route(struct rt6_info *rt, void *p_arg);
149 struct rt6_info *rt = (struct rt6_info *) dst; __ip6_dst_store() local
156 np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; __ip6_dst_store()
169 struct rt6_info *rt = (struct rt6_info *) skb_dst(skb); ipv6_unicast_destination() local
171 return rt->rt6i_flags & RTF_LOCAL; ipv6_unicast_destination()
176 struct rt6_info *rt = (struct rt6_info *) skb_dst(skb); ipv6_anycast_destination() local
178 return rt->rt6i_flags & RTF_ANYCAST; ipv6_anycast_destination()
205 static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt) rt6_nexthop() argument
207 return &rt->rt6i_gateway; rt6_nexthop()
H A Droute.h71 static inline bool rt_is_input_route(const struct rtable *rt) rt_is_input_route() argument
73 return rt->rt_is_input != 0; rt_is_input_route()
76 static inline bool rt_is_output_route(const struct rtable *rt) rt_is_output_route() argument
78 return rt->rt_is_input == 0; rt_is_output_route()
81 static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr) rt_nexthop() argument
83 if (rt->rt_gateway) rt_nexthop()
84 return rt->rt_gateway; rt_nexthop()
195 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
201 static inline void ip_rt_put(struct rtable *rt) ip_rt_put() argument
207 dst_release(&rt->dst); ip_rt_put()
264 struct rtable *rt; ip_route_connect() local
270 rt = __ip_route_output_key(net, fl4); ip_route_connect()
271 if (IS_ERR(rt)) ip_route_connect()
272 return rt; ip_route_connect()
273 ip_rt_put(rt); ip_route_connect()
280 static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt, ip_route_newports() argument
288 ip_rt_put(rt); ip_route_newports()
295 return rt; ip_route_newports()
H A Dipx.h164 static __inline__ void ipxrtr_hold(struct ipx_route *rt) ipxrtr_hold() argument
166 atomic_inc(&rt->refcnt); ipxrtr_hold()
169 static __inline__ void ipxrtr_put(struct ipx_route *rt) ipxrtr_put() argument
171 if (atomic_dec_and_test(&rt->refcnt)) ipxrtr_put()
172 kfree(rt); ipxrtr_put()
H A Ddn_route.h88 static inline bool dn_is_input_route(struct dn_route *rt) dn_is_input_route() argument
90 return rt->fld.flowidn_iif != 0; dn_is_input_route()
93 static inline bool dn_is_output_route(struct dn_route *rt) dn_is_output_route() argument
95 return rt->fld.flowidn_iif == 0; dn_is_output_route()
H A Dx25.h266 static __inline__ void x25_route_hold(struct x25_route *rt) x25_route_hold() argument
268 atomic_inc(&rt->refcnt); x25_route_hold()
271 static __inline__ void x25_route_put(struct x25_route *rt) x25_route_put() argument
273 if (atomic_dec_and_test(&rt->refcnt)) x25_route_put()
274 kfree(rt); x25_route_put()
H A Dgeneve.h89 int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
H A Dudp_tunnel.h80 int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
H A Dif_inet6.h65 struct rt6_info *rt; member in struct:inet6_ifaddr
H A Dvxlan.h148 int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
/linux-4.1.27/net/sunrpc/
H A Dtimer.c30 * @rt: context to initialize
34 void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo) rpc_init_rtt() argument
39 rt->timeo = timeo; rpc_init_rtt()
44 rt->srtt[i] = init; rpc_init_rtt()
45 rt->sdrtt[i] = RPC_RTO_INIT; rpc_init_rtt()
46 rt->ntimeouts[i] = 0; rpc_init_rtt()
53 * @rt: context to update
60 void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m) rpc_update_rtt() argument
74 srtt = (long *)&rt->srtt[timer]; rpc_update_rtt()
81 sdrtt = (long *)&rt->sdrtt[timer]; rpc_update_rtt()
93 * @rt: context to use for calculation
109 unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer) rpc_calc_rto() argument
114 return rt->timeo; rpc_calc_rto()
116 res = ((rt->srtt[timer] + 7) >> 3) + rt->sdrtt[timer]; rpc_calc_rto()
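[editor's note] rpc_init_rtt(), rpc_update_rtt() and rpc_calc_rto() above form the SUNRPC smoothed round-trip-time estimator. A hedged sketch of the calling order, assuming kernel context; the timer slot and measured_rtt value are hypothetical:

    struct rpc_rtt rtt;
    long measured_rtt = 12;                 /* hypothetical sample, in jiffies */

    rpc_init_rtt(&rtt, 5 * HZ);             /* fallback timeout of 5 seconds */

    /* After each completed request on timer slot 1, feed the measured
     * round-trip time into the estimator ...
     */
    rpc_update_rtt(&rtt, 1, measured_rtt);

    /* ... and use the current estimate as the next retransmit timeout. */
    unsigned long rto = rpc_calc_rto(&rtt, 1);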
/linux-4.1.27/arch/arm64/include/asm/
H A Dsysreg.h33 .macro mrs_s, rt, sreg variable
34 .inst 0xd5300000|(\sreg)|(__reg_num_\rt)
37 .macro msr_s, sreg, rt
38 .inst 0xd5100000|(\sreg)|(__reg_num_\rt)
49 " .macro mrs_s, rt, sreg\n"
50 " .inst 0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
53 " .macro msr_s, sreg, rt\n"
54 " .inst 0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
H A Dkvm_mmio.h30 unsigned long rt; member in struct:kvm_decode
/linux-4.1.27/net/ipx/
H A Dipx_route.c51 struct ipx_route *rt; ipxrtr_add_route() local
55 rt = ipxrtr_lookup(network); ipxrtr_add_route()
56 if (!rt) { ipxrtr_add_route()
57 rt = kmalloc(sizeof(*rt), GFP_ATOMIC); ipxrtr_add_route()
59 if (!rt) ipxrtr_add_route()
62 atomic_set(&rt->refcnt, 1); ipxrtr_add_route()
63 ipxrtr_hold(rt); ipxrtr_add_route()
65 list_add(&rt->node, &ipx_routes); ipxrtr_add_route()
73 rt->ir_net = network; ipxrtr_add_route()
74 rt->ir_intrfc = intrfc; ipxrtr_add_route()
76 memset(rt->ir_router_node, '\0', IPX_NODE_LEN); ipxrtr_add_route()
77 rt->ir_routed = 0; ipxrtr_add_route()
79 memcpy(rt->ir_router_node, node, IPX_NODE_LEN); ipxrtr_add_route()
80 rt->ir_routed = 1; ipxrtr_add_route()
85 ipxrtr_put(rt); ipxrtr_add_route()
176 struct ipx_route *rt = NULL; ipxrtr_route_packet() local
184 rt = ipxrtr_lookup(usipx->sipx_network); ipxrtr_route_packet()
186 if (!rt) ipxrtr_route_packet()
188 intrfc = rt->ir_intrfc; ipxrtr_route_packet()
245 rc = ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ? ipxrtr_route_packet()
246 rt->ir_router_node : ipx->ipx_dest.node); ipxrtr_route_packet()
249 if (rt) ipxrtr_route_packet()
250 ipxrtr_put(rt); ipxrtr_route_packet()
260 struct rtentry rt; /* Use these to behave like 'other' stacks */ ipxrtr_ioctl() local
264 if (copy_from_user(&rt, arg, sizeof(rt))) ipxrtr_ioctl()
267 sg = (struct sockaddr_ipx *)&rt.rt_gateway; ipxrtr_ioctl()
268 st = (struct sockaddr_ipx *)&rt.rt_dst; ipxrtr_ioctl()
271 if (!(rt.rt_flags & RTF_GATEWAY) || /* Direct routes are fixed */ ipxrtr_ioctl()
H A Dipx_proc.c81 struct ipx_route *rt; ipx_seq_route_show() local
88 rt = list_entry(v, struct ipx_route, node); ipx_seq_route_show()
90 seq_printf(seq, "%08X ", ntohl(rt->ir_net)); ipx_seq_route_show()
91 if (rt->ir_routed) ipx_seq_route_show()
93 ntohl(rt->ir_intrfc->if_netnum), ipx_seq_route_show()
94 rt->ir_router_node[0], rt->ir_router_node[1], ipx_seq_route_show()
95 rt->ir_router_node[2], rt->ir_router_node[3], ipx_seq_route_show()
96 rt->ir_router_node[4], rt->ir_router_node[5]); ipx_seq_route_show()
/linux-4.1.27/net/ipv4/
H A Ddatagram.c28 struct rtable *rt; __ip4_datagram_connect() local
51 rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr, __ip4_datagram_connect()
55 if (IS_ERR(rt)) { __ip4_datagram_connect()
56 err = PTR_ERR(rt); __ip4_datagram_connect()
62 if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) { __ip4_datagram_connect()
63 ip_rt_put(rt); __ip4_datagram_connect()
80 sk_dst_set(sk, &rt->dst); __ip4_datagram_connect()
109 struct rtable *rt; ip4_datagram_release_cb() local
121 rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr, ip4_datagram_release_cb()
126 dst = !IS_ERR(rt) ? &rt->dst : NULL; ip4_datagram_release_cb()
H A Dxfrm4_policy.c26 struct rtable *rt; __xfrm4_dst_lookup() local
34 rt = __ip_route_output_key(net, fl4); __xfrm4_dst_lookup()
35 if (!IS_ERR(rt)) __xfrm4_dst_lookup()
36 return &rt->dst; __xfrm4_dst_lookup()
38 return ERR_CAST(rt); __xfrm4_dst_lookup()
79 struct rtable *rt = (struct rtable *)xdst->route; xfrm4_fill_dst() local
82 xdst->u.rt.rt_iif = fl4->flowi4_iif; xfrm4_fill_dst()
89 xdst->u.rt.rt_is_input = rt->rt_is_input; xfrm4_fill_dst()
90 xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | xfrm4_fill_dst()
92 xdst->u.rt.rt_type = rt->rt_type; xfrm4_fill_dst()
93 xdst->u.rt.rt_gateway = rt->rt_gateway; xfrm4_fill_dst()
94 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; xfrm4_fill_dst()
95 xdst->u.rt.rt_pmtu = rt->rt_pmtu; xfrm4_fill_dst()
96 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); xfrm4_fill_dst()
H A Droute.c445 const struct rtable *rt; ipv4_neigh_lookup() local
448 rt = (const struct rtable *) dst; ipv4_neigh_lookup()
449 if (rt->rt_gateway) ipv4_neigh_lookup()
450 pkey = (const __be32 *) &rt->rt_gateway; ipv4_neigh_lookup()
560 static inline void rt_free(struct rtable *rt) rt_free() argument
562 call_rcu(&rt->dst.rcu_head, dst_rcu_free); rt_free()
569 struct rtable *rt; fnhe_flush_routes() local
571 rt = rcu_dereference(fnhe->fnhe_rth_input); fnhe_flush_routes()
572 if (rt) { fnhe_flush_routes()
574 rt_free(rt); fnhe_flush_routes()
576 rt = rcu_dereference(fnhe->fnhe_rth_output); fnhe_flush_routes()
577 if (rt) { fnhe_flush_routes()
579 rt_free(rt); fnhe_flush_routes()
607 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) fill_route_from_fnhe() argument
609 rt->rt_pmtu = fnhe->fnhe_pmtu; fill_route_from_fnhe()
610 rt->dst.expires = fnhe->fnhe_expires; fill_route_from_fnhe()
613 rt->rt_flags |= RTCF_REDIRECTED; fill_route_from_fnhe()
614 rt->rt_gateway = fnhe->fnhe_gw; fill_route_from_fnhe()
615 rt->rt_uses_gateway = 1; fill_route_from_fnhe()
624 struct rtable *rt; update_or_create_fnhe() local
657 rt = rcu_dereference(fnhe->fnhe_rth_input); update_or_create_fnhe()
658 if (rt) update_or_create_fnhe()
659 fill_route_from_fnhe(rt, fnhe); update_or_create_fnhe()
660 rt = rcu_dereference(fnhe->fnhe_rth_output); update_or_create_fnhe()
661 if (rt) update_or_create_fnhe()
662 fill_route_from_fnhe(rt, fnhe); update_or_create_fnhe()
684 rt = rcu_dereference(nh->nh_rth_input); update_or_create_fnhe()
685 if (rt) update_or_create_fnhe()
686 rt->dst.obsolete = DST_OBSOLETE_KILL; update_or_create_fnhe()
691 rt = rcu_dereference(*prt); for_each_possible_cpu()
692 if (rt) for_each_possible_cpu()
693 rt->dst.obsolete = DST_OBSOLETE_KILL; for_each_possible_cpu()
703 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4, __ip_do_redirect() argument
725 if (rt->rt_gateway != old_gw) __ip_do_redirect()
748 n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); __ip_do_redirect()
760 rt->dst.obsolete = DST_OBSOLETE_KILL; __ip_do_redirect()
785 struct rtable *rt; ip_do_redirect() local
793 rt = (struct rtable *) dst; ip_do_redirect()
796 __ip_do_redirect(rt, skb, &fl4, true); ip_do_redirect()
801 struct rtable *rt = (struct rtable *)dst; ipv4_negative_advice() local
804 if (rt) { ipv4_negative_advice()
806 ip_rt_put(rt); ipv4_negative_advice()
808 } else if ((rt->rt_flags & RTCF_REDIRECTED) || ipv4_negative_advice()
809 rt->dst.expires) { ipv4_negative_advice()
810 ip_rt_put(rt); ipv4_negative_advice()
835 struct rtable *rt = skb_rtable(skb); ip_rt_send_redirect() local
842 in_dev = __in_dev_get_rcu(rt->dst.dev); ip_rt_send_redirect()
850 net = dev_net(rt->dst.dev); ip_rt_send_redirect()
854 rt_nexthop(rt, ip_hdr(skb)->daddr)); ip_rt_send_redirect()
879 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); ip_rt_send_redirect()
899 struct rtable *rt = skb_rtable(skb); ip_error() local
910 net = dev_net(rt->dst.dev); ip_error()
912 switch (rt->dst.error) { ip_error()
924 switch (rt->dst.error) { ip_error()
962 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) __ip_rt_update_pmtu() argument
964 struct dst_entry *dst = &rt->dst; __ip_rt_update_pmtu()
976 if (rt->rt_pmtu == mtu && __ip_rt_update_pmtu()
993 struct rtable *rt = (struct rtable *) dst; ip_rt_update_pmtu() local
997 __ip_rt_update_pmtu(rt, &fl4, mtu); ip_rt_update_pmtu()
1005 struct rtable *rt; ipv4_update_pmtu() local
1012 rt = __ip_route_output_key(net, &fl4); ipv4_update_pmtu()
1013 if (!IS_ERR(rt)) { ipv4_update_pmtu()
1014 __ip_rt_update_pmtu(rt, &fl4, mtu); ipv4_update_pmtu()
1015 ip_rt_put(rt); ipv4_update_pmtu()
1024 struct rtable *rt; __ipv4_sk_update_pmtu() local
1031 rt = __ip_route_output_key(sock_net(sk), &fl4); __ipv4_sk_update_pmtu()
1032 if (!IS_ERR(rt)) { __ipv4_sk_update_pmtu()
1033 __ip_rt_update_pmtu(rt, &fl4, mtu); __ipv4_sk_update_pmtu()
1034 ip_rt_put(rt); __ipv4_sk_update_pmtu()
1042 struct rtable *rt; ipv4_sk_update_pmtu() local
1060 rt = (struct rtable *)odst; ipv4_sk_update_pmtu()
1062 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); ipv4_sk_update_pmtu()
1063 if (IS_ERR(rt)) ipv4_sk_update_pmtu()
1069 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); ipv4_sk_update_pmtu()
1071 if (!dst_check(&rt->dst, 0)) { ipv4_sk_update_pmtu()
1073 dst_release(&rt->dst); ipv4_sk_update_pmtu()
1075 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); ipv4_sk_update_pmtu()
1076 if (IS_ERR(rt)) ipv4_sk_update_pmtu()
1083 sk_dst_set(sk, &rt->dst); ipv4_sk_update_pmtu()
1096 struct rtable *rt; ipv4_redirect() local
1100 rt = __ip_route_output_key(net, &fl4); ipv4_redirect()
1101 if (!IS_ERR(rt)) { ipv4_redirect()
1102 __ip_do_redirect(rt, skb, &fl4, false); ipv4_redirect()
1103 ip_rt_put(rt); ipv4_redirect()
1112 struct rtable *rt; ipv4_sk_redirect() local
1115 rt = __ip_route_output_key(sock_net(sk), &fl4); ipv4_sk_redirect()
1116 if (!IS_ERR(rt)) { ipv4_sk_redirect()
1117 __ip_do_redirect(rt, skb, &fl4, false); ipv4_sk_redirect()
1118 ip_rt_put(rt); ipv4_sk_redirect()
1125 struct rtable *rt = (struct rtable *) dst; ipv4_dst_check() local
1135 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt)) ipv4_dst_check()
1142 struct rtable *rt; ipv4_link_failure() local
1146 rt = skb_rtable(skb); ipv4_link_failure()
1147 if (rt) ipv4_link_failure()
1148 dst_set_expires(&rt->dst, 0); ipv4_link_failure()
1170 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt) ip_rt_get_source() argument
1174 if (rt_is_output_route(rt)) ip_rt_get_source()
1187 fl4.flowi4_oif = rt->dst.dev->ifindex; ip_rt_get_source()
1192 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0) ip_rt_get_source()
1193 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res); ip_rt_get_source()
1195 src = inet_select_addr(rt->dst.dev, ip_rt_get_source()
1196 rt_nexthop(rt, iph->daddr), ip_rt_get_source()
1204 static void set_class_tag(struct rtable *rt, u32 tag) set_class_tag() argument
1206 if (!(rt->dst.tclassid & 0xFFFF)) set_class_tag()
1207 rt->dst.tclassid |= tag & 0xFFFF; set_class_tag()
1208 if (!(rt->dst.tclassid & 0xFFFF0000)) set_class_tag()
1209 rt->dst.tclassid |= tag & 0xFFFF0000; set_class_tag()
1228 const struct rtable *rt = (const struct rtable *) dst; ipv4_mtu() local
1229 unsigned int mtu = rt->rt_pmtu; ipv4_mtu()
1231 if (!mtu || time_after_eq(jiffies, rt->dst.expires)) ipv4_mtu()
1240 if (rt->rt_uses_gateway && mtu > 576) ipv4_mtu()
1266 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, rt_bind_exception() argument
1276 int genid = fnhe_genid(dev_net(rt->dst.dev)); rt_bind_exception()
1278 if (rt_is_input_route(rt)) rt_bind_exception()
1292 fill_route_from_fnhe(rt, fnhe); rt_bind_exception()
1293 if (!rt->rt_gateway) rt_bind_exception()
1294 rt->rt_gateway = daddr; rt_bind_exception()
1296 if (!(rt->dst.flags & DST_NOCACHE)) { rt_bind_exception()
1297 rcu_assign_pointer(*porig, rt); rt_bind_exception()
1310 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt) rt_cache_route() argument
1315 if (rt_is_input_route(rt)) { rt_cache_route()
1322 prev = cmpxchg(p, orig, rt); rt_cache_route()
1339 static void rt_add_uncached_list(struct rtable *rt) rt_add_uncached_list() argument
1343 rt->rt_uncached_list = ul; rt_add_uncached_list()
1346 list_add_tail(&rt->rt_uncached, &ul->head); rt_add_uncached_list()
1352 struct rtable *rt = (struct rtable *) dst; ipv4_dst_destroy() local
1354 if (!list_empty(&rt->rt_uncached)) { ipv4_dst_destroy()
1355 struct uncached_list *ul = rt->rt_uncached_list; ipv4_dst_destroy()
1358 list_del(&rt->rt_uncached); ipv4_dst_destroy()
1366 struct rtable *rt; rt_flush_dev() local
1373 list_for_each_entry(rt, &ul->head, rt_uncached) { for_each_possible_cpu()
1374 if (rt->dst.dev != dev) for_each_possible_cpu()
1376 rt->dst.dev = net->loopback_dev; for_each_possible_cpu()
1377 dev_hold(rt->dst.dev); for_each_possible_cpu()
1384 static bool rt_cache_valid(const struct rtable *rt) rt_cache_valid() argument
1386 return rt && rt_cache_valid()
1387 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && rt_cache_valid()
1388 !rt_is_expired(rt); rt_cache_valid()
1391 static void rt_set_nexthop(struct rtable *rt, __be32 daddr, rt_set_nexthop() argument
1402 rt->rt_gateway = nh->nh_gw; rt_set_nexthop()
1403 rt->rt_uses_gateway = 1; rt_set_nexthop()
1405 dst_init_metrics(&rt->dst, fi->fib_metrics, true); rt_set_nexthop()
1407 rt->dst.tclassid = nh->nh_tclassid; rt_set_nexthop()
1410 cached = rt_bind_exception(rt, fnhe, daddr); rt_set_nexthop()
1411 else if (!(rt->dst.flags & DST_NOCACHE)) rt_set_nexthop()
1412 cached = rt_cache_route(nh, rt); rt_set_nexthop()
1419 rt->dst.flags |= DST_NOCACHE; rt_set_nexthop()
1420 if (!rt->rt_gateway) rt_set_nexthop()
1421 rt->rt_gateway = daddr; rt_set_nexthop()
1422 rt_add_uncached_list(rt); rt_set_nexthop()
1425 rt_add_uncached_list(rt); rt_set_nexthop()
1429 set_class_tag(rt, res->tclassid); rt_set_nexthop()
1431 set_class_tag(rt, itag); rt_set_nexthop()
2290 struct rtable *rt; ipv4_blackhole_route() local
2292 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0); ipv4_blackhole_route()
2293 if (rt) { ipv4_blackhole_route()
2294 struct dst_entry *new = &rt->dst; ipv4_blackhole_route()
2304 rt->rt_is_input = ort->rt_is_input; ipv4_blackhole_route()
2305 rt->rt_iif = ort->rt_iif; ipv4_blackhole_route()
2306 rt->rt_pmtu = ort->rt_pmtu; ipv4_blackhole_route()
2308 rt->rt_genid = rt_genid_ipv4(net); ipv4_blackhole_route()
2309 rt->rt_flags = ort->rt_flags; ipv4_blackhole_route()
2310 rt->rt_type = ort->rt_type; ipv4_blackhole_route()
2311 rt->rt_gateway = ort->rt_gateway; ipv4_blackhole_route()
2312 rt->rt_uses_gateway = ort->rt_uses_gateway; ipv4_blackhole_route()
2314 INIT_LIST_HEAD(&rt->rt_uncached); ipv4_blackhole_route()
2321 return rt ? &rt->dst : ERR_PTR(-ENOMEM); ipv4_blackhole_route()
2327 struct rtable *rt = __ip_route_output_key(net, flp4); ip_route_output_flow() local
2329 if (IS_ERR(rt)) ip_route_output_flow()
2330 return rt; ip_route_output_flow()
2333 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, ip_route_output_flow()
2337 return rt; ip_route_output_flow()
2345 struct rtable *rt = skb_rtable(skb); rt_fill_info() local
2364 r->rtm_type = rt->rt_type; rt_fill_info()
2367 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; rt_fill_info()
2368 if (rt->rt_flags & RTCF_NOTIFY) rt_fill_info()
2380 if (rt->dst.dev && rt_fill_info()
2381 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) rt_fill_info()
2384 if (rt->dst.tclassid && rt_fill_info()
2385 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) rt_fill_info()
2388 if (!rt_is_input_route(rt) && rt_fill_info()
2393 if (rt->rt_uses_gateway && rt_fill_info()
2394 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway)) rt_fill_info()
2397 expires = rt->dst.expires; rt_fill_info()
2407 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); rt_fill_info()
2408 if (rt->rt_pmtu && expires) rt_fill_info()
2409 metrics[RTAX_MTU - 1] = rt->rt_pmtu; rt_fill_info()
2417 error = rt->dst.error; rt_fill_info()
2419 if (rt_is_input_route(rt)) { rt_fill_info()
2443 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) rt_fill_info()
2459 struct rtable *rt = NULL; inet_rtm_getroute() local
2518 rt = skb_rtable(skb); inet_rtm_getroute()
2519 if (err == 0 && rt->dst.error) inet_rtm_getroute()
2520 err = -rt->dst.error; inet_rtm_getroute()
2522 rt = ip_route_output_key(net, &fl4); inet_rtm_getroute()
2525 if (IS_ERR(rt)) inet_rtm_getroute()
2526 err = PTR_ERR(rt); inet_rtm_getroute()
2532 skb_dst_set(skb, &rt->dst); inet_rtm_getroute()
2534 rt->rt_flags |= RTCF_NOTIFY; inet_rtm_getroute()
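[editor's note] Most of the route.c hits above (e.g. ipv4_update_pmtu(), ipv4_redirect(), inet_rtm_getroute()) follow the same output-route lookup pattern: fill a flowi4, call ip_route_output_key()/__ip_route_output_key(), check IS_ERR(), use the rtable, then release it with ip_rt_put(). A minimal hedged sketch of that pattern, assuming kernel context; daddr and saddr are hypothetical:

    /* Illustrative pattern only. */
    struct flowi4 fl4 = {
            .daddr = daddr,         /* hypothetical destination address */
            .saddr = saddr,         /* hypothetical source, may be 0 */
    };
    struct rtable *rt;

    rt = ip_route_output_key(net, &fl4);
    if (IS_ERR(rt))
            return PTR_ERR(rt);

    /* ... use rt->dst, rt_nexthop(rt, fl4.daddr), etc. ... */

    ip_rt_put(rt);                  /* releases the dst reference */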
H A Dnetfilter.c24 struct rtable *rt; ip_route_me_harder() local
46 rt = ip_route_output_key(net, &fl4); ip_route_me_harder()
47 if (IS_ERR(rt)) ip_route_me_harder()
48 return PTR_ERR(rt); ip_route_me_harder()
52 skb_dst_set(skb, &rt->dst); ip_route_me_harder()
179 struct rtable *rt = ip_route_output_key(net, &fl->u.ip4); nf_ip_route() local
180 if (IS_ERR(rt)) nf_ip_route()
181 return PTR_ERR(rt); nf_ip_route()
182 *dst = &rt->dst; nf_ip_route()
H A Dip_forward.c78 struct rtable *rt; /* Route we use */ ip_forward() local
110 rt = skb_rtable(skb); ip_forward()
112 if (opt->is_strictroute && rt->rt_uses_gateway) ip_forward()
116 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); ip_forward()
118 IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); ip_forward()
125 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len)) ip_forward()
143 skb->dev, rt->dst.dev, ip_forward_finish); ip_forward()
H A Dip_output.c138 struct rtable *rt = skb_rtable(skb); ip_build_and_send_pkt() local
148 if (ip_dont_fragment(sk, &rt->dst)) ip_build_and_send_pkt()
152 iph->ttl = ip_select_ttl(inet, &rt->dst); ip_build_and_send_pkt()
160 ip_options_build(skb, &opt->opt, daddr, rt, 0); ip_build_and_send_pkt()
174 struct rtable *rt = (struct rtable *)dst; ip_finish_output2() local
180 if (rt->rt_type == RTN_MULTICAST) { ip_finish_output2()
182 } else if (rt->rt_type == RTN_BROADCAST) ip_finish_output2()
201 nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr); ip_finish_output2()
282 struct rtable *rt = skb_rtable(skb); ip_mc_output() local
283 struct net_device *dev = rt->dst.dev; ip_mc_output()
297 if (rt->rt_flags&RTCF_MULTICAST) { ip_mc_output()
309 ((rt->rt_flags & RTCF_LOCAL) || ip_mc_output()
328 if (rt->rt_flags&RTCF_BROADCAST) { ip_mc_output()
375 struct rtable *rt; ip_queue_xmit() local
385 rt = skb_rtable(skb); ip_queue_xmit()
386 if (rt) ip_queue_xmit()
390 rt = (struct rtable *)__sk_dst_check(sk, 0); ip_queue_xmit()
391 if (!rt) { ip_queue_xmit()
403 rt = ip_route_output_ports(sock_net(sk), fl4, sk, ip_queue_xmit()
410 if (IS_ERR(rt)) ip_queue_xmit()
412 sk_setup_caps(sk, &rt->dst); ip_queue_xmit()
414 skb_dst_set_noref(skb, &rt->dst); ip_queue_xmit()
417 if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway) ip_queue_xmit()
425 if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) ip_queue_xmit()
429 iph->ttl = ip_select_ttl(inet, &rt->dst); ip_queue_xmit()
437 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); ip_queue_xmit()
499 struct rtable *rt = skb_rtable(skb); ip_fragment() local
502 dev = rt->dst.dev; ip_fragment()
647 ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
860 struct rtable *rt = (struct rtable *)cork->dst; __ip_append_data() local
865 exthdrlen = !skb ? rt->dst.header_len : 0; __ip_append_data()
871 hh_len = LL_RESERVED_SPACE(rt->dst.dev); __ip_append_data()
889 rt->dst.dev->features & NETIF_F_V4_CSUM && __ip_append_data()
896 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && __ip_append_data()
945 !(rt->dst.dev->features&NETIF_F_SG)) __ip_append_data()
958 alloclen += rt->dst.trailer_len; __ip_append_data()
1032 if (!(rt->dst.dev->features&NETIF_F_SG)) { __ip_append_data()
1091 struct rtable *rt; ip_setup_cork() local
1108 rt = *rtp; ip_setup_cork()
1109 if (unlikely(!rt)) ip_setup_cork()
1116 dst_mtu(&rt->dst) : rt->dst.dev->mtu; ip_setup_cork()
1117 cork->dst = &rt->dst; ip_setup_cork()
1169 struct rtable *rt; ip_append_page() local
1188 rt = (struct rtable *)cork->dst; ip_append_page()
1192 if (!(rt->dst.dev->features&NETIF_F_SG)) ip_append_page()
1195 hh_len = LL_RESERVED_SPACE(rt->dst.dev); ip_append_page()
1215 (rt->dst.dev->features & NETIF_F_UFO)) { ip_append_page()
1334 struct rtable *rt = (struct rtable *)cork->dst; __ip_make_skb() local
1369 (skb->len <= dst_mtu(&rt->dst) && __ip_make_skb()
1370 ip_dont_fragment(sk, &rt->dst))) __ip_make_skb()
1378 else if (rt->rt_type == RTN_MULTICAST) __ip_make_skb()
1381 ttl = ip_select_ttl(inet, &rt->dst); __ip_make_skb()
1395 ip_options_build(skb, opt, cork->addr, rt, 0); __ip_make_skb()
1401 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec __ip_make_skb()
1405 skb_dst_set(skb, &rt->dst); __ip_make_skb()
1524 struct rtable *rt = skb_rtable(skb); ip_send_unicast_reply() local
1553 rt = ip_route_output_key(net, &fl4); ip_send_unicast_reply()
1554 if (IS_ERR(rt)) ip_send_unicast_reply()
1564 len, 0, &ipc, &rt, MSG_DONTWAIT); ip_send_unicast_reply()
1581 ip_rt_put(rt); ip_send_unicast_reply()
H A Dicmp.c288 static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, icmpv4_xrlim_allow() argument
291 struct dst_entry *dst = &rt->dst; icmpv4_xrlim_allow()
354 struct ipcm_cookie *ipc, struct rtable **rt) icmp_push_reply()
359 sk = icmp_sk(dev_net((*rt)->dst.dev)); icmp_push_reply()
363 ipc, rt, MSG_DONTWAIT) < 0) { icmp_push_reply()
390 struct rtable *rt = skb_rtable(skb); icmp_reply() local
391 struct net *net = dev_net(rt->dst.dev); icmp_reply()
429 rt = ip_route_output_key(net, &fl4); icmp_reply()
430 if (IS_ERR(rt)) icmp_reply()
432 if (icmpv4_xrlim_allow(net, rt, &fl4, icmp_param->data.icmph.type, icmp_reply()
434 icmp_push_reply(icmp_param, &fl4, &ipc, &rt); icmp_reply()
435 ip_rt_put(rt); icmp_reply()
448 struct rtable *rt, *rt2; icmp_route_lookup() local
462 rt = __ip_route_output_key(net, fl4); icmp_route_lookup()
463 if (IS_ERR(rt)) icmp_route_lookup()
464 return rt; icmp_route_lookup()
467 rt2 = rt; icmp_route_lookup()
469 rt = (struct rtable *) xfrm_lookup(net, &rt->dst, icmp_route_lookup()
471 if (!IS_ERR(rt)) { icmp_route_lookup()
472 if (rt != rt2) icmp_route_lookup()
473 return rt; icmp_route_lookup()
474 } else if (PTR_ERR(rt) == -EPERM) { icmp_route_lookup()
475 rt = NULL; icmp_route_lookup()
477 return rt; icmp_route_lookup()
514 dst_release(&rt->dst); icmp_route_lookup()
516 rt = rt2; icmp_route_lookup()
518 if (rt) icmp_route_lookup()
519 dst_release(&rt->dst); icmp_route_lookup()
525 return rt; icmp_route_lookup()
528 if (rt) icmp_route_lookup()
529 return rt; icmp_route_lookup()
549 struct rtable *rt = skb_rtable(skb_in); icmp_send() local
558 if (!rt) icmp_send()
560 net = dev_net(rt->dst.dev); icmp_send()
583 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) icmp_send()
638 if (!(rt->rt_flags & RTCF_LOCAL)) { icmp_send()
642 if (rt_is_input_route(rt) && icmp_send()
680 rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, icmp_send()
682 if (IS_ERR(rt)) icmp_send()
685 if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code)) icmp_send()
690 room = dst_mtu(&rt->dst); icmp_send()
701 icmp_push_reply(icmp_param, &fl4, &ipc, &rt); icmp_send()
703 ip_rt_put(rt); icmp_send()
954 struct rtable *rt = skb_rtable(skb); icmp_rcv() local
955 struct net *net = dev_net(rt->dst.dev); icmp_rcv()
1003 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { icmp_rcv()
352 icmp_push_reply(struct icmp_bxm *icmp_param, struct flowi4 *fl4, struct ipcm_cookie *ipc, struct rtable **rt) icmp_push_reply() argument
H A Dip_options.c44 __be32 daddr, struct rtable *rt, int is_frag) ip_options_build()
57 ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, skb, rt); ip_options_build()
59 ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt); ip_options_build()
262 struct rtable *rt = NULL; ip_options_compile() local
268 rt = skb_rtable(skb); ip_options_compile()
344 if (rt) { ip_options_compile()
385 if (rt) { ip_options_compile()
564 struct rtable *rt = skb_rtable(skb); ip_forward_options() local
569 ip_rt_get_source(&optptr[optptr[2]-5], skb, rt); ip_forward_options()
589 ip_rt_get_source(&optptr[srrptr-1], skb, rt); ip_forward_options()
597 ip_rt_get_source(&optptr[optptr[2]-9], skb, rt); ip_forward_options()
614 struct rtable *rt = skb_rtable(skb); ip_options_rcv_srr() local
619 if (!rt) ip_options_rcv_srr()
624 if (rt->rt_type == RTN_UNICAST) { ip_options_rcv_srr()
630 if (rt->rt_type != RTN_LOCAL) ip_options_rcv_srr()
43 ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag) ip_options_build() argument
H A Dfib_frontend.c259 struct rtable *rt; fib_compute_spec_dst() local
264 rt = skb_rtable(skb); fib_compute_spec_dst()
265 if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) == fib_compute_spec_dst()
408 static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, rtentry_to_fib_config() argument
417 if (rt->rt_dst.sa_family != AF_INET) rtentry_to_fib_config()
429 addr = sk_extract_addr(&rt->rt_dst); rtentry_to_fib_config()
430 if (!(rt->rt_flags & RTF_HOST)) { rtentry_to_fib_config()
431 __be32 mask = sk_extract_addr(&rt->rt_genmask); rtentry_to_fib_config()
433 if (rt->rt_genmask.sa_family != AF_INET) { rtentry_to_fib_config()
434 if (mask || rt->rt_genmask.sa_family) rtentry_to_fib_config()
452 if (rt->rt_metric) rtentry_to_fib_config()
453 cfg->fc_priority = rt->rt_metric - 1; rtentry_to_fib_config()
455 if (rt->rt_flags & RTF_REJECT) { rtentry_to_fib_config()
464 if (rt->rt_dev) { rtentry_to_fib_config()
469 if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1)) rtentry_to_fib_config()
495 addr = sk_extract_addr(&rt->rt_gateway); rtentry_to_fib_config()
496 if (rt->rt_gateway.sa_family == AF_INET && addr) { rtentry_to_fib_config()
498 if (rt->rt_flags & RTF_GATEWAY && rtentry_to_fib_config()
506 if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw) rtentry_to_fib_config()
512 if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) { rtentry_to_fib_config()
520 if (rt->rt_flags & RTF_MTU) rtentry_to_fib_config()
521 len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40); rtentry_to_fib_config()
523 if (rt->rt_flags & RTF_WINDOW) rtentry_to_fib_config()
524 len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window); rtentry_to_fib_config()
526 if (rt->rt_flags & RTF_IRTT) rtentry_to_fib_config()
527 len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3); rtentry_to_fib_config()
543 struct rtentry rt; ip_rt_ioctl() local
552 if (copy_from_user(&rt, arg, sizeof(rt))) ip_rt_ioctl()
556 err = rtentry_to_fib_config(net, cmd, &rt, &cfg); ip_rt_ioctl()
H A Darp.c391 struct rtable *rt; arp_filter() local
396 rt = ip_route_output(net, sip, tip, 0, 0); arp_filter()
397 if (IS_ERR(rt)) arp_filter()
399 if (rt->dst.dev != dev) { arp_filter()
403 ip_rt_put(rt); arp_filter()
411 struct net_device *dev, struct rtable *rt) arp_fwd_proxy()
416 if (rt->dst.dev == dev) arp_fwd_proxy()
429 out_dev = __in_dev_get_rcu(rt->dst.dev); arp_fwd_proxy()
456 struct net_device *dev, struct rtable *rt, arp_fwd_pvlan()
460 if (rt->dst.dev != dev) arp_fwd_pvlan()
635 struct rtable *rt; arp_process() local
755 rt = skb_rtable(skb); arp_process()
756 addr_type = rt->rt_type; arp_process()
776 (arp_fwd_proxy(in_dev, dev, rt) || arp_process()
777 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) || arp_process()
778 (rt->dst.dev != dev && arp_process()
953 struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0); arp_req_set() local
955 if (IS_ERR(rt)) arp_req_set()
956 return PTR_ERR(rt); arp_req_set()
957 dev = rt->dst.dev; arp_req_set()
958 ip_rt_put(rt); arp_req_set()
1073 struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0); arp_req_delete() local
1074 if (IS_ERR(rt)) arp_req_delete()
1075 return PTR_ERR(rt); arp_req_delete()
1076 dev = rt->dst.dev; arp_req_delete()
1077 ip_rt_put(rt); arp_req_delete()
410 arp_fwd_proxy(struct in_device *in_dev, struct net_device *dev, struct rtable *rt) arp_fwd_proxy() argument
455 arp_fwd_pvlan(struct in_device *in_dev, struct net_device *dev, struct rtable *rt, __be32 sip, __be32 tip) arp_fwd_pvlan() argument
H A Dip_tunnel.c369 struct rtable *rt; ip_tunnel_bind_dev() local
374 rt = ip_route_output_key(tunnel->net, &fl4); ip_tunnel_bind_dev()
376 if (!IS_ERR(rt)) { ip_tunnel_bind_dev()
377 tdev = rt->dst.dev; ip_tunnel_bind_dev()
378 tunnel_dst_set(tunnel, &rt->dst, fl4.saddr); ip_tunnel_bind_dev()
379 ip_rt_put(rt); ip_tunnel_bind_dev()
589 struct rtable *rt, __be16 df, tnl_update_pmtu()
597 mtu = dst_mtu(&rt->dst) - dev->hard_header_len tnl_update_pmtu()
646 struct rtable *rt; /* Route to the other host */ ip_tunnel_xmit() local
665 rt = skb_rtable(skb); ip_tunnel_xmit()
666 dst = rt_nexthop(rt, inner_iph->daddr); ip_tunnel_xmit()
723 rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL; ip_tunnel_xmit()
725 if (!rt) { ip_tunnel_xmit()
726 rt = ip_route_output_key(tunnel->net, &fl4); ip_tunnel_xmit()
728 if (IS_ERR(rt)) { ip_tunnel_xmit()
733 tunnel_dst_set(tunnel, &rt->dst, fl4.saddr); ip_tunnel_xmit()
736 if (rt->dst.dev == dev) { ip_tunnel_xmit()
737 ip_rt_put(rt); ip_tunnel_xmit()
742 if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { ip_tunnel_xmit()
743 ip_rt_put(rt); ip_tunnel_xmit()
768 ttl = ip4_dst_hoplimit(&rt->dst); ip_tunnel_xmit()
775 max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) ip_tunnel_xmit()
776 + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); ip_tunnel_xmit()
781 ip_rt_put(rt); ip_tunnel_xmit()
787 err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, ip_tunnel_xmit()
588 tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, struct rtable *rt, __be16 df, const struct iphdr *inner_iph) tnl_update_pmtu() argument
H A Dudp_tunnel.c78 int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, udp_tunnel_xmit_skb() argument
95 return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, udp_tunnel_xmit_skb()
H A Dinet_connection_sock.c409 struct rtable *rt; inet_csk_route_req() local
418 rt = ip_route_output_flow(net, fl4, sk); inet_csk_route_req()
419 if (IS_ERR(rt)) inet_csk_route_req()
421 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) inet_csk_route_req()
423 return &rt->dst; inet_csk_route_req()
426 ip_rt_put(rt); inet_csk_route_req()
442 struct rtable *rt; inet_csk_route_child_sock() local
455 rt = ip_route_output_flow(net, fl4, sk); inet_csk_route_child_sock()
456 if (IS_ERR(rt)) inet_csk_route_child_sock()
458 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) inet_csk_route_child_sock()
461 return &rt->dst; inet_csk_route_child_sock()
464 ip_rt_put(rt); inet_csk_route_child_sock()
941 struct rtable *rt; inet_csk_rebuild_route() local
948 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet_csk_rebuild_route()
952 if (IS_ERR(rt)) inet_csk_rebuild_route()
953 rt = NULL; inet_csk_rebuild_route()
954 if (rt) inet_csk_rebuild_route()
955 sk_setup_caps(sk, &rt->dst); inet_csk_rebuild_route()
958 return &rt->dst; inet_csk_rebuild_route()
H A Dip_tunnel_core.c49 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, iptunnel_xmit() argument
60 skb_dst_set(skb, &rt->dst); iptunnel_xmit()
77 __ip_select_ident(dev_net(rt->dst.dev), iph, iptunnel_xmit()
H A Draw.c349 struct rtable *rt = *rtp; raw_send_hdrinc() local
352 if (length > rt->dst.dev->mtu) { raw_send_hdrinc()
354 rt->dst.dev->mtu); raw_send_hdrinc()
360 hlen = LL_RESERVED_SPACE(rt->dst.dev); raw_send_hdrinc()
361 tlen = rt->dst.dev->needed_tailroom; raw_send_hdrinc()
371 skb_dst_set(skb, &rt->dst); raw_send_hdrinc()
415 NULL, rt->dst.dev, dst_output_sk); raw_send_hdrinc()
487 struct rtable *rt = NULL; raw_sendmsg() local
612 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); raw_sendmsg()
613 if (IS_ERR(rt)) { raw_sendmsg()
614 err = PTR_ERR(rt); raw_sendmsg()
615 rt = NULL; raw_sendmsg()
620 if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST)) raw_sendmsg()
629 &rt, msg->msg_flags); raw_sendmsg()
639 &ipc, &rt, msg->msg_flags); raw_sendmsg()
652 ip_rt_put(rt); raw_sendmsg()
660 dst_confirm(&rt->dst); raw_sendmsg()
H A Dsyncookies.c303 struct rtable *rt; cookie_v4_check() local
377 rt = ip_route_output_key(sock_net(sk), &fl4); cookie_v4_check()
378 if (IS_ERR(rt)) { cookie_v4_check()
384 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW); cookie_v4_check()
389 dst_metric(&rt->dst, RTAX_INITRWND)); cookie_v4_check()
392 ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst); cookie_v4_check()
394 ret = get_cookie_sock(sk, skb, req, &rt->dst); cookie_v4_check()
H A Dip_input.c316 struct rtable *rt; ip_rcv_finish() local
359 rt = skb_rtable(skb); ip_rcv_finish()
360 if (rt->rt_type == RTN_MULTICAST) { ip_rcv_finish()
361 IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST, ip_rcv_finish()
363 } else if (rt->rt_type == RTN_BROADCAST) ip_rcv_finish()
364 IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST, ip_rcv_finish()
H A Dgeneve.c106 int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, geneve_xmit_skb() argument
116 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len geneve_xmit_skb()
139 return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst, geneve_xmit_skb()
H A Dping.c704 struct rtable *rt = NULL; ping_v4_sendmsg() local
796 rt = ip_route_output_flow(net, &fl4, sk); ping_v4_sendmsg()
797 if (IS_ERR(rt)) { ping_v4_sendmsg()
798 err = PTR_ERR(rt); ping_v4_sendmsg()
799 rt = NULL; ping_v4_sendmsg()
806 if ((rt->rt_flags & RTCF_BROADCAST) && ping_v4_sendmsg()
829 0, &ipc, &rt, msg->msg_flags); ping_v4_sendmsg()
837 ip_rt_put(rt); ping_v4_sendmsg()
847 dst_confirm(&rt->dst); ping_v4_sendmsg()
H A Dtcp_ipv4.c149 struct rtable *rt; tcp_v4_connect() local
171 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, tcp_v4_connect()
175 if (IS_ERR(rt)) { tcp_v4_connect()
176 err = PTR_ERR(rt); tcp_v4_connect()
182 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { tcp_v4_connect()
183 ip_rt_put(rt); tcp_v4_connect()
204 tcp_fetch_timewait_stamp(sk, &rt->dst); tcp_v4_connect()
227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, tcp_v4_connect()
229 if (IS_ERR(rt)) { tcp_v4_connect()
230 err = PTR_ERR(rt); tcp_v4_connect()
231 rt = NULL; tcp_v4_connect()
236 sk_setup_caps(sk, &rt->dst); tcp_v4_connect()
248 rt = NULL; tcp_v4_connect()
260 ip_rt_put(rt); tcp_v4_connect()
H A Daf_inet.c1111 struct rtable *rt; inet_sk_reselect_saddr() local
1122 rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk), inet_sk_reselect_saddr()
1125 if (IS_ERR(rt)) inet_sk_reselect_saddr()
1126 return PTR_ERR(rt); inet_sk_reselect_saddr()
1128 sk_setup_caps(sk, &rt->dst); inet_sk_reselect_saddr()
1157 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); inet_sk_rebuild_header() local
1164 if (rt) inet_sk_rebuild_header()
1175 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr, inet_sk_rebuild_header()
1179 if (!IS_ERR(rt)) { inet_sk_rebuild_header()
1181 sk_setup_caps(sk, &rt->dst); inet_sk_rebuild_header()
1183 err = PTR_ERR(rt); inet_sk_rebuild_header()
H A Dipmr.c1706 struct rtable *rt; ipmr_queue_xmit() local
1725 rt = ip_route_output_ports(net, &fl4, NULL, ipmr_queue_xmit()
1730 if (IS_ERR(rt)) ipmr_queue_xmit()
1734 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0, ipmr_queue_xmit()
1738 if (IS_ERR(rt)) ipmr_queue_xmit()
1742 dev = rt->dst.dev; ipmr_queue_xmit()
1744 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { ipmr_queue_xmit()
1751 ip_rt_put(rt); ipmr_queue_xmit()
1755 encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; ipmr_queue_xmit()
1758 ip_rt_put(rt); ipmr_queue_xmit()
1766 skb_dst_set(skb, &rt->dst); ipmr_queue_xmit()
1933 struct rtable *rt = skb_rtable(skb); ipmr_rt_fib_lookup() local
1939 .flowi4_oif = (rt_is_output_route(rt) ? ipmr_rt_fib_lookup()
1941 .flowi4_iif = (rt_is_output_route(rt) ? ipmr_rt_fib_lookup()
H A Dip_gre.c414 struct rtable *rt; ipgre_open() local
416 rt = ip_route_output_gre(t->net, &fl4, ipgre_open()
422 if (IS_ERR(rt)) ipgre_open()
424 dev = rt->dst.dev; ipgre_open()
425 ip_rt_put(rt); ipgre_open()
H A Digmp.c325 struct rtable *rt; igmpv3_newpack() local
345 rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, igmpv3_newpack()
348 if (IS_ERR(rt)) { igmpv3_newpack()
353 skb_dst_set(skb, &rt->dst); igmpv3_newpack()
671 struct rtable *rt; igmp_send_report() local
686 rt = ip_route_output_ports(net, &fl4, NULL, dst, 0, igmp_send_report()
689 if (IS_ERR(rt)) igmp_send_report()
696 ip_rt_put(rt); igmp_send_report()
701 skb_dst_set(skb, &rt->dst); igmp_send_report()
1523 struct rtable *rt = ip_route_output(net, ip_mc_find_dev() local
1526 if (!IS_ERR(rt)) { ip_mc_find_dev()
1527 dev = rt->dst.dev; ip_mc_find_dev()
1528 ip_rt_put(rt); ip_mc_find_dev()
H A Dfib_semantics.c145 struct rtable *rt = rcu_dereference_protected(*rtp, 1); rt_fibinfo_free() local
147 if (!rt) rt_fibinfo_free()
155 dst_free(&rt->dst); rt_fibinfo_free()
194 struct rtable *rt; for_each_possible_cpu() local
196 rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1); for_each_possible_cpu()
197 if (rt) for_each_possible_cpu()
198 dst_free(&rt->dst); for_each_possible_cpu()
/linux-4.1.27/net/bridge/
H A Dbr_nf_core.c77 struct rtable *rt = &br->fake_rtable; br_netfilter_rtable_init() local
79 atomic_set(&rt->dst.__refcnt, 1); br_netfilter_rtable_init()
80 rt->dst.dev = br->dev; br_netfilter_rtable_init()
81 rt->dst.path = &rt->dst; br_netfilter_rtable_init()
82 dst_init_metrics(&rt->dst, br_dst_default_metrics, true); br_netfilter_rtable_init()
83 rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; br_netfilter_rtable_init()
84 rt->dst.ops = &fake_dst_ops; br_netfilter_rtable_init()
H A Dbr_netfilter.c279 struct rtable *rt; br_nf_pre_routing_finish_ipv6() local
287 rt = bridge_parent_rtable(nf_bridge->physindev); br_nf_pre_routing_finish_ipv6()
288 if (!rt) { br_nf_pre_routing_finish_ipv6()
292 skb_dst_set_noref(skb, &rt->dst); br_nf_pre_routing_finish_ipv6()
399 struct rtable *rt; br_nf_pre_routing_finish() local
425 rt = ip_route_output(dev_net(dev), iph->daddr, 0, br_nf_pre_routing_finish()
427 if (!IS_ERR(rt)) { br_nf_pre_routing_finish()
430 if (rt->dst.dev == dev) { br_nf_pre_routing_finish()
431 skb_dst_set(skb, &rt->dst); br_nf_pre_routing_finish()
434 ip_rt_put(rt); br_nf_pre_routing_finish()
456 rt = bridge_parent_rtable(nf_bridge->physindev); br_nf_pre_routing_finish()
457 if (!rt) { br_nf_pre_routing_finish()
461 skb_dst_set_noref(skb, &rt->dst); br_nf_pre_routing_finish()
/linux-4.1.27/net/ipv6/netfilter/
H A Dip6t_rpfilter.c32 struct rt6_info *rt; rpfilter_lookup_reverse6() local
56 rt = (void *) ip6_route_lookup(dev_net(dev), &fl6, lookup_flags); rpfilter_lookup_reverse6()
57 if (rt->dst.error) rpfilter_lookup_reverse6()
60 if (rt->rt6i_flags & (RTF_REJECT|RTF_ANYCAST)) rpfilter_lookup_reverse6()
63 if (rt->rt6i_flags & RTF_LOCAL) { rpfilter_lookup_reverse6()
68 if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) rpfilter_lookup_reverse6()
71 ip6_rt_put(rt); rpfilter_lookup_reverse6()
77 const struct rt6_info *rt = (const void *) skb_dst(skb); rpfilter_is_local() local
78 return rt && (rt->rt6i_flags & RTF_LOCAL); rpfilter_is_local()
H A Dip6t_rt.c198 pr_debug("`--rt-type 0' required before `--rt-0-*'"); rt_mt6_check()
206 .name = "rt",
/linux-4.1.27/net/appletalk/
H A Datalk_proc.c125 struct atalk_route *rt; atalk_seq_route_show() local
133 rt = &atrtr_default; atalk_seq_route_show()
135 ntohs(rt->gateway.s_net), rt->gateway.s_node, atalk_seq_route_show()
136 rt->flags, rt->dev->name); atalk_seq_route_show()
139 rt = v; atalk_seq_route_show()
141 ntohs(rt->target.s_net), rt->target.s_node, atalk_seq_route_show()
142 ntohs(rt->gateway.s_net), rt->gateway.s_node, atalk_seq_route_show()
143 rt->flags, rt->dev->name); atalk_seq_route_show()
H A Dddp.c505 struct atalk_route *rt; atrtr_create() local
521 for (rt = atalk_routes; rt; rt = rt->next) { atrtr_create()
522 if (r->rt_flags != rt->flags) atrtr_create()
525 if (ta->sat_addr.s_net == rt->target.s_net) { atrtr_create()
526 if (!(rt->flags & RTF_HOST)) atrtr_create()
528 if (ta->sat_addr.s_node == rt->target.s_node) atrtr_create()
558 if (!rt) { atrtr_create()
559 rt = kzalloc(sizeof(*rt), GFP_ATOMIC); atrtr_create()
562 if (!rt) atrtr_create()
565 rt->next = atalk_routes; atrtr_create()
566 atalk_routes = rt; atrtr_create()
570 rt->target = ta->sat_addr; atrtr_create()
572 rt->dev = devhint; atrtr_create()
573 rt->flags = r->rt_flags; atrtr_create()
574 rt->gateway = ga->sat_addr; atrtr_create()
879 struct rtentry rt; atrtr_ioctl() local
881 if (copy_from_user(&rt, arg, sizeof(rt))) atrtr_ioctl()
886 if (rt.rt_dst.sa_family != AF_APPLETALK) atrtr_ioctl()
889 &rt.rt_dst)->sat_addr); atrtr_ioctl()
893 if (rt.rt_dev) { atrtr_ioctl()
895 if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1)) atrtr_ioctl()
902 return atrtr_create(&rt, dev); atrtr_ioctl()
1317 struct atalk_route *rt; atalk_route_packet() local
1341 rt = atrtr_find(&ta); atalk_route_packet()
1344 if (!rt || !(len_hops & (15 << 10))) atalk_route_packet()
1354 if (rt->flags & RTF_GATEWAY) { atalk_route_packet()
1355 ta.s_net = rt->gateway.s_net; atalk_route_packet()
1356 ta.s_node = rt->gateway.s_node; atalk_route_packet()
1361 (rt->dev->hard_header_len + atalk_route_packet()
1393 if (aarp_send_ddp(rt->dev, skb, &ta, NULL) == NET_XMIT_DROP) atalk_route_packet()
1574 struct atalk_route *rt; atalk_sendmsg() local
1619 rt = atrtr_find(&usat->sat_addr); atalk_sendmsg()
1626 rt = atrtr_find(&at_hint); atalk_sendmsg()
1629 if (!rt) atalk_sendmsg()
1632 dev = rt->dev; atalk_sendmsg()
1678 !(rt->flags & RTF_GATEWAY) && !(dev->flags & IFF_LOOPBACK)) { atalk_sendmsg()
1701 rt = atrtr_find(&at_lo); atalk_sendmsg()
1702 if (!rt) { atalk_sendmsg()
1707 dev = rt->dev; atalk_sendmsg()
1713 if (rt->flags & RTF_GATEWAY) { atalk_sendmsg()
1714 gsat.sat_addr = rt->gateway; atalk_sendmsg()
/linux-4.1.27/arch/powerpc/kvm/
H A Demulate_loadstore.c54 int ra, rs, rt; kvmppc_emulate_loadstore() local
67 rt = get_rt(inst); kvmppc_emulate_loadstore()
73 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_emulate_loadstore()
77 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_emulate_loadstore()
81 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_emulate_loadstore()
105 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_emulate_loadstore()
109 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_emulate_loadstore()
113 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_emulate_loadstore()
141 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); kvmppc_emulate_loadstore()
151 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); kvmppc_emulate_loadstore()
167 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_emulate_loadstore()
172 rt = get_rt(inst); kvmppc_emulate_loadstore()
173 emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); kvmppc_emulate_loadstore()
177 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_emulate_loadstore()
182 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_emulate_loadstore()
186 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_emulate_loadstore()
225 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_emulate_loadstore()
229 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_emulate_loadstore()
234 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_emulate_loadstore()
238 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_emulate_loadstore()
H A Demulate.c146 static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) kvmppc_emulate_mfspr() argument
204 kvmppc_set_gpr(vcpu, rt, spr_val); kvmppc_emulate_mfspr()
215 int rs, rt, sprn; kvmppc_emulate_instruction() local
229 rt = get_rt(inst); kvmppc_emulate_instruction()
261 emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); kvmppc_emulate_instruction()
H A Dbook3s_emulate.c93 int rt = get_rt(inst); kvmppc_core_emulate_op_pr() local
133 kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu)); kvmppc_core_emulate_op_pr()
158 kvmppc_set_gpr(vcpu, rt, sr); kvmppc_core_emulate_op_pr()
170 kvmppc_set_gpr(vcpu, rt, sr); kvmppc_core_emulate_op_pr()
251 kvmppc_set_gpr(vcpu, rt, t); kvmppc_core_emulate_op_pr()
262 kvmppc_set_gpr(vcpu, rt, t); kvmppc_core_emulate_op_pr()
H A De500_emulate.c122 int rt = get_rt(inst); kvmppc_core_emulate_op_e500() local
157 int type = rt & 0x3; kvmppc_core_emulate_op_e500()
/linux-4.1.27/arch/mips/kernel/
H A Dbranch.c78 if (insn.mm_i_format.rt != 0) /* Not mm_jr */ __mm_isBranchInstr()
79 regs->regs[insn.mm_i_format.rt] = __mm_isBranchInstr()
89 switch (insn.mm_i_format.rt) { __mm_isBranchInstr()
172 switch (insn.mm_i_format.rt) { __mm_isBranchInstr()
207 regs->regs[insn.mm_i_format.rt]) __mm_isBranchInstr()
218 regs->regs[insn.mm_i_format.rt]) __mm_isBranchInstr()
446 switch (insn.i_format.rt) { __compute_return_epc_for_insn()
453 if (insn.i_format.rt == bltzl_op) __compute_return_epc_for_insn()
466 if (insn.i_format.rt == bgezl_op) __compute_return_epc_for_insn()
476 insn.i_format.rt == bltzall_op)) { __compute_return_epc_for_insn()
500 if (insn.i_format.rt == bltzall_op) __compute_return_epc_for_insn()
510 insn.i_format.rt == bgezall_op)) { __compute_return_epc_for_insn()
534 if (insn.i_format.rt == bgezall_op) __compute_return_epc_for_insn()
579 regs->regs[insn.i_format.rt]) { __compute_return_epc_for_insn()
593 regs->regs[insn.i_format.rt]) { __compute_return_epc_for_insn()
609 * BLEZ | rs = 0 | rt != 0 == BLEZALC __compute_return_epc_for_insn()
610 * BLEZ | rs = rt != 0 == BGEZALC __compute_return_epc_for_insn()
611 * BLEZ | rs != 0 | rt != 0 == BGEUC __compute_return_epc_for_insn()
612 * BLEZL | rs = 0 | rt != 0 == BLEZC __compute_return_epc_for_insn()
613 * BLEZL | rs = rt != 0 == BGEZC __compute_return_epc_for_insn()
614 * BLEZL | rs != 0 | rt != 0 == BGEC __compute_return_epc_for_insn()
616 * For real BLEZ{,L}, rt is always 0. __compute_return_epc_for_insn()
619 if (cpu_has_mips_r6 && insn.i_format.rt) { __compute_return_epc_for_insn()
621 ((!insn.i_format.rs && insn.i_format.rt) || __compute_return_epc_for_insn()
622 (insn.i_format.rs == insn.i_format.rt))) __compute_return_epc_for_insn()
627 /* rt field assumed to be zero */ __compute_return_epc_for_insn()
644 * BGTZ | rs = 0 | rt != 0 == BGTZALC __compute_return_epc_for_insn()
645 * BGTZ | rs = rt != 0 == BLTZALC __compute_return_epc_for_insn()
646 * BGTZ | rs != 0 | rt != 0 == BLTUC __compute_return_epc_for_insn()
647 * BGTZL | rs = 0 | rt != 0 == BGTZC __compute_return_epc_for_insn()
648 * BGTZL | rs = rt != 0 == BLTZC __compute_return_epc_for_insn()
649 * BGTZL | rs != 0 | rt != 0 == BLTC __compute_return_epc_for_insn()
651 * *ZALC varint for BGTZ &&& rt != 0 __compute_return_epc_for_insn()
652 * For real GTZ{,L}, rt is always 0. __compute_return_epc_for_insn()
654 if (cpu_has_mips_r6 && insn.i_format.rt) { __compute_return_epc_for_insn()
656 ((!insn.i_format.rs && insn.i_format.rt) || __compute_return_epc_for_insn()
657 (insn.i_format.rs == insn.i_format.rt))) __compute_return_epc_for_insn()
663 /* rt field assumed to be zero */ __compute_return_epc_for_insn()
690 reg = insn.i_format.rt; __compute_return_epc_for_insn()
724 bit = (insn.i_format.rt >> 2); __compute_return_epc_for_insn()
727 switch (insn.i_format.rt & 3) { __compute_return_epc_for_insn()
733 if (insn.i_format.rt == 2) __compute_return_epc_for_insn()
745 if (insn.i_format.rt == 3) __compute_return_epc_for_insn()
756 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) __compute_return_epc_for_insn()
765 (1ull<<(insn.i_format.rt+32))) == 0) __compute_return_epc_for_insn()
772 if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) __compute_return_epc_for_insn()
780 (1ull<<(insn.i_format.rt+32))) __compute_return_epc_for_insn()
835 if (insn.i_format.rt && !insn.i_format.rs) __compute_return_epc_for_insn()
H A Dmips-r2-to-r6-emul.c403 s32 rt, rs; mult_func() local
405 rt = regs->regs[MIPSInst_RT(ir)]; mult_func()
407 res = (s64)rt * (s64)rs; mult_func()
411 rt = res >> 32; mult_func()
412 res = (s64)rt; mult_func()
430 u32 rt, rs; multu_func() local
432 rt = regs->regs[MIPSInst_RT(ir)]; multu_func()
434 res = (u64)rt * (u64)rs; multu_func()
435 rt = res; multu_func()
436 regs->lo = (s64)rt; multu_func()
453 s32 rt, rs; div_func() local
455 rt = regs->regs[MIPSInst_RT(ir)]; div_func()
458 regs->lo = (s64)(rs / rt); div_func()
459 regs->hi = (s64)(rs % rt); div_func()
475 u32 rt, rs; divu_func() local
477 rt = regs->regs[MIPSInst_RT(ir)]; divu_func()
480 regs->lo = (s64)(rs / rt); divu_func()
481 regs->hi = (s64)(rs % rt); divu_func()
498 s64 rt, rs; dmult_func() local
503 rt = regs->regs[MIPSInst_RT(ir)]; dmult_func()
505 res = rt * rs; dmult_func()
511 : "r"(rt), "r"(rs)); dmult_func()
530 u64 rt, rs; dmultu_func() local
535 rt = regs->regs[MIPSInst_RT(ir)]; dmultu_func()
537 res = rt * rs; dmultu_func()
543 : "r"(rt), "r"(rs)); dmultu_func()
561 s64 rt, rs; ddiv_func() local
566 rt = regs->regs[MIPSInst_RT(ir)]; ddiv_func()
569 regs->lo = rs / rt; ddiv_func()
570 regs->hi = rs % rt; ddiv_func()
586 u64 rt, rs; ddivu_func() local
591 rt = regs->regs[MIPSInst_RT(ir)]; ddivu_func()
594 regs->lo = rs / rt; ddivu_func()
595 regs->hi = rs % rt; ddivu_func()
634 s32 rt, rs; madd_func() local
636 rt = regs->regs[MIPSInst_RT(ir)]; madd_func()
638 res = (s64)rt * (s64)rs; madd_func()
639 rt = regs->hi; madd_func()
641 res += ((((s64)rt) << 32) | (u32)rs); madd_func()
643 rt = res; madd_func()
644 regs->lo = (s64)rt; madd_func()
663 u32 rt, rs; maddu_func() local
665 rt = regs->regs[MIPSInst_RT(ir)]; maddu_func()
667 res = (u64)rt * (u64)rs; maddu_func()
668 rt = regs->hi; maddu_func()
670 res += ((((s64)rt) << 32) | (u32)rs); maddu_func()
672 rt = res; maddu_func()
673 regs->lo = (s64)rt; maddu_func()
692 s32 rt, rs; msub_func() local
694 rt = regs->regs[MIPSInst_RT(ir)]; msub_func()
696 res = (s64)rt * (s64)rs; msub_func()
697 rt = regs->hi; msub_func()
699 res = ((((s64)rt) << 32) | (u32)rs) - res; msub_func()
701 rt = res; msub_func()
702 regs->lo = (s64)rt; msub_func()
721 u32 rt, rs; msubu_func() local
723 rt = regs->regs[MIPSInst_RT(ir)]; msubu_func()
725 res = (u64)rt * (u64)rs; msubu_func()
726 rt = regs->hi; msubu_func()
728 res = ((((s64)rt) << 32) | (u32)rs) - res; msubu_func()
730 rt = res; msubu_func()
731 regs->lo = (s64)rt; msubu_func()
750 s32 rt, rs; mul_func() local
754 rt = regs->regs[MIPSInst_RT(ir)]; mul_func()
756 res = (s64)rt * (s64)rs; mul_func()
909 unsigned long cpc, epc, nepc, r31, res, rs, rt; mipsr2_decoder() local
938 rt = MIPSInst_RT(inst); mipsr2_decoder()
940 switch (rt) { mipsr2_decoder()
1009 switch (rt) { mipsr2_decoder()
1064 switch (rt) { mipsr2_decoder()
1199 rt = regs->regs[MIPSInst_RT(inst)]; mipsr2_decoder()
1260 : "+&r"(rt), "=&r"(rs), mipsr2_decoder()
1265 regs->regs[MIPSInst_RT(inst)] = rt; mipsr2_decoder()
1272 rt = regs->regs[MIPSInst_RT(inst)]; mipsr2_decoder()
1335 : "+&r"(rt), "=&r"(rs), mipsr2_decoder()
1339 regs->regs[MIPSInst_RT(inst)] = rt; mipsr2_decoder()
1346 rt = regs->regs[MIPSInst_RT(inst)]; mipsr2_decoder()
1406 : "+&r"(rt), "=&r"(rs), mipsr2_decoder()
1416 rt = regs->regs[MIPSInst_RT(inst)]; mipsr2_decoder()
1476 : "+&r"(rt), "=&r"(rs), mipsr2_decoder()
1491 rt = regs->regs[MIPSInst_RT(inst)]; mipsr2_decoder()
1595 : "+&r"(rt), "=&r"(rs), mipsr2_decoder()
1599 regs->regs[MIPSInst_RT(inst)] = rt; mipsr2_decoder()
1610 rt = regs->regs[MIPSInst_RT(inst)]; mipsr2_decoder()
1714 : "+&r"(rt), "=&r"(rs), mipsr2_decoder()
1718 regs->regs[MIPSInst_RT(inst)] = rt; mipsr2_decoder()
1729 rt = regs->regs[MIPSInst_RT(inst)]; mipsr2_decoder()
1833 : "+&r"(rt), "=&r"(rs), mipsr2_decoder()
1847 rt = regs->regs[MIPSInst_RT(inst)]; mipsr2_decoder()
1951 : "+&r"(rt), "=&r"(rs), mipsr2_decoder()
H A Drtlx.c284 struct rtlx_channel *rt; rtlx_write() local
292 rt = &rtlx->channel[index]; rtlx_write()
296 rt_read = rt->rt_read; rtlx_write()
299 count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write, rtlx_write()
300 rt->buffer_size)); rtlx_write()
303 fl = min(count, (size_t) rt->buffer_size - rt->rt_write); rtlx_write()
305 failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); rtlx_write()
311 failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); rtlx_write()
317 rt->rt_write = (rt->rt_write + count) % rt->buffer_size; rtlx_write()
H A Dunaligned.c959 regs->regs[insn.spec3_format.rt] = value; emulate_load_store_insn()
972 regs->regs[insn.spec3_format.rt] = value; emulate_load_store_insn()
985 regs->regs[insn.spec3_format.rt] = value; emulate_load_store_insn()
993 value = regs->regs[insn.spec3_format.rt]; emulate_load_store_insn()
1006 value = regs->regs[insn.spec3_format.rt]; emulate_load_store_insn()
1036 regs->regs[insn.i_format.rt] = value; emulate_load_store_insn()
1055 regs->regs[insn.i_format.rt] = value; emulate_load_store_insn()
1074 regs->regs[insn.i_format.rt] = value; emulate_load_store_insn()
1093 regs->regs[insn.i_format.rt] = value; emulate_load_store_insn()
1116 regs->regs[insn.i_format.rt] = value; emulate_load_store_insn()
1128 value = regs->regs[insn.i_format.rt]; emulate_load_store_insn()
1148 value = regs->regs[insn.i_format.rt]; emulate_load_store_insn()
1176 value = regs->regs[insn.i_format.rt]; emulate_load_store_insn()
1641 reg = insn.mm_i_format.rt; emulate_load_store_microMIPS()
1645 reg = insn.mm_i_format.rt; emulate_load_store_microMIPS()
1649 reg = insn.mm_i_format.rt; emulate_load_store_microMIPS()
1653 reg = insn.mm_i_format.rt; emulate_load_store_microMIPS()
1657 reg = insn.mm_i_format.rt; emulate_load_store_microMIPS()
1661 reg = insn.mm_i_format.rt; emulate_load_store_microMIPS()
1665 reg = insn.mm_i_format.rt; emulate_load_store_microMIPS()
1715 reg = reg16to32[insn.mm16_rb_format.rt]; emulate_load_store_microMIPS()
1719 reg = reg16to32[insn.mm16_rb_format.rt]; emulate_load_store_microMIPS()
1723 reg = reg16to32st[insn.mm16_rb_format.rt]; emulate_load_store_microMIPS()
1727 reg = reg16to32st[insn.mm16_rb_format.rt]; emulate_load_store_microMIPS()
1731 reg = insn.mm16_r5_format.rt; emulate_load_store_microMIPS()
1735 reg = insn.mm16_r5_format.rt; emulate_load_store_microMIPS()
1739 reg = reg16to32[insn.mm16_r3_format.rt]; emulate_load_store_microMIPS()
H A Dprocess.c211 mmi.mm16_r5_format.rt == 31) || is_ra_save_ins()
224 mmi.i_format.rt == 31); is_ra_save_ins()
230 ip->i_format.rt == 31; is_ra_save_ins()
250 (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) || is_jump_ins()
286 mmi.mm16_r5_format.rt == 29); is_sp_move_ins()
289 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29; is_sp_move_ins()
292 if (ip->i_format.rs != 29 || ip->i_format.rt != 29) is_sp_move_ins()
H A Dtraps.c620 static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt) simulate_rdhwr() argument
628 regs->regs[rt] = smp_processor_id(); simulate_rdhwr()
631 regs->regs[rt] = min(current_cpu_data.dcache.linesz, simulate_rdhwr()
635 regs->regs[rt] = read_c0_count(); simulate_rdhwr()
641 regs->regs[rt] = 1; simulate_rdhwr()
644 regs->regs[rt] = 2; simulate_rdhwr()
648 regs->regs[rt] = ti->tp_value; simulate_rdhwr()
659 int rt = (opcode & RT) >> 16; simulate_rdhwr_normal() local
661 simulate_rdhwr(regs, rd, rt); simulate_rdhwr_normal()
673 int rt = (opcode & MM_RT) >> 21; simulate_rdhwr_mm() local
674 simulate_rdhwr(regs, rd, rt); simulate_rdhwr_mm()
/linux-4.1.27/arch/arm/lib/
H A Decard.S16 #define CPSR2SPSR(rt) \
17 mrs rt, cpsr; \
18 msr spsr_cxsf, rt
/linux-4.1.27/net/mpls/
H A Daf_mpls.c40 static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
46 struct mpls_route *rt = NULL; mpls_route_input_rcu() local
51 rt = rcu_dereference(platform_label[index]); mpls_route_input_rcu()
53 return rt; mpls_route_input_rcu()
66 static unsigned int mpls_rt_header_size(const struct mpls_route *rt) mpls_rt_header_size() argument
69 return rt->rt_labels * sizeof(struct mpls_shim_hdr); mpls_rt_header_size()
89 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb, mpls_egress() argument
141 struct mpls_route *rt; mpls_forward() local
175 rt = mpls_route_input_rcu(net, dec.label); mpls_forward()
176 if (!rt) mpls_forward()
180 out_dev = rcu_dereference(rt->rt_dev); mpls_forward()
195 new_header_size = mpls_rt_header_size(rt); mpls_forward()
213 if (!mpls_egress(rt, skb, dec)) mpls_forward()
223 for (i = rt->rt_labels - 1; i >= 0; i--) { mpls_forward()
224 hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos); mpls_forward()
229 err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb); mpls_forward()
265 struct mpls_route *rt; mpls_rt_alloc() local
267 rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL); mpls_rt_alloc()
268 if (rt) mpls_rt_alloc()
269 rt->rt_via_alen = alen; mpls_rt_alloc()
270 return rt; mpls_rt_alloc()
273 static void mpls_rt_free(struct mpls_route *rt) mpls_rt_free() argument
275 if (rt) mpls_rt_free()
276 kfree_rcu(rt, rt_rcu); mpls_rt_free()
286 struct mpls_route *rt = new ? new : old; mpls_notify_route() local
289 if (rt && (index >= 16)) mpls_notify_route()
290 rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags); mpls_notify_route()
298 struct mpls_route *rt, *old = NULL; mpls_route_update() local
303 rt = rtnl_dereference(platform_label[index]); mpls_route_update()
304 if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) { mpls_route_update()
306 old = rt; mpls_route_update()
335 struct mpls_route *rt, *old; mpls_route_add() local
395 rt = mpls_rt_alloc(cfg->rc_via_alen); mpls_route_add()
396 if (!rt) mpls_route_add()
399 rt->rt_labels = cfg->rc_output_labels; mpls_route_add()
400 for (i = 0; i < rt->rt_labels; i++) mpls_route_add()
401 rt->rt_label[i] = cfg->rc_output_label[i]; mpls_route_add()
402 rt->rt_protocol = cfg->rc_protocol; mpls_route_add()
403 RCU_INIT_POINTER(rt->rt_dev, dev); mpls_route_add()
404 rt->rt_via_table = cfg->rc_via_table; mpls_route_add()
405 memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen); mpls_route_add()
407 mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo); mpls_route_add()
528 struct mpls_route *rt = rtnl_dereference(platform_label[index]); mpls_ifdown() local
529 if (!rt) mpls_ifdown()
531 if (rtnl_dereference(rt->rt_dev) != dev) mpls_ifdown()
533 rt->rt_dev = NULL; mpls_ifdown()
819 u32 label, struct mpls_route *rt, int flags) mpls_dump_route()
835 rtm->rtm_protocol = rt->rt_protocol; mpls_dump_route()
840 if (rt->rt_labels && mpls_dump_route()
841 nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label)) mpls_dump_route()
843 if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen)) mpls_dump_route()
845 dev = rtnl_dereference(rt->rt_dev); mpls_dump_route()
875 struct mpls_route *rt; mpls_dump_routes() local
876 rt = rtnl_dereference(platform_label[index]); mpls_dump_routes()
877 if (!rt) mpls_dump_routes()
882 index, rt, NLM_F_MULTI) < 0) mpls_dump_routes()
890 static inline size_t lfib_nlmsg_size(struct mpls_route *rt) lfib_nlmsg_size() argument
894 + nla_total_size(2 + rt->rt_via_alen) /* RTA_VIA */ lfib_nlmsg_size()
896 if (rt->rt_labels) /* RTA_NEWDST */ lfib_nlmsg_size()
897 payload += nla_total_size(rt->rt_labels * 4); lfib_nlmsg_size()
898 if (rt->rt_dev) /* RTA_OIF */ lfib_nlmsg_size()
903 static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt, rtmsg_lfib() argument
911 skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL); rtmsg_lfib()
915 err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags); rtmsg_lfib()
1099 struct mpls_route *rt = rtnl_dereference(platform_label[index]); mpls_net_exit() local
1101 mpls_rt_free(rt); mpls_net_exit()
818 mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, u32 label, struct mpls_route *rt, int flags) mpls_dump_route() argument
/linux-4.1.27/drivers/media/radio/
H A Dradio-aimslab.c69 struct rtrack *rt = kzalloc(sizeof(struct rtrack), GFP_KERNEL); rtrack_alloc() local
71 if (rt) rtrack_alloc()
72 rt->curvol = 0xff; rtrack_alloc()
73 return rt ? &rt->isa : NULL; rtrack_alloc()
88 struct rtrack *rt = container_of(isa, struct rtrack, isa); rtrack_set_pins() local
91 if (!v4l2_ctrl_g_ctrl(rt->isa.mute)) rtrack_set_pins()
101 outb_p(bits, rt->isa.io); rtrack_set_pins()
119 struct rtrack *rt = container_of(isa, struct rtrack, isa); rtrack_s_mute_volume() local
120 int curvol = rt->curvol; rtrack_s_mute_volume()
139 rt->curvol = vol; rtrack_s_mute_volume()
/linux-4.1.27/net/netfilter/
H A Dnf_conntrack_broadcast.c30 struct rtable *rt = skb_rtable(skb); nf_conntrack_broadcast_help() local
38 if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) nf_conntrack_broadcast_help()
44 in_dev = __in_dev_get_rcu(rt->dst.dev); nf_conntrack_broadcast_help()
H A Dxt_addrtype.c41 struct rt6_info *rt; match_lookup_rt6() local
61 route_err = afinfo->route(net, (struct dst_entry **)&rt, match_lookup_rt6()
71 if (rt->rt6i_flags & RTF_REJECT) match_lookup_rt6()
74 if (dev == NULL && rt->rt6i_flags & RTF_LOCAL) match_lookup_rt6()
76 if (rt->rt6i_flags & RTF_ANYCAST) match_lookup_rt6()
79 dst_release(&rt->dst); match_lookup_rt6()
H A Dxt_TCPMSS.c52 struct rtable *rt = NULL; tcpmss_reverse_mtu() local
68 ai->route(net, (struct dst_entry **)&rt, &fl, false); tcpmss_reverse_mtu()
71 if (rt != NULL) { tcpmss_reverse_mtu()
72 mtu = dst_mtu(&rt->dst); tcpmss_reverse_mtu()
73 dst_release(&rt->dst); tcpmss_reverse_mtu()
H A Dxt_TEE.c61 struct rtable *rt; tee_tg_route4() local
74 rt = ip_route_output_key(net, &fl4); tee_tg_route4()
75 if (IS_ERR(rt)) tee_tg_route4()
79 skb_dst_set(skb, &rt->dst); tee_tg_route4()
80 skb->dev = rt->dst.dev; tee_tg_route4()
/linux-4.1.27/arch/mips/kvm/
H A Ddyntrans.c75 int32_t rt, rd, sel; kvm_mips_trans_mfc0() local
79 rt = (inst >> 16) & 0x1f; kvm_mips_trans_mfc0()
85 mfc0_inst |= ((rt & 0x1f) << 16); kvm_mips_trans_mfc0()
88 mfc0_inst |= ((rt & 0x1f) << 16); kvm_mips_trans_mfc0()
117 int32_t rt, rd, sel; kvm_mips_trans_mtc0() local
121 rt = (inst >> 16) & 0x1f; kvm_mips_trans_mtc0()
125 mtc0_inst |= ((rt & 0x1f) << 16); kvm_mips_trans_mtc0()
H A Demulate.c79 switch (insn.i_format.rt) { kvm_compute_return_epc()
147 arch->gprs[insn.i_format.rt]) kvm_compute_return_epc()
157 arch->gprs[insn.i_format.rt]) kvm_compute_return_epc()
166 /* rt field assumed to be zero */ kvm_compute_return_epc()
176 /* rt field assumed to be zero */ kvm_compute_return_epc()
982 int32_t rt, rd, copz, sel, co_bit, op; kvm_mips_emulate_CP0() local
996 rt = (inst >> 16) & 0x1f; kvm_mips_emulate_CP0()
1036 vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu); kvm_mips_emulate_CP0()
1038 vcpu->arch.gprs[rt] = 0x0; kvm_mips_emulate_CP0()
1043 vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; kvm_mips_emulate_CP0()
1052 pc, rd, sel, rt, vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0()
1057 vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; kvm_mips_emulate_CP0()
1065 && (vcpu->arch.gprs[rt] >= kvm_mips_emulate_CP0()
1068 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0()
1077 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0()
1082 vcpu->arch.gprs[rt] & ASID_MASK; kvm_mips_emulate_CP0()
1083 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && kvm_mips_emulate_CP0()
1089 vcpu->arch.gprs[rt] kvm_mips_emulate_CP0()
1096 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0()
1100 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0()
1105 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0()
1110 vcpu->arch.gprs[rt], kvm_mips_emulate_CP0()
1116 val = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0()
1188 val = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0()
1225 new_cause = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0()
1237 cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0()
1248 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", kvm_mips_emulate_CP0()
1249 vcpu->arch.pc, rt, rd, sel); kvm_mips_emulate_CP0()
1257 if (rt != 0) { kvm_mips_emulate_CP0()
1258 vcpu->arch.gprs[rt] = kvm_mips_emulate_CP0()
1289 vcpu->arch.gprs[rt]); kvm_mips_emulate_CP0()
1290 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; kvm_mips_emulate_CP0()
1321 int32_t op, base, rt, offset; kvm_mips_emulate_store() local
1335 rt = (inst >> 16) & 0x1f; kvm_mips_emulate_store()
1358 *(u8 *) data = vcpu->arch.gprs[rt]; kvm_mips_emulate_store()
1360 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], kvm_mips_emulate_store()
1383 *(uint32_t *) data = vcpu->arch.gprs[rt]; kvm_mips_emulate_store()
1387 vcpu->arch.gprs[rt], *(uint32_t *) data); kvm_mips_emulate_store()
1408 *(uint16_t *) data = vcpu->arch.gprs[rt]; kvm_mips_emulate_store()
1412 vcpu->arch.gprs[rt], *(uint32_t *) data); kvm_mips_emulate_store()
1433 int32_t op, base, rt, offset; kvm_mips_emulate_load() local
1436 rt = (inst >> 16) & 0x1f; kvm_mips_emulate_load()
1442 vcpu->arch.io_gpr = rt; kvm_mips_emulate_load()
2346 int rt = (inst & RT) >> 16; kvm_mips_handle_ri() local
2355 arch->gprs[rt] = 0; kvm_mips_handle_ri()
2358 arch->gprs[rt] = min(current_cpu_data.dcache.linesz, kvm_mips_handle_ri()
2362 arch->gprs[rt] = kvm_mips_read_count(vcpu); kvm_mips_handle_ri()
2368 arch->gprs[rt] = 1; kvm_mips_handle_ri()
2371 arch->gprs[rt] = 2; kvm_mips_handle_ri()
2375 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); kvm_mips_handle_ri()
/linux-4.1.27/net/netfilter/ipvs/
H A Dip_vs_xmit.c128 struct rtable *rt; do_output_route4() local
137 rt = ip_route_output_key(net, &fl4); do_output_route4()
138 if (IS_ERR(rt)) { do_output_route4()
140 if (PTR_ERR(rt) == -EINVAL && *saddr && do_output_route4()
149 ip_rt_put(rt); do_output_route4()
156 return rt; do_output_route4()
160 static inline int __ip_vs_is_local_route6(struct rt6_info *rt) __ip_vs_is_local_route6() argument
162 return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK; __ip_vs_is_local_route6()
265 struct rtable *rt; /* Route to the other host */ __ip_vs_get_out_rt() local
272 rt = (struct rtable *) dest_dst->dst_cache; __ip_vs_get_out_rt()
281 rt = do_output_route4(net, dest->addr.ip, rt_mode, __ip_vs_get_out_rt()
283 if (!rt) { __ip_vs_get_out_rt()
289 __ip_vs_dst_set(dest, dest_dst, &rt->dst, 0); __ip_vs_get_out_rt()
293 atomic_read(&rt->dst.__refcnt)); __ip_vs_get_out_rt()
306 rt = do_output_route4(net, daddr, rt_mode, &saddr); __ip_vs_get_out_rt()
307 if (!rt) __ip_vs_get_out_rt()
313 local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0; __ip_vs_get_out_rt()
324 ip_rt_put(rt); __ip_vs_get_out_rt()
329 mtu = dst_mtu(&rt->dst); __ip_vs_get_out_rt()
331 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); __ip_vs_get_out_rt()
345 skb_dst_set_noref(skb, &rt->dst); __ip_vs_get_out_rt()
347 skb_dst_set(skb, dst_clone(&rt->dst)); __ip_vs_get_out_rt()
349 skb_dst_set(skb, &rt->dst); __ip_vs_get_out_rt()
355 ip_rt_put(rt); __ip_vs_get_out_rt()
408 struct rt6_info *rt; /* Route to the other host */ __ip_vs_get_out_rt_v6() local
416 rt = (struct rt6_info *) dest_dst->dst_cache; __ip_vs_get_out_rt_v6()
436 rt = (struct rt6_info *) dst; __ip_vs_get_out_rt_v6()
437 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; __ip_vs_get_out_rt_v6()
438 __ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie); __ip_vs_get_out_rt_v6()
442 atomic_read(&rt->dst.__refcnt)); __ip_vs_get_out_rt_v6()
451 rt = (struct rt6_info *) dst; __ip_vs_get_out_rt_v6()
454 local = __ip_vs_is_local_route6(rt); __ip_vs_get_out_rt_v6()
466 dst_release(&rt->dst); __ip_vs_get_out_rt_v6()
472 mtu = dst_mtu(&rt->dst); __ip_vs_get_out_rt_v6()
474 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr); __ip_vs_get_out_rt_v6()
489 skb_dst_set_noref(skb, &rt->dst); __ip_vs_get_out_rt_v6()
491 skb_dst_set(skb, dst_clone(&rt->dst)); __ip_vs_get_out_rt_v6()
493 skb_dst_set(skb, &rt->dst); __ip_vs_get_out_rt_v6()
499 dst_release(&rt->dst); __ip_vs_get_out_rt_v6()
680 struct rtable *rt; /* Route to the other host */ ip_vs_nat_xmit() local
704 rt = skb_rtable(skb); ip_vs_nat_xmit()
734 if (skb_cow(skb, rt->dst.dev->hard_header_len)) ip_vs_nat_xmit()
770 struct rt6_info *rt; /* Route to the other host */ ip_vs_nat_xmit_v6() local
793 rt = (struct rt6_info *) skb_dst(skb); ip_vs_nat_xmit_v6()
814 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) { ip_vs_nat_xmit_v6()
825 if (skb_cow(skb, rt->dst.dev->hard_header_len)) ip_vs_nat_xmit_v6()
961 struct rtable *rt; /* Route to the other host */ ip_vs_tunnel_xmit() local
988 rt = skb_rtable(skb); ip_vs_tunnel_xmit()
989 tdev = rt->dst.dev; ip_vs_tunnel_xmit()
1056 struct rt6_info *rt; /* Route to the other host */ ip_vs_tunnel_xmit_v6() local
1082 rt = (struct rt6_info *) skb_dst(skb); ip_vs_tunnel_xmit_v6()
1083 tdev = rt->dst.dev; ip_vs_tunnel_xmit_v6()
1234 struct rtable *rt; /* Route to the other host */ ip_vs_icmp_xmit() local
1268 rt = skb_rtable(skb); ip_vs_icmp_xmit()
1300 if (skb_cow(skb, rt->dst.dev->hard_header_len)) ip_vs_icmp_xmit()
1327 struct rt6_info *rt; /* Route to the other host */ ip_vs_icmp_xmit_v6() local
1360 rt = (struct rt6_info *) skb_dst(skb); ip_vs_icmp_xmit_v6()
1381 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) { ip_vs_icmp_xmit_v6()
1392 if (skb_cow(skb, rt->dst.dev->hard_header_len)) ip_vs_icmp_xmit_v6()
/linux-4.1.27/arch/powerpc/kernel/
H A Dkvm.c86 static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt) kvm_patch_ins_ll() argument
89 kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); kvm_patch_ins_ll()
91 kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc)); kvm_patch_ins_ll()
95 static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt) kvm_patch_ins_ld() argument
98 kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); kvm_patch_ins_ld()
100 kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc)); kvm_patch_ins_ld()
104 static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt) kvm_patch_ins_lwz() argument
106 kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff)); kvm_patch_ins_lwz()
109 static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt) kvm_patch_ins_std() argument
112 kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc)); kvm_patch_ins_std()
114 kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc)); kvm_patch_ins_std()
118 static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt) kvm_patch_ins_stw() argument
120 kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc)); kvm_patch_ins_stw()
164 static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt) kvm_patch_ins_mtmsrd() argument
189 switch (get_rt(rt)) { kvm_patch_ins_mtmsrd()
199 p[kvm_emulate_mtmsrd_reg_offs] |= rt; kvm_patch_ins_mtmsrd()
217 static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt) kvm_patch_ins_mtmsr() argument
244 switch (get_rt(rt)) { kvm_patch_ins_mtmsr()
258 p[kvm_emulate_mtmsr_reg1_offs] |= rt; kvm_patch_ins_mtmsr()
259 p[kvm_emulate_mtmsr_reg2_offs] |= rt; kvm_patch_ins_mtmsr()
278 static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one) kvm_patch_ins_wrtee() argument
309 switch (get_rt(rt)) { kvm_patch_ins_wrtee()
319 p[kvm_emulate_wrtee_reg_offs] |= rt; kvm_patch_ins_wrtee()
376 static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb) kvm_patch_ins_mtsrin() argument
402 p[kvm_emulate_mtsrin_reg2_offs] |= rt; kvm_patch_ins_mtsrin()
/linux-4.1.27/arch/arm/kvm/
H A Dmmio.c118 *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data; kvm_handle_mmio_return()
126 unsigned long rt; decode_hsr() local
148 rt = kvm_vcpu_dabt_get_rd(vcpu); decode_hsr()
152 vcpu->arch.mmio_decode.rt = rt; decode_hsr()
166 unsigned long rt; io_mem_abort() local
186 rt = vcpu->arch.mmio_decode.rt; io_mem_abort()
189 data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len); io_mem_abort()
/linux-4.1.27/drivers/input/touchscreen/
H A Dtsc2007.c133 u32 rt = 0; tsc2007_calculate_pressure() local
141 rt = tc->z2 - tc->z1; tsc2007_calculate_pressure()
142 rt *= tc->x; tsc2007_calculate_pressure()
143 rt *= tsc->x_plate_ohms; tsc2007_calculate_pressure()
144 rt /= tc->z1; tsc2007_calculate_pressure()
145 rt = (rt + 2047) >> 12; tsc2007_calculate_pressure()
148 return rt; tsc2007_calculate_pressure()
178 u32 rt; tsc2007_soft_irq() local
185 rt = tsc2007_calculate_pressure(ts, &tc); tsc2007_soft_irq()
187 if (!rt && !ts->get_pendown_state) { tsc2007_soft_irq()
196 if (rt <= ts->max_rt) { tsc2007_soft_irq()
199 tc.x, tc.y, rt); tsc2007_soft_irq()
204 input_report_abs(input, ABS_PRESSURE, rt); tsc2007_soft_irq()
214 dev_dbg(&ts->client->dev, "ignored pressure %d\n", rt); tsc2007_soft_irq()
301 if (!of_property_read_u32(np, "ti,max-rt", &val32)) tsc2007_probe_dt()
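The tsc2007_calculate_pressure() hits above implement the usual 4-wire resistive touch estimate Rt = Rx_plate * (x / 4096) * (z2 - z1) / z1, where the final shift supplies the divide by 4096 for a 12-bit ADC. A self-contained sketch of the same arithmetic, using a 64-bit intermediate to avoid overflow; parameter names are illustrative only.

    #include <stdint.h>

    static uint32_t pressure_sketch(uint32_t x, uint32_t z1, uint32_t z2,
                                    uint32_t x_plate_ohms)
    {
            uint64_t rt;

            if (!z1)                        /* no valid touch sample */
                    return 0;

            rt = (uint64_t)(z2 - z1) * x * x_plate_ohms;  /* Rx_plate * x * (z2 - z1) */
            rt /= z1;                                     /* ... / z1 */
            return (uint32_t)((rt + 2047) >> 12);         /* round, then / 4096 */
    }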
H A D88pm860x-ts.c57 int z1, z2, rt = 0; pm860x_touch_handler() local
72 rt = z2 / z1 - 1; pm860x_touch_handler()
73 rt = (rt * touch->res_x * x) >> ACCURATE_BIT; pm860x_touch_handler()
74 dev_dbg(chip->dev, "z1:%d, z2:%d, rt:%d\n", pm860x_touch_handler()
75 z1, z2, rt); pm860x_touch_handler()
79 input_report_abs(touch->idev, ABS_PRESSURE, rt); pm860x_touch_handler()
/linux-4.1.27/init/
H A Dinit_task.c6 #include <linux/sched/rt.h>
/linux-4.1.27/kernel/locking/
H A Drtmutex-debug.c9 * This code is based on the rt.c implementation in the preempt-rt tree.
17 * See rt.c in preempt-rt for proper credits and further information
20 #include <linux/sched/rt.h>
H A Drtmutex-tester.c2 * RT-Mutex-tester: scriptable tester for rt mutexes
13 #include <linux/sched/rt.h>
335 * sysfs_test_status - sysfs interface for rt tester
383 threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id); init_test_thread()
H A Drtmutex_common.h105 * Constants for rt mutex functions which have a selectable deadlock
H A Drtmutex.c11 * See Documentation/locking/rt-mutex-design.txt for details.
16 #include <linux/sched/rt.h>
744 * Try to take an rt-mutex
1256 * Slow path to release a rt-mutex:
1484 * __rt_mutex_init - initialize the rt lock
1486 * @lock: the rt lock to be initialized
1488 * Initialize the rt lock to unlocked state.
1490 * Initializing of a locked rt lock is not allowed
1591 * @lock: the rt lock query
/linux-4.1.27/kernel/sched/
H A Drt.c116 return container_of(rt_se, struct task_struct, rt); rt_task_of()
172 rt_se->rt_rq = &rq->rt; init_tg_rt_entry()
227 return container_of(rt_se, struct task_struct, rt); rt_task_of()
232 return container_of(rt_rq, struct rq, rt); rq_of_rt_rq()
246 return &rq->rt; rt_rq_of_se()
264 return rq->rt.highest_prio.curr > prev->prio; need_pull_rt_task()
322 rt_rq = &rq_of_rt_rq(rt_rq)->rt; inc_rt_migration()
339 rt_rq = &rq_of_rt_rq(rt_rq)->rt; dec_rt_migration()
350 return !plist_head_empty(&rq->rt.pushable_tasks); has_pushable_tasks()
364 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); enqueue_pushable_task()
366 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); enqueue_pushable_task()
369 if (p->prio < rq->rt.highest_prio.next) enqueue_pushable_task()
370 rq->rt.highest_prio.next = p->prio; enqueue_pushable_task()
375 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); dequeue_pushable_task()
379 p = plist_first_entry(&rq->rt.pushable_tasks, dequeue_pushable_task()
381 rq->rt.highest_prio.next = p->prio; dequeue_pushable_task()
383 rq->rt.highest_prio.next = MAX_RT_PRIO; dequeue_pushable_task()
564 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
603 return &cpu_rq(cpu)->rt; sched_rt_period_rt_rq()
846 * When we're idle and a woken (rt) task is for_each_cpu()
939 struct sched_rt_entity *rt_se = &curr->rt; update_curr_rt()
981 BUG_ON(&rq->rt != rt_rq); dequeue_top_rt_rq()
997 BUG_ON(&rq->rt != rt_rq); enqueue_top_rt_rq()
1019 if (&rq->rt != rt_rq) inc_rt_prio_smp()
1035 if (&rq->rt != rt_rq) dec_rt_prio_smp()
1231 enqueue_top_rt_rq(&rq->rt); enqueue_rt_entity()
1246 enqueue_top_rt_rq(&rq->rt);
1255 struct sched_rt_entity *rt_se = &p->rt; enqueue_task_rt()
1268 struct sched_rt_entity *rt_se = &p->rt; dequeue_task_rt()
1296 struct sched_rt_entity *rt_se = &p->rt; requeue_task_rt()
1360 p->prio < cpu_rq(target)->rt.highest_prio.curr) select_task_rq_rt()
1447 struct rt_rq *rt_rq = &rq->rt; _pick_next_task_rt()
1465 struct rt_rq *rt_rq = &rq->rt; pick_next_task_rt()
1509 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) put_prev_task_rt()
1532 struct plist_head *head = &rq->rt.pushable_tasks; pick_highest_pushable_task()
1637 if (lowest_rq->rt.highest_prio.curr <= task->prio) { find_lock_lowest_rq()
1668 if (lowest_rq->rt.highest_prio.curr > task->prio) find_lock_lowest_rq()
1686 p = plist_first_entry(&rq->rt.pushable_tasks, pick_next_pushable_task()
1710 if (!rq->rt.overloaded) push_rt_task()
1800 * rq->rt.push_cpu holds the last cpu returned by this function,
1805 int prev_cpu = rq->rt.push_cpu; rto_next_cpu()
1829 rq->rt.push_cpu = cpu; rto_next_cpu()
1847 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr) find_next_push_cpu()
1861 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { tell_cpu_to_push()
1862 raw_spin_lock(&rq->rt.push_lock); tell_cpu_to_push()
1864 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { tell_cpu_to_push()
1869 rq->rt.push_flags |= RT_PUSH_IPI_RESTART; tell_cpu_to_push()
1870 raw_spin_unlock(&rq->rt.push_lock); tell_cpu_to_push()
1873 raw_spin_unlock(&rq->rt.push_lock); tell_cpu_to_push()
1878 rq->rt.push_cpu = rq->cpu; tell_cpu_to_push()
1883 rq->rt.push_flags = RT_PUSH_IPI_EXECUTING; tell_cpu_to_push()
1885 irq_work_queue_on(&rq->rt.push_work, cpu); tell_cpu_to_push()
1911 /* Pass the IPI to the next rt overloaded queue */ try_to_push_tasks()
1986 if (src_rq->rt.highest_prio.next >= pull_rt_task()
1987 this_rq->rt.highest_prio.curr) pull_rt_task()
2007 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { pull_rt_task()
2090 BUG_ON(!rq->rt.rt_nr_migratory); set_cpus_allowed_rt()
2091 rq->rt.rt_nr_migratory--; set_cpus_allowed_rt()
2095 rq->rt.rt_nr_migratory++; set_cpus_allowed_rt()
2098 update_rt_migration(&rq->rt); set_cpus_allowed_rt()
2104 if (rq->rt.overloaded) rq_online_rt()
2109 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); rq_online_rt()
2115 if (rq->rt.overloaded) rq_offline_rt()
2124 * When switch from the rt queue, we bring ourselves to a position
2136 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) switched_from_rt()
2172 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded && switched_to_rt()
2206 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p) prio_changed_rt()
2235 if (p->rt.watchdog_stamp != jiffies) { watchdog()
2236 p->rt.timeout++; watchdog()
2237 p->rt.watchdog_stamp = jiffies; watchdog()
2241 if (p->rt.timeout > next) watchdog()
2248 struct sched_rt_entity *rt_se = &p->rt; task_tick_rt()
2261 if (--p->rt.time_slice) task_tick_rt()
2264 p->rt.time_slice = sched_rr_timeslice; task_tick_rt()
H A DMakefile15 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
H A Dcpupri.c32 #include <linux/sched/rt.h>
182 * priority, as that will trigger an rt pull anyway. cpupri_set()
/linux-4.1.27/arch/arm/net/
H A Dbpf_jit_32.h155 #define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
157 #define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
159 #define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \
161 #define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
199 #define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \
/linux-4.1.27/arch/mips/math-emu/
H A Dcp1emu.c104 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; microMIPS32_to_MIPS32()
105 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; microMIPS32_to_MIPS32()
109 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; microMIPS32_to_MIPS32()
110 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; microMIPS32_to_MIPS32()
114 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; microMIPS32_to_MIPS32()
115 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; microMIPS32_to_MIPS32()
119 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; microMIPS32_to_MIPS32()
120 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; microMIPS32_to_MIPS32()
124 if ((insn.mm_i_format.rt == mm_bc1f_op) || microMIPS32_to_MIPS32()
125 (insn.mm_i_format.rt == mm_bc1t_op)) { microMIPS32_to_MIPS32()
129 (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0; microMIPS32_to_MIPS32()
183 mips32_insn.r_format.rt = microMIPS32_to_MIPS32()
268 mips32_insn.r_format.rt = microMIPS32_to_MIPS32()
270 mips32_insn.r_format.rd = insn.mm_fp4_format.rt; microMIPS32_to_MIPS32()
292 insn.mm_fp3_format.rt; microMIPS32_to_MIPS32()
316 insn.mm_fp3_format.rt; microMIPS32_to_MIPS32()
356 insn.mm_fp1_format.rt; microMIPS32_to_MIPS32()
375 insn.mm_fp1_format.rt; microMIPS32_to_MIPS32()
398 mips32_insn.fp1_format.rt = microMIPS32_to_MIPS32()
399 insn.mm_fp1_format.rt; microMIPS32_to_MIPS32()
413 mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt; microMIPS32_to_MIPS32()
463 switch (insn.i_format.rt) { isBranchInstr()
467 insn.i_format.rt == bltzall_op)) isBranchInstr()
490 insn.i_format.rt == bgezall_op)) isBranchInstr()
532 regs->regs[insn.i_format.rt]) isBranchInstr()
546 regs->regs[insn.i_format.rt]) isBranchInstr()
563 * BLEZ | rs = 0 | rt != 0 == BLEZALC isBranchInstr()
564 * BLEZ | rs = rt != 0 == BGEZALC isBranchInstr()
565 * BLEZ | rs != 0 | rt != 0 == BGEUC isBranchInstr()
566 * BLEZL | rs = 0 | rt != 0 == BLEZC isBranchInstr()
567 * BLEZL | rs = rt != 0 == BGEZC isBranchInstr()
568 * BLEZL | rs != 0 | rt != 0 == BGEC isBranchInstr()
570 * For real BLEZ{,L}, rt is always 0. isBranchInstr()
572 if (cpu_has_mips_r6 && insn.i_format.rt) { isBranchInstr()
574 ((!insn.i_format.rs && insn.i_format.rt) || isBranchInstr()
575 (insn.i_format.rs == insn.i_format.rt))) isBranchInstr()
599 * BGTZ | rs = 0 | rt != 0 == BGTZALC isBranchInstr()
600 * BGTZ | rs = rt != 0 == BLTZALC isBranchInstr()
601 * BGTZ | rs != 0 | rt != 0 == BLTUC isBranchInstr()
602 * BGTZL | rs = 0 | rt != 0 == BGTZC isBranchInstr()
603 * BGTZL | rs = rt != 0 == BLTZC isBranchInstr()
604 * BGTZL | rs != 0 | rt != 0 == BLTC isBranchInstr()
606 * *ZALC varint for BGTZ &&& rt != 0 isBranchInstr()
607 * For real GTZ{,L}, rt is always 0. isBranchInstr()
609 if (cpu_has_mips_r6 && insn.i_format.rt) { isBranchInstr()
611 ((!insn.i_format.rs && insn.i_format.rt) || isBranchInstr()
612 (insn.i_format.rs == insn.i_format.rt))) isBranchInstr()
634 if (insn.i_format.rt && !insn.i_format.rs) isBranchInstr()
642 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) isBranchInstr()
648 if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0) isBranchInstr()
654 if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) isBranchInstr()
660 if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) isBranchInstr()
712 if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1) isBranchInstr()
716 if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)) isBranchInstr()
742 bit = (insn.i_format.rt >> 2); isBranchInstr()
745 switch (insn.i_format.rt & 3) { isBranchInstr()
1126 /* copregister fs -> gpr[rt] */ cop1Emulate()
1137 /* copregister fs <- rt */ cop1Emulate()
1145 /* copregister rd -> gpr[rt] */ cop1Emulate()
1156 /* copregister rd <- gpr[rt] */ cop1Emulate()
1161 /* copregister rd -> gpr[rt] */ cop1Emulate()
1169 /* copregister rd <- rt */ cop1Emulate()
1174 /* cop control register rd -> gpr[rt] */ cop1Emulate()
1179 /* copregister rd <- rt */ cop1Emulate()
/linux-4.1.27/sound/parisc/
H A Dharmony.c411 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_playback_prepare() local
422 h->st.rate = snd_harmony_rate_bits(rt->rate); snd_harmony_playback_prepare()
423 h->st.format = snd_harmony_set_data_format(h, rt->format, 0); snd_harmony_playback_prepare()
425 if (rt->channels == 2) snd_harmony_playback_prepare()
432 h->pbuf.addr = rt->dma_addr; snd_harmony_playback_prepare()
441 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_capture_prepare() local
452 h->st.rate = snd_harmony_rate_bits(rt->rate); snd_harmony_capture_prepare()
453 h->st.format = snd_harmony_set_data_format(h, rt->format, 0); snd_harmony_capture_prepare()
455 if (rt->channels == 2) snd_harmony_capture_prepare()
462 h->cbuf.addr = rt->dma_addr; snd_harmony_capture_prepare()
470 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_playback_pointer() local
493 return bytes_to_frames(rt, played); snd_harmony_playback_pointer()
499 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_capture_pointer() local
522 return bytes_to_frames(rt, caught); snd_harmony_capture_pointer()
529 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_playback_open() local
533 rt->hw = snd_harmony_playback; snd_harmony_playback_open()
534 snd_pcm_hw_constraint_list(rt, 0, SNDRV_PCM_HW_PARAM_RATE, snd_harmony_playback_open()
537 err = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS); snd_harmony_playback_open()
548 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_capture_open() local
552 rt->hw = snd_harmony_capture; snd_harmony_capture_open()
553 snd_pcm_hw_constraint_list(rt, 0, SNDRV_PCM_HW_PARAM_RATE, snd_harmony_capture_open()
556 err = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS); snd_harmony_capture_open()
/linux-4.1.27/arch/arm/probes/kprobes/
H A Dactions-arm.c81 int rt = (insn >> 12) & 0xf; emulate_ldrdstrd() local
85 register unsigned long rtv asm("r0") = regs->uregs[rt]; emulate_ldrdstrd()
86 register unsigned long rt2v asm("r1") = regs->uregs[rt+1]; emulate_ldrdstrd()
99 regs->uregs[rt] = rtv; emulate_ldrdstrd()
100 regs->uregs[rt+1] = rt2v; emulate_ldrdstrd()
110 int rt = (insn >> 12) & 0xf; emulate_ldr() local
126 if (rt == 15) emulate_ldr()
129 regs->uregs[rt] = rtv; emulate_ldr()
141 int rt = (insn >> 12) & 0xf; emulate_str() local
145 register unsigned long rtv asm("r0") = (rt == 15) ? rtpc emulate_str()
146 : regs->uregs[rt]; emulate_str()
H A Dactions-thumb.c113 int rt = (insn >> 12) & 0xf; t32_simulate_ldr_literal() local
125 if (rt == 15) { t32_simulate_ldr_literal()
143 regs->uregs[rt] = rtv; t32_simulate_ldr_literal()
191 int rt = (insn >> 12) & 0xf; t32_emulate_ldrstr() local
195 register unsigned long rtv asm("r0") = regs->uregs[rt]; t32_emulate_ldrstr()
207 if (rt == 15) /* Can't be true for a STR as they aren't allowed */ t32_emulate_ldrstr()
210 regs->uregs[rt] = rtv; t32_emulate_ldrstr()
328 int rt = (insn >> 8) & 0x7; t16_simulate_ldr_literal() local
329 regs->uregs[rt] = base[index]; t16_simulate_ldr_literal()
338 int rt = (insn >> 8) & 0x7; t16_simulate_ldrstr_sp_relative() local
340 regs->uregs[rt] = base[index]; t16_simulate_ldrstr_sp_relative()
342 base[index] = regs->uregs[rt]; t16_simulate_ldrstr_sp_relative()
352 int rt = (insn >> 8) & 0x7; t16_simulate_reladr() local
353 regs->uregs[rt] = base + offset * 4; t16_simulate_reladr()
/linux-4.1.27/arch/x86/pci/
H A Dirq.c66 struct irq_routing_table *rt; pirq_check_routing_table() local
70 rt = (struct irq_routing_table *) addr; pirq_check_routing_table()
71 if (rt->signature != PIRQ_SIGNATURE || pirq_check_routing_table()
72 rt->version != PIRQ_VERSION || pirq_check_routing_table()
73 rt->size % 16 || pirq_check_routing_table()
74 rt->size < sizeof(struct irq_routing_table)) pirq_check_routing_table()
77 for (i = 0; i < rt->size; i++) pirq_check_routing_table()
81 rt); pirq_check_routing_table()
82 return rt; pirq_check_routing_table()
96 struct irq_routing_table *rt; pirq_find_routing_table() local
99 rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr)); pirq_find_routing_table()
100 if (rt) pirq_find_routing_table()
101 return rt; pirq_find_routing_table()
105 rt = pirq_check_routing_table(addr); pirq_find_routing_table()
106 if (rt) pirq_find_routing_table()
107 return rt; pirq_find_routing_table()
120 struct irq_routing_table *rt = pirq_table; pirq_peer_trick() local
126 for (i = 0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) { pirq_peer_trick()
127 e = &rt->slots[i]; pirq_peer_trick()
820 struct irq_routing_table *rt = pirq_table; pirq_find_router() local
824 if (!rt->signature) { pirq_find_router()
838 rt->rtr_vendor, rt->rtr_device); pirq_find_router()
840 pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn); pirq_find_router()
843 "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn); pirq_find_router()
849 if (rt->rtr_vendor == h->vendor && pirq_find_router()
850 h->probe(r, pirq_router_dev, rt->rtr_device)) pirq_find_router()
866 struct irq_routing_table *rt = pirq_table; pirq_get_info() local
867 int entries = (rt->size - sizeof(struct irq_routing_table)) / pirq_get_info()
871 for (info = rt->slots; entries--; info++) pirq_get_info()
H A Dpcbios.c382 struct irq_routing_table *rt = NULL; pcibios_get_irq_routing_table() local
417 rt = kmalloc(sizeof(struct irq_routing_table) + opt.size, GFP_KERNEL); pcibios_get_irq_routing_table()
418 if (rt) { pcibios_get_irq_routing_table()
419 memset(rt, 0, sizeof(struct irq_routing_table)); pcibios_get_irq_routing_table()
420 rt->size = opt.size + sizeof(struct irq_routing_table); pcibios_get_irq_routing_table()
421 rt->exclusive_irqs = map; pcibios_get_irq_routing_table()
422 memcpy(rt->slots, (void *) page, opt.size); pcibios_get_irq_routing_table()
427 return rt; pcibios_get_irq_routing_table()
/linux-4.1.27/net/openvswitch/
H A Dvport-gre.c139 struct rtable *rt; gre_tnl_send() local
151 rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE); gre_tnl_send()
152 if (IS_ERR(rt)) { gre_tnl_send()
153 err = PTR_ERR(rt); gre_tnl_send()
159 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len gre_tnl_send()
191 return iptunnel_xmit(skb->sk, rt, skb, fl.saddr, gre_tnl_send()
195 ip_rt_put(rt); gre_tnl_send()
H A Dvport-geneve.c179 struct rtable *rt; geneve_tnl_send() local
192 rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP); geneve_tnl_send()
193 if (IS_ERR(rt)) { geneve_tnl_send()
194 err = PTR_ERR(rt); geneve_tnl_send()
211 err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr, geneve_tnl_send()
217 ip_rt_put(rt); geneve_tnl_send()
H A Dvport-vxlan.c229 struct rtable *rt; vxlan_tnl_send() local
242 rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP); vxlan_tnl_send()
243 if (IS_ERR(rt)) { vxlan_tnl_send()
244 err = PTR_ERR(rt); vxlan_tnl_send()
259 err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst, vxlan_tnl_send()
264 ip_rt_put(rt); vxlan_tnl_send()
H A Dvport.h247 struct rtable *rt; ovs_tunnel_route_lookup() local
256 rt = ip_route_output_key(net, fl); ovs_tunnel_route_lookup()
257 return rt; ovs_tunnel_route_lookup()
H A Dvport.c584 struct rtable *rt; ovs_tunnel_get_egress_info() local
596 rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto); ovs_tunnel_get_egress_info()
597 if (IS_ERR(rt)) ovs_tunnel_get_egress_info()
598 return PTR_ERR(rt); ovs_tunnel_get_egress_info()
600 ip_rt_put(rt); ovs_tunnel_get_egress_info()
/linux-4.1.27/sound/soc/sh/
H A Dsiu_pcm.c218 struct snd_pcm_runtime *rt = substream->runtime; siu_io_tasklet() local
233 buff = (dma_addr_t)PERIOD_OFFSET(rt->dma_addr, siu_io_tasklet()
236 virt = PERIOD_OFFSET(rt->dma_area, siu_io_tasklet()
245 (dma_addr_t)PERIOD_OFFSET(rt->dma_addr, siu_io_tasklet()
408 struct snd_pcm_runtime *rt = ss->runtime; siu_pcm_prepare() local
417 rt = siu_stream->substream->runtime; siu_pcm_prepare()
423 info->port_id, rt->channels, siu_stream->period_bytes); siu_pcm_prepare()
433 xfer_cnt = bytes_to_frames(rt, siu_stream->period_bytes); siu_pcm_prepare()
437 siu_stream->format = rt->format; siu_pcm_prepare()
442 (unsigned long)rt->dma_addr, siu_stream->buf_bytes, siu_pcm_prepare()
444 siu_stream->format, rt->channels, (int)xfer_cnt); siu_pcm_prepare()
497 struct snd_pcm_runtime *rt = ss->runtime; siu_pcm_pointer_dma() local
510 ptr = PERIOD_OFFSET(rt->dma_addr, siu_pcm_pointer_dma()
512 siu_stream->period_bytes) - rt->dma_addr; siu_pcm_pointer_dma()
H A Dsiu_dai.c511 struct snd_pcm_runtime *rt = substream->runtime; siu_dai_startup() local
520 ret = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS); siu_dai_startup()
558 struct snd_pcm_runtime *rt = substream->runtime; siu_dai_prepare() local
565 __func__, info->port_id, port_info->play_cap, rt->channels); siu_dai_prepare()
/linux-4.1.27/sound/usb/caiaq/
H A Daudio.c392 struct snd_pcm_runtime *rt = sub->runtime; read_in_urb_mode0() local
393 char *audio_buf = rt->dma_area; read_in_urb_mode0()
394 int sz = frames_to_bytes(rt, rt->buffer_size); read_in_urb_mode0()
439 struct snd_pcm_runtime *rt = sub->runtime; read_in_urb_mode2() local
440 char *audio_buf = rt->dma_area; read_in_urb_mode2()
441 int sz = frames_to_bytes(rt, rt->buffer_size); read_in_urb_mode2()
471 struct snd_pcm_runtime *rt = sub->runtime; read_in_urb_mode3() local
472 audio_buf = rt->dma_area; read_in_urb_mode3()
473 sz = frames_to_bytes(rt, rt->buffer_size); read_in_urb_mode3()
552 struct snd_pcm_runtime *rt = sub->runtime; fill_out_urb_mode_0() local
553 char *audio_buf = rt->dma_area; fill_out_urb_mode_0()
554 int sz = frames_to_bytes(rt, rt->buffer_size); fill_out_urb_mode_0()
588 struct snd_pcm_runtime *rt = sub->runtime; fill_out_urb_mode_3() local
589 audio_buf = rt->dma_area; fill_out_urb_mode_3()
590 sz = frames_to_bytes(rt, rt->buffer_size); fill_out_urb_mode_3()
/linux-4.1.27/drivers/bus/
H A Domap_l3_smx.c184 status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0); omap3_l3_app_irq()
193 status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1); omap3_l3_app_irq()
200 base = l3->rt + omap3_l3_bases[int_type][err_source]; omap3_l3_app_irq()
246 l3->rt = ioremap(res->start, resource_size(res)); omap3_l3_probe()
247 if (!l3->rt) { omap3_l3_probe()
274 iounmap(l3->rt); omap3_l3_probe()
286 iounmap(l3->rt); omap3_l3_remove()
/linux-4.1.27/net/ipv4/netfilter/
H A Dipt_rpfilter.c71 const struct rtable *rt = skb_rtable(skb); rpfilter_is_local() local
72 return rt && (rt->rt_flags & RTCF_LOCAL); rpfilter_is_local()
H A Dnf_nat_masquerade_ipv4.c34 const struct rtable *rt; nf_nat_masquerade_ipv4() local
51 rt = skb_rtable(skb); nf_nat_masquerade_ipv4()
52 nh = rt_nexthop(rt, ip_hdr(skb)->daddr); nf_nat_masquerade_ipv4()
/linux-4.1.27/arch/mips/include/asm/octeon/
H A Dcvmx-asm.h136 asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
138 asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
/linux-4.1.27/include/math-emu/
H A Ddouble.h122 #define FP_FROM_INT_D(X,r,rs,rt) _FP_FROM_INT(D,2,X,r,rs,rt)
197 #define FP_FROM_INT_D(X,r,rs,rt) _FP_FROM_INT(D,1,X,r,rs,rt)
H A Dquad.h128 #define FP_FROM_INT_Q(X,r,rs,rt) _FP_FROM_INT(Q,4,X,r,rs,rt)
201 #define FP_FROM_INT_Q(X,r,rs,rt) _FP_FROM_INT(Q,2,X,r,rs,rt)
H A Dsingle.h111 #define FP_FROM_INT_S(X,r,rs,rt) _FP_FROM_INT(S,1,X,r,rs,rt)
/linux-4.1.27/arch/s390/include/uapi/asm/
H A Ducontext.h15 * on a rt signal frame. Please note that the structure is not fixed,
/linux-4.1.27/arch/mips/include/uapi/asm/
H A Dinst.h96 * rt field of bcond opcodes.
124 * rt field of cop.bc_op opcodes
522 __BITFIELD_FIELD(unsigned int rt : 5,
530 __BITFIELD_FIELD(unsigned int rt : 5,
547 __BITFIELD_FIELD(unsigned int rt : 5,
557 __BITFIELD_FIELD(unsigned int rt : 5,
568 __BITFIELD_FIELD(unsigned int rt : 5,
617 __BITFIELD_FIELD(unsigned int rt:5,
663 __BITFIELD_FIELD(unsigned int rt : 5,
672 __BITFIELD_FIELD(unsigned int rt : 5,
694 __BITFIELD_FIELD(unsigned int rt : 5,
704 __BITFIELD_FIELD(unsigned int rt : 5,
745 __BITFIELD_FIELD(unsigned int rt : 5,
798 __BITFIELD_FIELD(unsigned int rt : 3,
807 __BITFIELD_FIELD(unsigned int rt : 3,
815 __BITFIELD_FIELD(unsigned int rt : 5,
/linux-4.1.27/arch/powerpc/include/asm/
H A Dmmu-hash64.h448 * rt = register containing the proto-VSID and into which the
452 * - rt and rx must be different registers
453 * - The answer will end up in the low VSID_BITS bits of rt. The higher
457 #define ASM_VSID_SCRAMBLE(rt, rx, size) \
460 mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \
462 srdi rx,rt,VSID_BITS_##size; \
463 clrldi rt,rt,(64-VSID_BITS_##size); \
464 add rt,rt,rx; /* add high and low bits */ \
472 addi rx,rt,1; \
474 add rt,rt,rx
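The ASM_VSID_SCRAMBLE hits above compute (proto_vsid * multiplier) mod (2^VSID_BITS - 1) without a divide, by folding the high bits of the product back into the low bits. A plain C sketch of that reduction; the multiplier and bit width are parameters here, not the kernel's actual constants, and the product is assumed not to overflow 64 bits.

    #include <stdint.h>

    static uint64_t vsid_scramble_sketch(uint64_t proto_vsid, uint64_t multiplier,
                                         unsigned int vsid_bits)
    {
            uint64_t modulus = (1ULL << vsid_bits) - 1;
            uint64_t v = proto_vsid * multiplier;   /* assumed to fit in 64 bits */

            /* fold: (hi << vsid_bits) + lo == hi + lo  (mod 2^vsid_bits - 1) */
            while (v >> vsid_bits)
                    v = (v & modulus) + (v >> vsid_bits);
            if (v == modulus)
                    v = 0;
            return v;
    }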
/linux-4.1.27/arch/m68k/68000/
H A Dromvec.S9 * Copyright 1999 D. Jeff Dionne <jeff@rt-control.com>
H A Dints.c9 * Copyright 1999 D. Jeff Dionne <jeff@rt-control.com>
/linux-4.1.27/arch/arm/include/asm/
H A Dkvm_mmio.h27 unsigned long rt; member in struct:kvm_decode
/linux-4.1.27/virt/kvm/
H A Dirqchip.c128 static int setup_routing_entry(struct kvm_irq_routing_table *rt, setup_routing_entry() argument
139 hlist_for_each_entry(ei, &rt->map[ue->gsi], link) setup_routing_entry()
151 rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi; setup_routing_entry()
153 hlist_add_head(&e->link, &rt->map[e->gsi]); setup_routing_entry()
/linux-4.1.27/arch/sparc/include/asm/
H A Dsigcontext.h11 /* This is what SunOS does, so shall I unless we use new 32bit signals or rt signals. */
35 /* This is what we use for 32bit new non-rt signals. */
/linux-4.1.27/net/rxrpc/
H A Dar-peer.c38 struct rtable *rt; rxrpc_assess_MTU_size() local
43 rt = ip_route_output_ports(&init_net, &fl4, NULL, rxrpc_assess_MTU_size()
47 if (IS_ERR(rt)) { rxrpc_assess_MTU_size()
48 _leave(" [route err %ld]", PTR_ERR(rt)); rxrpc_assess_MTU_size()
52 peer->if_mtu = dst_mtu(&rt->dst); rxrpc_assess_MTU_size()
53 dst_release(&rt->dst); rxrpc_assess_MTU_size()
/linux-4.1.27/drivers/net/ppp/
H A Dpptp.c187 struct rtable *rt; pptp_xmit() local
195 rt = ip_route_output_ports(sock_net(sk), &fl4, NULL, pptp_xmit()
200 if (IS_ERR(rt)) pptp_xmit()
203 tdev = rt->dst.dev; pptp_xmit()
210 ip_rt_put(rt); pptp_xmit()
270 if (ip_dont_fragment(sk, &rt->dst)) pptp_xmit()
278 iph->ttl = ip4_dst_hoplimit(&rt->dst); pptp_xmit()
282 skb_dst_set(skb, &rt->dst); pptp_xmit()
457 struct rtable *rt; pptp_connect() local
491 rt = ip_route_output_ports(sock_net(sk), &fl4, sk, pptp_connect()
496 if (IS_ERR(rt)) { pptp_connect()
500 sk_setup_caps(sk, &rt->dst); pptp_connect()
502 po->chan.mtu = dst_mtu(&rt->dst); pptp_connect()
505 ip_rt_put(rt); pptp_connect()
/linux-4.1.27/drivers/infiniband/core/
H A Daddr.c231 struct rtable *rt; addr4_resolve() local
239 rt = ip_route_output_key(&init_net, &fl4); addr4_resolve()
240 if (IS_ERR(rt)) { addr4_resolve()
241 ret = PTR_ERR(rt); addr4_resolve()
247 if (rt->dst.dev->flags & IFF_LOOPBACK) { addr4_resolve()
255 if (rt->dst.dev->flags & IFF_NOARP) { addr4_resolve()
256 ret = rdma_copy_addr(addr, rt->dst.dev, NULL); addr4_resolve()
260 ret = dst_fetch_ha(&rt->dst, addr, &fl4.daddr); addr4_resolve()
262 ip_rt_put(rt); addr4_resolve()
H A Dcma.c1204 struct rdma_route *rt; cma_new_conn_id() local
1216 rt = &id->route; cma_new_conn_id()
1217 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; cma_new_conn_id()
1218 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, cma_new_conn_id()
1220 if (!rt->path_rec) cma_new_conn_id()
1223 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path; cma_new_conn_id()
1224 if (rt->num_paths == 2) cma_new_conn_id()
1225 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; cma_new_conn_id()
1228 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; cma_new_conn_id()
1229 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); cma_new_conn_id()
1230 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); cma_new_conn_id()
1232 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); cma_new_conn_id()
1236 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); cma_new_conn_id()
/linux-4.1.27/drivers/net/wireless/rt2x00/
H A Drt2x00.h153 u16 rt; member in struct:rt2x00_chip
1079 const u16 rt, const u16 rf, const u16 rev) rt2x00_set_chip()
1081 rt2x00dev->chip.rt = rt; rt2x00_set_chip()
1085 rt2x00_info(rt2x00dev, "Chipset detected - rt: %04x, rf: %04x, rev: %04x\n", rt2x00_set_chip()
1086 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00_set_chip()
1091 const u16 rt, const u16 rev) rt2x00_set_rt()
1093 rt2x00dev->chip.rt = rt; rt2x00_set_rt()
1097 rt2x00dev->chip.rt, rt2x00dev->chip.rev); rt2x00_set_rt()
1108 static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt) rt2x00_rt() argument
1110 return (rt2x00dev->chip.rt == rt); rt2x00_rt()
1124 const u16 rt, const u16 rev) rt2x00_rt_rev()
1126 return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) == rev); rt2x00_rt_rev()
1130 const u16 rt, const u16 rev) rt2x00_rt_rev_lt()
1132 return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) < rev); rt2x00_rt_rev_lt()
1136 const u16 rt, const u16 rev) rt2x00_rt_rev_gte()
1138 return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) >= rev); rt2x00_rt_rev_gte()
1078 rt2x00_set_chip(struct rt2x00_dev *rt2x00dev, const u16 rt, const u16 rf, const u16 rev) rt2x00_set_chip() argument
1090 rt2x00_set_rt(struct rt2x00_dev *rt2x00dev, const u16 rt, const u16 rev) rt2x00_set_rt() argument
1123 rt2x00_rt_rev(struct rt2x00_dev *rt2x00dev, const u16 rt, const u16 rev) rt2x00_rt_rev() argument
1129 rt2x00_rt_rev_lt(struct rt2x00_dev *rt2x00dev, const u16 rt, const u16 rev) rt2x00_rt_rev_lt() argument
1135 rt2x00_rt_rev_gte(struct rt2x00_dev *rt2x00dev, const u16 rt, const u16 rev) rt2x00_rt_rev_gte() argument
H A Drt2x00pci.c137 rt2x00dev->chip.rt = chip; rt2x00pci_probe()
/linux-4.1.27/arch/mips/oprofile/
H A Dbacktrace.c35 && ip->i_format.rs == 29 && ip->i_format.rt == 31; is_ra_save_ins()
41 if (ip->i_format.rs != 29 || ip->i_format.rt != 29) is_sp_move_ins()
58 if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28) is_end_of_function_marker()
/linux-4.1.27/net/xfrm/
H A Dxfrm_user.c37 struct nlattr *rt = attrs[type]; verify_one_alg() local
40 if (!rt) verify_one_alg()
43 algp = nla_data(rt); verify_one_alg()
44 if (nla_len(rt) < xfrm_alg_len(algp)) verify_one_alg()
63 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC]; verify_auth_trunc() local
66 if (!rt) verify_auth_trunc()
69 algp = nla_data(rt); verify_auth_trunc()
70 if (nla_len(rt) < xfrm_alg_auth_len(algp)) verify_auth_trunc()
79 struct nlattr *rt = attrs[XFRMA_ALG_AEAD]; verify_aead() local
82 if (!rt) verify_aead()
85 algp = nla_data(rt); verify_aead()
86 if (nla_len(rt) < aead_len(algp)) verify_aead()
96 struct nlattr *rt = attrs[type]; verify_one_addr() local
98 if (rt && addrp) verify_one_addr()
99 *addrp = nla_data(rt); verify_one_addr()
104 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; verify_sec_ctx_len() local
107 if (!rt) verify_sec_ctx_len()
110 uctx = nla_data(rt); verify_sec_ctx_len()
120 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; verify_replay() local
124 if (!rt) verify_replay()
127 rs = nla_data(rt); verify_replay()
132 if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && verify_replay()
133 nla_len(rt) != sizeof(*rs)) verify_replay()
137 if (!rt) verify_replay()
468 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; xfrm_update_ae_params() local
498 if (rt) xfrm_update_ae_params()
499 x->replay_maxdiff = nla_get_u32(rt); xfrm_update_ae_params()
1312 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; copy_from_user_sec_ctx() local
1315 if (!rt) copy_from_user_sec_ctx()
1318 uctx = nla_data(rt); copy_from_user_sec_ctx()
1382 struct nlattr *rt = attrs[XFRMA_TMPL]; copy_from_user_tmpl() local
1384 if (!rt) { copy_from_user_tmpl()
1387 struct xfrm_user_tmpl *utmpl = nla_data(rt); copy_from_user_tmpl()
1388 int nr = nla_len(rt) / sizeof(*utmpl); copy_from_user_tmpl()
1402 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE]; copy_from_user_policy_type() local
1407 if (rt) { copy_from_user_policy_type()
1408 upt = nla_data(rt); copy_from_user_policy_type()
1709 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; xfrm_get_policy() local
1717 if (rt) { xfrm_get_policy()
1718 struct xfrm_user_sec_ctx *uctx = nla_data(rt); xfrm_get_policy()
1998 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; xfrm_add_pol_expire() local
2006 if (rt) { xfrm_add_pol_expire()
2007 struct xfrm_user_sec_ctx *uctx = nla_data(rt); xfrm_add_pol_expire()
2079 struct nlattr *rt = attrs[XFRMA_TMPL]; xfrm_add_acquire() local
2105 ut = nla_data(rt); xfrm_add_acquire()
2138 struct nlattr *rt = attrs[XFRMA_MIGRATE]; copy_from_user_migrate() local
2152 um = nla_data(rt); copy_from_user_migrate()
2153 num_migrate = nla_len(rt) / sizeof(*um); copy_from_user_migrate()
/linux-4.1.27/include/linux/
H A Dinit_task.h16 #include <linux/sched/rt.h>
209 .rt = { \
210 .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
H A Dioprio.h60 * Check for idle and rt task process, and return appropriate IO class.
/linux-4.1.27/drivers/block/drbd/
H A Ddrbd_proc.c116 unsigned long db, dt, dbdt, rt, rs_total, rs_left; drbd_syncer_progress() local
158 * rt: remaining time drbd_syncer_progress()
172 rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */ drbd_syncer_progress()
175 rt / 3600, (rt % 3600) / 60, rt % 60); drbd_syncer_progress()
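The drbd_syncer_progress() hits above estimate the remaining resync time from the rate observed over the last sample interval: rt is roughly dt * rs_left / db seconds, with the /100 factors keeping the intermediates within unsigned long range. The same estimate as a standalone sketch; names are illustrative.

    /*
     * dt:      seconds in the last sample interval
     * db:      blocks resynced during that interval
     * rs_left: blocks still to resync
     */
    static unsigned long resync_eta_sketch(unsigned long dt, unsigned long db,
                                           unsigned long rs_left)
    {
            /* db/100 + 1 avoids a divide by zero and scales the quotient down */
            return (dt * (rs_left / (db / 100 + 1))) / 100;
    }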
/linux-4.1.27/drivers/pinctrl/
H A Dpinctrl-st.c219 struct regmap_field *rt[ST_GPIO_PINS_PER_BANK]; member in struct:st_retime_dedicated
239 } rt; member in struct:st_pio_control
249 const int alt, oe, pu, od, rt; member in struct:st_pctl_data
354 .alt = 0, .oe = 5, .pu = 7, .od = 9, .rt = 16,
359 .alt = 0, .oe = 8, .pu = 10, .od = 12, .rt = 16,
364 .alt = 0, .oe = 6, .pu = 8, .od = 10, .rt = 38,
369 .alt = 0, .oe = 3, .pu = 4, .od = 5, .rt = 6,
374 .alt = 0, .oe = 5, .pu = 7, .od = 9, .rt = 11,
387 .alt = 0, .oe = 40, .pu = 50, .od = 60, .rt = 100,
400 .rt = 100,
561 struct st_retime_packed *rt_p = &pc->rt.rt_p; st_pinconf_set_retime_packed()
601 struct st_retime_dedicated *rt_d = &pc->rt.rt_d; st_pinconf_set_retime_dedicated()
612 regmap_field_write(rt_d->rt[pin], retime_config); st_pinconf_set_retime_dedicated()
643 struct st_retime_packed *rt_p = &pc->rt.rt_p; st_pinconf_get_retime_packed()
678 struct st_retime_dedicated *rt_d = &pc->rt.rt_d; st_pinconf_get_retime_dedicated()
680 regmap_field_read(rt_d->rt[pin], &value); st_pinconf_get_retime_dedicated()
1078 "de:%ld,rt-clk:%ld,rt-delay:%ld]", st_pinconf_dbg_show()
1118 int reg = (data->rt + bank * RT_P_CFGS_PER_BANK) * 4; st_pctl_dt_setup_retime_packed()
1119 struct st_retime_packed *rt_p = &pc->rt.rt_p; st_pctl_dt_setup_retime_packed()
1154 int reg_offset = (data->rt + bank * RT_D_CFGS_PER_BANK) * 4; st_pctl_dt_setup_retime_dedicated()
1155 struct st_retime_dedicated *rt_d = &pc->rt.rt_d; st_pctl_dt_setup_retime_dedicated()
1162 rt_d->rt[j] = devm_regmap_field_alloc(dev, rm, reg); st_pctl_dt_setup_retime_dedicated()
1163 if (IS_ERR(rt_d->rt[j])) st_pctl_dt_setup_retime_dedicated()
/linux-4.1.27/net/l2tp/
H A Dl2tp_ip.c393 struct rtable *rt = NULL; l2tp_ip_sendmsg() local
452 rt = (struct rtable *) __sk_dst_check(sk, 0); l2tp_ip_sendmsg()
455 if (rt == NULL) { l2tp_ip_sendmsg()
468 rt = ip_route_output_ports(sock_net(sk), fl4, sk, l2tp_ip_sendmsg()
473 if (IS_ERR(rt)) l2tp_ip_sendmsg()
476 sk_setup_caps(sk, &rt->dst); l2tp_ip_sendmsg()
478 skb_dst_set(skb, &rt->dst); l2tp_ip_sendmsg()
486 skb_dst_set_noref(skb, &rt->dst); l2tp_ip_sendmsg()
/linux-4.1.27/net/dccp/
H A Dipv4.c50 struct rtable *rt; dccp_v4_connect() local
75 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, dccp_v4_connect()
79 if (IS_ERR(rt)) dccp_v4_connect()
80 return PTR_ERR(rt); dccp_v4_connect()
82 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { dccp_v4_connect()
83 ip_rt_put(rt); dccp_v4_connect()
110 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, dccp_v4_connect()
112 if (IS_ERR(rt)) { dccp_v4_connect()
113 err = PTR_ERR(rt); dccp_v4_connect()
114 rt = NULL; dccp_v4_connect()
118 sk_setup_caps(sk, &rt->dst); dccp_v4_connect()
127 rt = NULL; dccp_v4_connect()
137 ip_rt_put(rt); dccp_v4_connect()
479 struct rtable *rt; dccp_v4_route_skb() local
492 rt = ip_route_output_flow(net, &fl4, sk); dccp_v4_route_skb()
493 if (IS_ERR(rt)) { dccp_v4_route_skb()
498 return &rt->dst; dccp_v4_route_skb()
/linux-4.1.27/arch/ia64/kernel/
H A Dsmpboot.c217 get_delta (long *rt, long *master) get_delta() argument
235 *rt = best_t1 - best_t0; get_delta()
281 unsigned long flags, rt, master_time_stamp, bound; ia64_sync_itc() local
284 long rt; /* roundtrip time */ ia64_sync_itc() member in struct:__anon1733
312 delta = get_delta(&rt, &master_time_stamp); ia64_sync_itc()
315 bound = rt; ia64_sync_itc()
328 t[i].rt = rt; ia64_sync_itc()
339 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", ia64_sync_itc()
340 t[i].rt, t[i].master, t[i].diff, t[i].lat); ia64_sync_itc()
344 "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt); ia64_sync_itc()
/linux-4.1.27/drivers/isdn/gigaset/
H A Dev-layer.c448 const struct resp_type_t *rt; gigaset_handle_modem_response() local
470 for (rt = resp_type; rt->response; ++rt) { gigaset_handle_modem_response()
471 eoc = skip_prefix(cs->respdata, rt->response); gigaset_handle_modem_response()
475 if (!rt->response) { gigaset_handle_modem_response()
498 switch (rt->type) { gigaset_handle_modem_response()
504 add_cid_event(cs, cid, rt->resp_code, NULL, 0); gigaset_handle_modem_response()
514 add_cid_event(cs, 0, rt->resp_code, NULL, cid); gigaset_handle_modem_response()
520 for (rt = resp_type; rt->response; ++rt) { gigaset_handle_modem_response()
521 psep = skip_prefix(eoc, rt->response); gigaset_handle_modem_response()
527 if (!psep || rt->type != RT_STRING) { gigaset_handle_modem_response()
547 add_cid_event(cs, cid, rt->resp_code, ptr, 0); gigaset_handle_modem_response()
555 add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE); gigaset_handle_modem_response()
568 add_cid_event(cs, cid, rt->resp_code, NULL, zr->code); gigaset_handle_modem_response()
580 add_cid_event(cs, cid, rt->resp_code, ptr, 0); gigaset_handle_modem_response()
599 add_cid_event(cs, cid, rt->resp_code, NULL, parameter); gigaset_handle_modem_response()
612 if (rt->resp_code == RSP_ZDLE) gigaset_handle_modem_response()
615 add_cid_event(cs, cid, rt->resp_code, NULL, parameter); gigaset_handle_modem_response()
622 add_cid_event(cs, cid, rt->resp_code, NULL, -1); gigaset_handle_modem_response()
/linux-4.1.27/drivers/net/ipvlan/
H A Dipvlan_core.c342 struct rtable *rt; ipvlan_process_v4_outbound() local
352 rt = ip_route_output_flow(dev_net(dev), &fl4, NULL); ipvlan_process_v4_outbound()
353 if (IS_ERR(rt)) ipvlan_process_v4_outbound()
356 if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { ipvlan_process_v4_outbound()
357 ip_rt_put(rt); ipvlan_process_v4_outbound()
361 skb_dst_set(skb, &rt->dst); ipvlan_process_v4_outbound()
/linux-4.1.27/net/tipc/
H A Dudp_media.c158 struct rtable *rt; tipc_udp_send_msg() local
174 rt = ip_route_output_key(net, &fl); tipc_udp_send_msg()
175 if (IS_ERR(rt)) { tipc_udp_send_msg()
176 err = PTR_ERR(rt); tipc_udp_send_msg()
179 ttl = ip4_dst_hoplimit(&rt->dst); tipc_udp_send_msg()
180 err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone, tipc_udp_send_msg()
186 ip_rt_put(rt); tipc_udp_send_msg()
/linux-4.1.27/net/atm/
H A Dclip.c332 struct rtable *rt; clip_start_xmit() local
344 rt = (struct rtable *) dst; clip_start_xmit()
345 if (rt->rt_gateway) clip_start_xmit()
346 daddr = &rt->rt_gateway; clip_start_xmit()
447 struct rtable *rt; clip_setentry() local
463 rt = ip_route_output(&init_net, ip, 0, 1, 0); clip_setentry()
464 if (IS_ERR(rt)) clip_setentry()
465 return PTR_ERR(rt); clip_setentry()
466 neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1); clip_setentry()
467 ip_rt_put(rt); clip_setentry()
/linux-4.1.27/drivers/net/wireless/ath/
H A Ddfs_pattern_detector.c308 const struct radar_types *rt; dpd_set_domain() local
316 rt = get_dfs_domain_radar_types(region); dpd_set_domain()
317 if (rt == NULL) dpd_set_domain()
324 dpd->radar_spec = rt->radar_types; dpd_set_domain()
325 dpd->num_radar_types = rt->num_radar_types; dpd_set_domain()
/linux-4.1.27/sound/pci/
H A Dad1889.c324 struct snd_pcm_runtime *rt = ss->runtime; snd_ad1889_playback_open() local
327 rt->hw = snd_ad1889_playback_hw; snd_ad1889_playback_open()
336 struct snd_pcm_runtime *rt = ss->runtime; snd_ad1889_capture_open() local
339 rt->hw = snd_ad1889_capture_hw; snd_ad1889_capture_open()
364 struct snd_pcm_runtime *rt = ss->runtime; snd_ad1889_playback_prepare() local
376 if (snd_pcm_format_width(rt->format) == 16) snd_ad1889_playback_prepare()
379 if (rt->channels > 1) snd_ad1889_playback_prepare()
387 chip->wave.addr = rt->dma_addr; snd_ad1889_playback_prepare()
392 ad1889_writew(chip, AD_DS_WAS, rt->rate); snd_ad1889_playback_prepare()
406 chip->wave.addr, count, size, reg, rt->rate); snd_ad1889_playback_prepare()
414 struct snd_pcm_runtime *rt = ss->runtime; snd_ad1889_capture_prepare() local
426 if (snd_pcm_format_width(rt->format) == 16) snd_ad1889_capture_prepare()
429 if (rt->channels > 1) snd_ad1889_capture_prepare()
437 chip->ramc.addr = rt->dma_addr; snd_ad1889_capture_prepare()
453 chip->ramc.addr, count, size, reg, rt->rate); snd_ad1889_capture_prepare()
/linux-4.1.27/fs/xfs/
H A Dxfs_qm.h72 time_t qi_rtbtimelimit;/* limit for rt blks timer */
75 xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */
H A Dxfs_bmap_util.h50 int rt, int eof, int delay, int convert,
H A Dxfs_trans.h98 unsigned int t_rtx_res; /* # of rt extents resvd */
99 unsigned int t_rtx_res_used; /* # of resvd rt extents used */
H A Dxfs_mount.h70 uint m_rsumlevels; /* rt summary levels */
71 uint m_rsumsize; /* size of rt summary, bytes */
78 xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
H A Dxfs_rtalloc.h88 xfs_rtblock_t *pick); /* result rt extent */
/linux-4.1.27/arch/mips/sgi-ip27/
H A Dip27-timer.c96 .name = "hub-rt",
117 sprintf(name, "hub-rt %d", cpu); hub_rt_clock_event_init()
/linux-4.1.27/drivers/ide/
H A Dht6560b.c77 * The higher nibble of value is the Recovery Time (rt) and the lower nibble
80 * So 0x24 means 2 for rt and 4 for at. Each of the drives should have
81 * both values, and IDESETUP gives automatically rt=15 st=15 for CDROMs or
85 * High nibble: Recovery Cycle Time (rt)
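
The ht6560b comments above describe a plain nibble encoding: the high nibble of the timing byte is the recovery time (rt) and the low nibble the active time (at), so 0x24 means rt=2, at=4. A tiny illustrative pack/unpack, not the driver's own code:

#include <stdio.h>

static unsigned char pack_timing(unsigned rt, unsigned at)
{
	return (unsigned char)(((rt & 0x0f) << 4) | (at & 0x0f));
}

int main(void)
{
	unsigned char t = pack_timing(2, 4);

	printf("timing byte 0x%02x -> rt=%u at=%u\n",
	       t, (t >> 4) & 0x0f, t & 0x0f);
	return 0;
}
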
/linux-4.1.27/fs/hpfs/
H A Dalloc.c135 goto rt; alloc_in_bmp()
150 goto rt; alloc_in_bmp()
173 goto rt; alloc_in_bmp()
178 rt: alloc_in_bmp()
/linux-4.1.27/arch/hexagon/include/asm/
H A Dcmpxchg.h63 * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
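
The hexagon comment above gives the cmpxchg contract: *ptr is replaced with the new value only if it still holds the expected old value, which is what rt-mutex fast paths rely on. A small userspace illustration using C11 atomics rather than the kernel's cmpxchg() macro:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int owner = 0;
	int expected = 0;

	/* Succeeds: owner was 0, becomes 42. */
	if (atomic_compare_exchange_strong(&owner, &expected, 42))
		printf("acquired, owner=%d\n", atomic_load(&owner));

	/* Fails: owner is now 42; expected is updated to the current value. */
	expected = 0;
	if (!atomic_compare_exchange_strong(&owner, &expected, 7))
		printf("lost the race, current owner=%d\n", expected);

	return 0;
}
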
/linux-4.1.27/tools/firewire/
H A Dnosy-dump.h82 uint32_t rt:2; member in struct:link_packet::__anon14756::__anon14757
/linux-4.1.27/drivers/net/ethernet/amd/
H A Ddeclance.c231 #define lib_off(rt, type) \
232 shift_off(offsetof(struct lance_init_block, rt), type)
234 #define lib_ptr(ib, rt, type) \
235 ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
237 #define rds_off(rt, type) \
238 shift_off(offsetof(struct lance_rx_desc, rt), type)
240 #define rds_ptr(rd, rt, type) \
241 ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
243 #define tds_off(rt, type) \
244 shift_off(offsetof(struct lance_tx_desc, rt), type)
246 #define tds_ptr(td, rt, type) \
247 ((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
/linux-4.1.27/drivers/net/wimax/i2400m/
H A Dusb.c275 int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) i2400mu_bus_reset() argument
294 d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt); i2400mu_bus_reset()
295 if (rt == I2400M_RT_WARM) i2400mu_bus_reset()
300 else if (rt == I2400M_RT_COLD) i2400mu_bus_reset()
305 else if (rt == I2400M_RT_BUS) { i2400mu_bus_reset()
325 && rt != I2400M_RT_BUS) { i2400mu_bus_reset()
334 rt == I2400M_RT_WARM ? "warm" : "cold", result); i2400mu_bus_reset()
338 d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result); i2400mu_bus_reset()
H A Ddebugfs.c204 enum i2400m_reset_type rt = val; debugfs_i2400m_reset_set() local
205 switch(rt) { debugfs_i2400m_reset_set()
209 result = i2400m_reset(i2400m, rt); debugfs_i2400m_reset_set()
/linux-4.1.27/arch/sparc/kernel/
H A Dsmp_64.c164 static inline long get_delta (long *rt, long *master) get_delta() argument
184 *rt = best_t1 - best_t0; get_delta()
197 unsigned long flags, rt, master_time_stamp; smp_synchronize_tick_client() local
200 long rt; /* roundtrip time */ smp_synchronize_tick_client() member in struct:__anon2741
215 delta = get_delta(&rt, &master_time_stamp); smp_synchronize_tick_client()
229 t[i].rt = rt; smp_synchronize_tick_client()
240 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", smp_synchronize_tick_client()
241 t[i].rt, t[i].master, t[i].diff, t[i].lat); smp_synchronize_tick_client()
246 smp_processor_id(), delta, rt); smp_synchronize_tick_client()
/linux-4.1.27/drivers/s390/char/
H A Dtape_char.c76 device->rt = register_tape_dev( tapechar_setup_device()
90 unregister_tape_dev(&device->cdev->dev, device->rt); tapechar_cleanup_device()
91 device->rt = NULL; tapechar_cleanup_device()
/linux-4.1.27/drivers/acpi/
H A Dacpi_pad.c192 * current sched_rt has threshold for rt task running time. power_saving_thread()
193 * When a rt task uses 95% CPU time, the rt thread will be power_saving_thread()
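
The 95% in the acpi_pad comment above is the default RT throttling ratio: sched_rt_runtime_us/sched_rt_period_us default to 950000/1000000, so realtime tasks may consume at most 95% of each period. A small sketch that reads the current values from the standard procfs sysctl paths (a normally mounted /proc is assumed):

#include <stdio.h>

static long read_long(const char *path)
{
	long v = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	long runtime = read_long("/proc/sys/kernel/sched_rt_runtime_us");
	long period  = read_long("/proc/sys/kernel/sched_rt_period_us");

	if (runtime > 0 && period > 0)
		printf("RT tasks may use %.0f%% of each %ld us period\n",
		       100.0 * runtime / period, period);
	return 0;
}
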
/linux-4.1.27/arch/xtensa/include/asm/
H A Dpgtable.h404 #define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
405 #define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT
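
The xtensa _PGD_INDEX/_PTE_INDEX macros above use extui to extract the directory and table index fields from a virtual address held in a register. In C that is a shift plus mask; the shift/width values below are illustrative for a two-level, 4 KiB-page layout, not xtensa's actual configuration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22	/* PAGE_SHIFT + PTRS_PER_PTE_SHIFT */
#define PTRS_PER_PTE	1024

static unsigned pgd_index(uint32_t vaddr)
{
	return vaddr >> PGDIR_SHIFT;			/* extui rt, rs, 22, 10 */
}

static unsigned pte_index(uint32_t vaddr)
{
	return (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* extui rt, rs, 12, 10 */
}

int main(void)
{
	uint32_t va = 0x12345678;

	printf("va=0x%08x pgd=%u pte=%u\n",
	       (unsigned)va, pgd_index(va), pte_index(va));
	return 0;
}
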
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
H A Dramnv50.c319 u32 r0, r4, rt, rblock_size; nv50_fb_vram_rblock() local
323 rt = nv_rd32(pfb, 0x100250); nv50_fb_vram_rblock()
325 r0, r4, rt, nv_rd32(pfb, 0x001540)); nv50_fb_vram_rblock()
343 if (rt & 1) nv50_fb_vram_rblock()
/linux-4.1.27/security/selinux/ss/
H A Dpolicydb.c762 struct mls_range *rt = datum; range_tr_destroy() local
764 ebitmap_destroy(&rt->level[0].cat); range_tr_destroy()
765 ebitmap_destroy(&rt->level[1].cat); range_tr_destroy()
1838 struct range_trans *rt = NULL; range_read() local
1854 rt = kzalloc(sizeof(*rt), GFP_KERNEL); range_read()
1855 if (!rt) range_read()
1862 rt->source_type = le32_to_cpu(buf[0]); range_read()
1863 rt->target_type = le32_to_cpu(buf[1]); range_read()
1868 rt->target_class = le32_to_cpu(buf[0]); range_read()
1870 rt->target_class = p->process_class; range_read()
1873 if (!policydb_type_isvalid(p, rt->source_type) || range_read()
1874 !policydb_type_isvalid(p, rt->target_type) || range_read()
1875 !policydb_class_isvalid(p, rt->target_class)) range_read()
1893 rc = hashtab_insert(p->range_tr, rt, r); range_read()
1897 rt = NULL; range_read()
1903 kfree(rt); range_read()
3214 struct range_trans *rt = key; range_write_helper() local
3221 buf[0] = cpu_to_le32(rt->source_type); range_write_helper()
3222 buf[1] = cpu_to_le32(rt->target_type); range_write_helper()
3227 buf[0] = cpu_to_le32(rt->target_class); range_write_helper()
/linux-4.1.27/include/linux/amba/
H A Dpl022.h245 * @rt: indicates the controller should run the message pump with realtime
257 bool rt; member in struct:pl022_ssp_controller
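
The pl022_ssp_controller.rt flag documented above asks the driver to run its SPI message pump at realtime priority. A hedged example of board platform data setting it: rt is confirmed by the hit above, while the other fields shown (bus_id, num_chipselect, enable_dma) are assumed from pl022.h and the values are placeholders.

#include <linux/amba/pl022.h>

/* Illustrative board platform data; values are made up. */
static struct pl022_ssp_controller board_ssp_plat = {
	.bus_id		= 0,
	.num_chipselect	= 1,
	.enable_dma	= 0,
	.rt		= true,	/* run the message pump as a realtime thread */
};
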
/linux-4.1.27/drivers/scsi/cxgbi/
H A Dlibcxgbi.c583 struct rtable *rt; find_route_ipv4() local
585 rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr, find_route_ipv4()
587 if (IS_ERR(rt)) find_route_ipv4()
590 return rt; find_route_ipv4()
599 struct rtable *rt = NULL; cxgbi_check_route() local
607 rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); cxgbi_check_route()
608 if (!rt) { cxgbi_check_route()
615 dst = &rt->dst; cxgbi_check_route()
623 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { cxgbi_check_route()
634 pr_info("rt dev %s, loopback -> %s, mtu %u.\n", cxgbi_check_route()
674 ip_rt_put(rt); cxgbi_check_route()
700 struct rt6_info *rt = NULL; cxgbi_check_route6() local
708 rt = find_route_ipv6(NULL, &daddr6->sin6_addr); cxgbi_check_route6()
710 if (!rt) { cxgbi_check_route6()
718 dst = &rt->dst; cxgbi_check_route6()
731 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { cxgbi_check_route6()
763 if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) { cxgbi_check_route6()
764 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); cxgbi_check_route6()
774 pref_saddr = rt->rt6i_prefsrc.addr; cxgbi_check_route6()
791 ip6_rt_put(rt); cxgbi_check_route6()
/linux-4.1.27/drivers/usb/host/
H A Dfhci-tds.c249 u8 rt; fhci_init_ep_registers() local
257 rt = (BUS_MODE_BO_BE | BUS_MODE_GBL); fhci_init_ep_registers()
260 rt |= BUS_MODE_DTB; fhci_init_ep_registers()
262 out_8(&ep->ep_pram_ptr->rx_func_code, rt); fhci_init_ep_registers()
263 out_8(&ep->ep_pram_ptr->tx_func_code, rt); fhci_init_ep_registers()
/linux-4.1.27/drivers/media/radio/si4713/
H A Dsi4713.c862 static int si4713_set_rds_radio_text(struct si4713_device *sdev, const char *rt) si4713_set_rds_radio_text() argument
877 if (!strlen(rt)) si4713_set_rds_radio_text()
886 if (!rt[t_index + i] || si4713_set_rds_radio_text()
887 rt[t_index + i] == RDS_CARRIAGE_RETURN) { si4713_set_rds_radio_text()
888 rt = cr; si4713_set_rds_radio_text()
897 compose_u16(rt[t_index], rt[t_index + 1]), si4713_set_rds_radio_text()
898 compose_u16(rt[t_index + 2], rt[t_index + 3]), si4713_set_rds_radio_text()
/linux-4.1.27/net/sctp/
H A Dprotocol.c427 struct rtable *rt; sctp_v4_get_dst() local
452 rt = ip_route_output_key(sock_net(sk), fl4); sctp_v4_get_dst()
453 if (!IS_ERR(rt)) sctp_v4_get_dst()
454 dst = &rt->dst; sctp_v4_get_dst()
503 rt = ip_route_output_key(sock_net(sk), fl4); sctp_v4_get_dst()
504 if (!IS_ERR(rt)) { sctp_v4_get_dst()
505 dst = &rt->dst; sctp_v4_get_dst()
530 struct rtable *rt = (struct rtable *)t->dst; sctp_v4_get_saddr() local
532 if (rt) { sctp_v4_get_saddr()
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
H A Diwch_cm.c339 struct rtable *rt; find_route() local
342 rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, find_route()
345 if (IS_ERR(rt)) find_route()
347 return rt; find_route()
1347 struct rtable *rt; pass_accept_req() local
1370 rt = find_route(tdev, pass_accept_req()
1375 if (!rt) { pass_accept_req()
1380 dst = &rt->dst; pass_accept_req()
1893 struct rtable *rt; iwch_connect() local
1945 rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr, iwch_connect()
1948 if (!rt) { iwch_connect()
1953 ep->dst = &rt->dst; iwch_connect()
/linux-4.1.27/arch/x86/platform/efi/
H A Defi_64.c327 u32 *rt, *___f; \
329 rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \
330 ___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
/linux-4.1.27/net/sched/
H A Dcls_route.c356 struct route4_filter *rt; route4_delete() local
358 rt = rtnl_dereference(b->ht[i]); route4_delete()
359 if (rt) route4_delete()
/linux-4.1.27/block/
H A Dioprio.c75 /* fall through, rt has prio field too */ SYSCALL_DEFINE3()
/linux-4.1.27/fs/xfs/libxfs/
H A Dxfs_fs.h63 #define XFS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */
170 __u64 rtextents; /* rt extents in realtime subvol*/
196 __u64 rtextents; /* rt extents in realtime subvol*/
212 __u64 freertx; /* free rt extents */
