Searched refs:cp (Results 1 - 200 of 494) sorted by relevance

/linux-4.1.27/drivers/media/usb/pvrusb2/
pvrusb2-ioread.c
53 static int pvr2_ioread_init(struct pvr2_ioread *cp) pvr2_ioread_init() argument
57 cp->stream = NULL; pvr2_ioread_init()
58 mutex_init(&cp->mutex); pvr2_ioread_init()
61 cp->buffer_storage[idx] = kmalloc(BUFFER_SIZE,GFP_KERNEL); pvr2_ioread_init()
62 if (!(cp->buffer_storage[idx])) break; pvr2_ioread_init()
68 if (!(cp->buffer_storage[idx])) continue; pvr2_ioread_init()
69 kfree(cp->buffer_storage[idx]); pvr2_ioread_init()
76 static void pvr2_ioread_done(struct pvr2_ioread *cp) pvr2_ioread_done() argument
80 pvr2_ioread_setup(cp,NULL); pvr2_ioread_done()
82 if (!(cp->buffer_storage[idx])) continue; pvr2_ioread_done()
83 kfree(cp->buffer_storage[idx]); pvr2_ioread_done()
89 struct pvr2_ioread *cp; pvr2_ioread_create() local
90 cp = kzalloc(sizeof(*cp),GFP_KERNEL); pvr2_ioread_create()
91 if (!cp) return NULL; pvr2_ioread_create()
92 pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_create id=%p",cp); pvr2_ioread_create()
93 if (pvr2_ioread_init(cp) < 0) { pvr2_ioread_create()
94 kfree(cp); pvr2_ioread_create()
97 return cp; pvr2_ioread_create()
100 void pvr2_ioread_destroy(struct pvr2_ioread *cp) pvr2_ioread_destroy() argument
102 if (!cp) return; pvr2_ioread_destroy()
103 pvr2_ioread_done(cp); pvr2_ioread_destroy()
104 pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_destroy id=%p",cp); pvr2_ioread_destroy()
105 if (cp->sync_key_ptr) { pvr2_ioread_destroy()
106 kfree(cp->sync_key_ptr); pvr2_ioread_destroy()
107 cp->sync_key_ptr = NULL; pvr2_ioread_destroy()
109 kfree(cp); pvr2_ioread_destroy()
112 void pvr2_ioread_set_sync_key(struct pvr2_ioread *cp, pvr2_ioread_set_sync_key() argument
116 if (!cp) return; pvr2_ioread_set_sync_key()
119 if ((sync_key_len == cp->sync_key_len) && pvr2_ioread_set_sync_key()
121 (!memcmp(sync_key_ptr,cp->sync_key_ptr,sync_key_len)))) return; pvr2_ioread_set_sync_key()
123 if (sync_key_len != cp->sync_key_len) { pvr2_ioread_set_sync_key()
124 if (cp->sync_key_ptr) { pvr2_ioread_set_sync_key()
125 kfree(cp->sync_key_ptr); pvr2_ioread_set_sync_key()
126 cp->sync_key_ptr = NULL; pvr2_ioread_set_sync_key()
128 cp->sync_key_len = 0; pvr2_ioread_set_sync_key()
130 cp->sync_key_ptr = kmalloc(sync_key_len,GFP_KERNEL); pvr2_ioread_set_sync_key()
131 if (cp->sync_key_ptr) { pvr2_ioread_set_sync_key()
132 cp->sync_key_len = sync_key_len; pvr2_ioread_set_sync_key()
136 if (!cp->sync_key_len) return; pvr2_ioread_set_sync_key()
137 memcpy(cp->sync_key_ptr,sync_key_ptr,cp->sync_key_len); pvr2_ioread_set_sync_key()
140 static void pvr2_ioread_stop(struct pvr2_ioread *cp) pvr2_ioread_stop() argument
142 if (!(cp->enabled)) return; pvr2_ioread_stop()
144 "/*---TRACE_READ---*/ pvr2_ioread_stop id=%p",cp); pvr2_ioread_stop()
145 pvr2_stream_kill(cp->stream); pvr2_ioread_stop()
146 cp->c_buf = NULL; pvr2_ioread_stop()
147 cp->c_data_ptr = NULL; pvr2_ioread_stop()
148 cp->c_data_len = 0; pvr2_ioread_stop()
149 cp->c_data_offs = 0; pvr2_ioread_stop()
150 cp->enabled = 0; pvr2_ioread_stop()
151 cp->stream_running = 0; pvr2_ioread_stop()
152 cp->spigot_open = 0; pvr2_ioread_stop()
153 if (cp->sync_state) { pvr2_ioread_stop()
156 cp->sync_state = 0; pvr2_ioread_stop()
160 static int pvr2_ioread_start(struct pvr2_ioread *cp) pvr2_ioread_start() argument
164 if (cp->enabled) return 0; pvr2_ioread_start()
165 if (!(cp->stream)) return 0; pvr2_ioread_start()
167 "/*---TRACE_READ---*/ pvr2_ioread_start id=%p",cp); pvr2_ioread_start()
168 while ((bp = pvr2_stream_get_idle_buffer(cp->stream)) != NULL) { pvr2_ioread_start()
175 cp,stat); pvr2_ioread_start()
176 pvr2_ioread_stop(cp); pvr2_ioread_start()
180 cp->enabled = !0; pvr2_ioread_start()
181 cp->c_buf = NULL; pvr2_ioread_start()
182 cp->c_data_ptr = NULL; pvr2_ioread_start()
183 cp->c_data_len = 0; pvr2_ioread_start()
184 cp->c_data_offs = 0; pvr2_ioread_start()
185 cp->stream_running = 0; pvr2_ioread_start()
186 if (cp->sync_key_len) { pvr2_ioread_start()
189 cp->sync_state = 1; pvr2_ioread_start()
190 cp->sync_trashed_count = 0; pvr2_ioread_start()
191 cp->sync_buf_offs = 0; pvr2_ioread_start()
193 cp->spigot_open = 0; pvr2_ioread_start()
197 struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *cp) pvr2_ioread_get_stream() argument
199 return cp->stream; pvr2_ioread_get_stream()
202 int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp) pvr2_ioread_setup() argument
208 mutex_lock(&cp->mutex); do { pvr2_ioread_setup()
209 if (cp->stream) { pvr2_ioread_setup()
212 " pvr2_ioread_setup (tear-down) id=%p",cp); pvr2_ioread_setup()
213 pvr2_ioread_stop(cp); pvr2_ioread_setup()
214 pvr2_stream_kill(cp->stream); pvr2_ioread_setup()
215 if (pvr2_stream_get_buffer_count(cp->stream)) { pvr2_ioread_setup()
216 pvr2_stream_set_buffer_count(cp->stream,0); pvr2_ioread_setup()
218 cp->stream = NULL; pvr2_ioread_setup()
223 " pvr2_ioread_setup (setup) id=%p",cp); pvr2_ioread_setup()
227 mutex_unlock(&cp->mutex); pvr2_ioread_setup()
233 cp->buffer_storage[idx], pvr2_ioread_setup()
236 cp->stream = sp; pvr2_ioread_setup()
238 } while (0); mutex_unlock(&cp->mutex); pvr2_ioread_setup()
243 int pvr2_ioread_set_enabled(struct pvr2_ioread *cp,int fl) pvr2_ioread_set_enabled() argument
246 if ((!fl) == (!(cp->enabled))) return ret; pvr2_ioread_set_enabled()
248 mutex_lock(&cp->mutex); do { pvr2_ioread_set_enabled()
250 ret = pvr2_ioread_start(cp); pvr2_ioread_set_enabled()
252 pvr2_ioread_stop(cp); pvr2_ioread_set_enabled()
254 } while (0); mutex_unlock(&cp->mutex); pvr2_ioread_set_enabled()
258 static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp) pvr2_ioread_get_buffer() argument
262 while (cp->c_data_len <= cp->c_data_offs) { pvr2_ioread_get_buffer()
263 if (cp->c_buf) { pvr2_ioread_get_buffer()
265 stat = pvr2_buffer_queue(cp->c_buf); pvr2_ioread_get_buffer()
272 cp,stat); pvr2_ioread_get_buffer()
273 pvr2_ioread_stop(cp); pvr2_ioread_get_buffer()
276 cp->c_buf = NULL; pvr2_ioread_get_buffer()
277 cp->c_data_ptr = NULL; pvr2_ioread_get_buffer()
278 cp->c_data_len = 0; pvr2_ioread_get_buffer()
279 cp->c_data_offs = 0; pvr2_ioread_get_buffer()
282 cp->c_buf = pvr2_stream_get_ready_buffer(cp->stream); pvr2_ioread_get_buffer()
283 if (!cp->c_buf) break; // Nothing ready; done. pvr2_ioread_get_buffer()
284 cp->c_data_len = pvr2_buffer_get_count(cp->c_buf); pvr2_ioread_get_buffer()
285 if (!cp->c_data_len) { pvr2_ioread_get_buffer()
287 stat = pvr2_buffer_get_status(cp->c_buf); pvr2_ioread_get_buffer()
294 cp,stat); pvr2_ioread_get_buffer()
295 pvr2_ioread_stop(cp); pvr2_ioread_get_buffer()
302 cp->c_data_offs = 0; pvr2_ioread_get_buffer()
303 cp->c_data_ptr = cp->buffer_storage[ pvr2_ioread_get_buffer()
304 pvr2_buffer_get_id(cp->c_buf)]; pvr2_ioread_get_buffer()
309 static void pvr2_ioread_filter(struct pvr2_ioread *cp) pvr2_ioread_filter() argument
312 if (!cp->enabled) return; pvr2_ioread_filter()
313 if (cp->sync_state != 1) return; pvr2_ioread_filter()
318 mutex_lock(&cp->mutex); while (1) { pvr2_ioread_filter()
320 if (!pvr2_ioread_get_buffer(cp)) break; pvr2_ioread_filter()
321 if (!cp->c_data_len) break; pvr2_ioread_filter()
325 for (idx = cp->c_data_offs; idx < cp->c_data_len; idx++) { pvr2_ioread_filter()
326 if (cp->sync_buf_offs >= cp->sync_key_len) break; pvr2_ioread_filter()
327 if (cp->c_data_ptr[idx] == pvr2_ioread_filter()
328 cp->sync_key_ptr[cp->sync_buf_offs]) { pvr2_ioread_filter()
330 (cp->sync_buf_offs)++; pvr2_ioread_filter()
333 cp->sync_buf_offs = 0; pvr2_ioread_filter()
338 cp->c_data_offs += idx; pvr2_ioread_filter()
339 cp->sync_trashed_count += idx; pvr2_ioread_filter()
342 if (cp->sync_buf_offs >= cp->sync_key_len) { pvr2_ioread_filter()
343 cp->sync_trashed_count -= cp->sync_key_len; pvr2_ioread_filter()
347 cp->sync_trashed_count); pvr2_ioread_filter()
348 cp->sync_state = 2; pvr2_ioread_filter()
349 cp->sync_buf_offs = 0; pvr2_ioread_filter()
353 if (cp->c_data_offs < cp->c_data_len) { pvr2_ioread_filter()
358 cp->c_data_len,cp->c_data_offs); pvr2_ioread_filter()
365 } mutex_unlock(&cp->mutex); pvr2_ioread_filter()
368 int pvr2_ioread_avail(struct pvr2_ioread *cp) pvr2_ioread_avail() argument
371 if (!(cp->enabled)) { pvr2_ioread_avail()
376 if (cp->sync_state == 1) { pvr2_ioread_avail()
377 pvr2_ioread_filter(cp); pvr2_ioread_avail()
378 if (cp->sync_state == 1) return -EAGAIN; pvr2_ioread_avail()
382 if (cp->stream_running) { pvr2_ioread_avail()
383 if (!pvr2_stream_get_ready_count(cp->stream)) { pvr2_ioread_avail()
388 if (pvr2_stream_get_ready_count(cp->stream) < BUFFER_COUNT/2) { pvr2_ioread_avail()
394 if ((!(cp->spigot_open)) != (!(ret == 0))) { pvr2_ioread_avail()
395 cp->spigot_open = (ret == 0); pvr2_ioread_avail()
398 cp->spigot_open ? "available" : "pending"); pvr2_ioread_avail()
404 int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt) pvr2_ioread_read() argument
416 " ZERO Request? Returning zero.",cp); pvr2_ioread_read()
420 stat = pvr2_ioread_avail(cp); pvr2_ioread_read()
423 cp->stream_running = !0; pvr2_ioread_read()
425 mutex_lock(&cp->mutex); do { pvr2_ioread_read()
431 if (!pvr2_ioread_get_buffer(cp)) { pvr2_ioread_read()
438 if (cp->sync_state == 2) { pvr2_ioread_read()
441 src = cp->sync_key_ptr + cp->sync_buf_offs; pvr2_ioread_read()
442 bcnt = cp->sync_key_len - cp->sync_buf_offs; pvr2_ioread_read()
445 src = cp->c_data_ptr + cp->c_data_offs; pvr2_ioread_read()
446 bcnt = cp->c_data_len - cp->c_data_offs; pvr2_ioread_read()
465 if (cp->sync_state == 2) { pvr2_ioread_read()
468 cp->sync_buf_offs += bcnt; pvr2_ioread_read()
469 if (cp->sync_buf_offs >= cp->sync_key_len) { pvr2_ioread_read()
475 cp->sync_state = 0; pvr2_ioread_read()
479 cp->c_data_offs += bcnt; pvr2_ioread_read()
483 } while (0); mutex_unlock(&cp->mutex); pvr2_ioread_read()
499 cp,req_cnt,ret); pvr2_ioread_read()
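
Note: pvr2_ioread_filter() above (lines 309-365) discards input until a fixed sync key has been matched, carrying the partial-match offset across buffer boundaries in cp->sync_buf_offs and counting discarded bytes in cp->sync_trashed_count. A minimal standalone sketch of that carry-over scan, with illustrative names rather than the driver's API:

    #include <stddef.h>

    /* Scan one chunk for `key`, resuming a partial match at *match_off.
     * Returns the number of bytes consumed from `buf`; once *match_off
     * reaches key_len the key has been fully seen.  Like the driver's
     * loop, a mismatch restarts the key from scratch without re-testing
     * the current byte, which is adequate for sync patterns whose first
     * byte does not recur mid-key. */
    static size_t sync_scan(const unsigned char *buf, size_t len,
                            const unsigned char *key, size_t key_len,
                            size_t *match_off)
    {
        size_t idx;

        for (idx = 0; idx < len; idx++) {
            if (*match_off >= key_len)
                break;                     /* key already matched */
            if (buf[idx] == key[*match_off])
                (*match_off)++;            /* extend partial match */
            else
                *match_off = 0;            /* mismatch: start over */
        }
        return idx;                        /* bytes consumed from buf */
    }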
pvrusb2-context.c
250 struct pvr2_channel *cp; pvr2_context_reset_input_limits() local
254 for (cp = mp->mc_first; cp; cp = cp->mc_next) { pvr2_context_reset_input_limits()
255 if (!cp->input_mask) continue; pvr2_context_reset_input_limits()
256 tmsk &= cp->input_mask; pvr2_context_reset_input_limits()
288 void pvr2_channel_init(struct pvr2_channel *cp,struct pvr2_context *mp) pvr2_channel_init() argument
291 cp->hdw = mp->hdw; pvr2_channel_init()
292 cp->mc_head = mp; pvr2_channel_init()
293 cp->mc_next = NULL; pvr2_channel_init()
294 cp->mc_prev = mp->mc_last; pvr2_channel_init()
296 mp->mc_last->mc_next = cp; pvr2_channel_init()
298 mp->mc_first = cp; pvr2_channel_init()
300 mp->mc_last = cp; pvr2_channel_init()
305 static void pvr2_channel_disclaim_stream(struct pvr2_channel *cp) pvr2_channel_disclaim_stream() argument
307 if (!cp->stream) return; pvr2_channel_disclaim_stream()
308 pvr2_stream_kill(cp->stream->stream); pvr2_channel_disclaim_stream()
309 cp->stream->user = NULL; pvr2_channel_disclaim_stream()
310 cp->stream = NULL; pvr2_channel_disclaim_stream()
314 void pvr2_channel_done(struct pvr2_channel *cp) pvr2_channel_done() argument
316 struct pvr2_context *mp = cp->mc_head; pvr2_channel_done()
318 cp->input_mask = 0; pvr2_channel_done()
319 pvr2_channel_disclaim_stream(cp); pvr2_channel_done()
321 if (cp->mc_next) { pvr2_channel_done()
322 cp->mc_next->mc_prev = cp->mc_prev; pvr2_channel_done()
324 mp->mc_last = cp->mc_prev; pvr2_channel_done()
326 if (cp->mc_prev) { pvr2_channel_done()
327 cp->mc_prev->mc_next = cp->mc_next; pvr2_channel_done()
329 mp->mc_first = cp->mc_next; pvr2_channel_done()
331 cp->hdw = NULL; pvr2_channel_done()
336 int pvr2_channel_limit_inputs(struct pvr2_channel *cp,unsigned int cmsk) pvr2_channel_limit_inputs() argument
341 struct pvr2_hdw *hdw = cp->hdw; pvr2_channel_limit_inputs()
345 if (cmsk == cp->input_mask) { pvr2_channel_limit_inputs()
350 pvr2_context_enter(cp->mc_head); pvr2_channel_limit_inputs()
353 cp->input_mask = 0; pvr2_channel_limit_inputs()
354 pvr2_context_reset_input_limits(cp->mc_head); pvr2_channel_limit_inputs()
358 for (p2 = cp->mc_head->mc_first; p2; p2 = p2->mc_next) { pvr2_channel_limit_inputs()
359 if (p2 == cp) continue; pvr2_channel_limit_inputs()
373 cp->input_mask = cmsk; pvr2_channel_limit_inputs()
376 pvr2_context_exit(cp->mc_head); pvr2_channel_limit_inputs()
381 unsigned int pvr2_channel_get_limited_inputs(struct pvr2_channel *cp) pvr2_channel_get_limited_inputs() argument
383 return cp->input_mask; pvr2_channel_get_limited_inputs()
387 int pvr2_channel_claim_stream(struct pvr2_channel *cp, pvr2_channel_claim_stream() argument
391 pvr2_context_enter(cp->mc_head); do { pvr2_channel_claim_stream()
392 if (sp == cp->stream) break; pvr2_channel_claim_stream()
397 pvr2_channel_disclaim_stream(cp); pvr2_channel_claim_stream()
399 sp->user = cp; pvr2_channel_claim_stream()
400 cp->stream = sp; pvr2_channel_claim_stream()
401 } while (0); pvr2_context_exit(cp->mc_head); pvr2_channel_claim_stream()
414 struct pvr2_ioread *cp; pvr2_channel_create_mpeg_stream() local
415 cp = pvr2_ioread_create(); pvr2_channel_create_mpeg_stream()
416 if (!cp) return NULL; pvr2_channel_create_mpeg_stream()
417 pvr2_ioread_setup(cp,sp->stream); pvr2_channel_create_mpeg_stream()
418 pvr2_ioread_set_sync_key(cp,stream_sync_key,sizeof(stream_sync_key)); pvr2_channel_create_mpeg_stream()
419 return cp; pvr2_channel_create_mpeg_stream()
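
Note: pvr2_channel_init() and pvr2_channel_done() above maintain a hand-rolled doubly linked list of channels on the context (mc_first/mc_last on the head, mc_prev/mc_next per node). The same append/unlink pattern in isolation, with generic names:

    struct node {
        struct node *prev, *next;
    };

    struct list {
        struct node *first, *last;
    };

    /* append at the tail, as pvr2_channel_init() does */
    static void list_append(struct list *l, struct node *n)
    {
        n->next = NULL;
        n->prev = l->last;
        if (l->last)
            l->last->next = n;     /* non-empty list: chain after tail */
        else
            l->first = n;          /* empty list: n becomes the head */
        l->last = n;
    }

    /* unlink from anywhere, as pvr2_channel_done() does */
    static void list_unlink(struct list *l, struct node *n)
    {
        if (n->next)
            n->next->prev = n->prev;
        else
            l->last = n->prev;     /* n was the tail */
        if (n->prev)
            n->prev->next = n->next;
        else
            l->first = n->next;    /* n was the head */
    }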
/linux-4.1.27/drivers/s390/char/
con3270.c
76 static void con3270_set_timer(struct con3270 *cp, int expires) con3270_set_timer() argument
79 del_timer(&cp->timer); con3270_set_timer()
81 mod_timer(&cp->timer, jiffies + expires); con3270_set_timer()
90 con3270_update_status(struct con3270 *cp) con3270_update_status() argument
94 str = (cp->nr_up != 0) ? "History" : "Running"; con3270_update_status()
95 memcpy(cp->status->string + 24, str, 7); con3270_update_status()
96 codepage_convert(cp->view.ascebc, cp->status->string + 24, 7); con3270_update_status()
97 cp->update_flags |= CON_UPDATE_STATUS; con3270_update_status()
101 con3270_create_status(struct con3270 *cp) con3270_create_status() argument
108 cp->status = alloc_string(&cp->freemem, sizeof(blueprint)); con3270_create_status()
110 memcpy(cp->status->string, blueprint, sizeof(blueprint)); con3270_create_status()
112 raw3270_buffer_address(cp->view.dev, cp->status->string + 1, con3270_create_status()
113 cp->view.cols * (cp->view.rows - 1)); con3270_create_status()
114 raw3270_buffer_address(cp->view.dev, cp->status->string + 21, con3270_create_status()
115 cp->view.cols * cp->view.rows - 8); con3270_create_status()
117 codepage_convert(cp->view.ascebc, cp->status->string + 8, 12); con3270_create_status()
118 codepage_convert(cp->view.ascebc, cp->status->string + 24, 7); con3270_create_status()
125 con3270_update_string(struct con3270 *cp, struct string *s, int nr) con3270_update_string() argument
127 if (s->len >= cp->view.cols - 5) con3270_update_string()
129 raw3270_buffer_address(cp->view.dev, s->string + s->len - 3, con3270_update_string()
130 cp->view.cols * (nr + 1)); con3270_update_string()
137 con3270_rebuild_update(struct con3270 *cp) con3270_rebuild_update() argument
146 list_for_each_entry_safe(s, n, &cp->update, update) con3270_rebuild_update()
148 nr = cp->view.rows - 2 + cp->nr_up; con3270_rebuild_update()
149 list_for_each_entry_reverse(s, &cp->lines, list) { con3270_rebuild_update()
150 if (nr < cp->view.rows - 1) con3270_rebuild_update()
151 list_add(&s->update, &cp->update); con3270_rebuild_update()
155 cp->line_nr = 0; con3270_rebuild_update()
156 cp->update_flags |= CON_UPDATE_LIST; con3270_rebuild_update()
163 con3270_alloc_string(struct con3270 *cp, size_t size) con3270_alloc_string() argument
167 s = alloc_string(&cp->freemem, size); con3270_alloc_string()
170 list_for_each_entry_safe(s, n, &cp->lines, list) { con3270_alloc_string()
174 cp->nr_lines--; con3270_alloc_string()
175 if (free_string(&cp->freemem, s) >= size) con3270_alloc_string()
178 s = alloc_string(&cp->freemem, size); con3270_alloc_string()
180 if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) { con3270_alloc_string()
181 cp->nr_up = cp->nr_lines - cp->view.rows + 1; con3270_alloc_string()
182 con3270_rebuild_update(cp); con3270_alloc_string()
183 con3270_update_status(cp); con3270_alloc_string()
202 con3270_update(struct con3270 *cp) con3270_update() argument
211 if (!auto_update && !raw3270_view_active(&cp->view)) con3270_update()
213 if (cp->view.dev) con3270_update()
214 raw3270_activate_view(&cp->view); con3270_update()
216 wrq = xchg(&cp->write, 0); con3270_update()
218 con3270_set_timer(cp, 1); con3270_update()
222 spin_lock_irqsave(&cp->view.lock, flags); con3270_update()
224 if (cp->update_flags & CON_UPDATE_ALL) { con3270_update()
225 con3270_rebuild_update(cp); con3270_update()
226 con3270_update_status(cp); con3270_update()
227 cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST | con3270_update()
230 if (cp->update_flags & CON_UPDATE_ERASE) { con3270_update()
243 if (cp->update_flags & CON_UPDATE_STATUS) con3270_update()
244 if (raw3270_request_add_data(wrq, cp->status->string, con3270_update()
245 cp->status->len) == 0) con3270_update()
248 if (cp->update_flags & CON_UPDATE_LIST) { con3270_update()
253 raw3270_buffer_address(cp->view.dev, prolog + 1, con3270_update()
254 cp->view.cols * cp->line_nr); con3270_update()
257 list_for_each_entry_safe(s, n, &cp->update, update) { con3270_update()
258 if (s != cp->cline) con3270_update()
259 con3270_update_string(cp, s, cp->line_nr); con3270_update()
264 if (s != cp->cline) con3270_update()
265 cp->line_nr++; con3270_update()
267 if (list_empty(&cp->update)) con3270_update()
271 rc = raw3270_start(&cp->view, wrq); con3270_update()
273 cp->update_flags &= ~updated; con3270_update()
274 if (cp->update_flags) con3270_update()
275 con3270_set_timer(cp, 1); con3270_update()
278 xchg(&cp->write, wrq); con3270_update()
280 spin_unlock_irqrestore(&cp->view.lock, flags); con3270_update()
290 struct con3270 *cp; con3270_read_tasklet() local
294 cp = (struct con3270 *) rrq->view; con3270_read_tasklet()
295 spin_lock_irqsave(&cp->view.lock, flags); con3270_read_tasklet()
296 nr_up = cp->nr_up; con3270_read_tasklet()
299 switch (cp->input->string[0]) { con3270_read_tasklet()
307 cp->update_flags = CON_UPDATE_ALL; con3270_read_tasklet()
308 con3270_set_timer(cp, 1); con3270_read_tasklet()
311 nr_up += cp->view.rows - 2; con3270_read_tasklet()
312 if (nr_up + cp->view.rows - 1 > cp->nr_lines) { con3270_read_tasklet()
313 nr_up = cp->nr_lines - cp->view.rows + 1; con3270_read_tasklet()
319 nr_up -= cp->view.rows - 2; con3270_read_tasklet()
324 if (nr_up != cp->nr_up) { con3270_read_tasklet()
325 cp->nr_up = nr_up; con3270_read_tasklet()
326 con3270_rebuild_update(cp); con3270_read_tasklet()
327 con3270_update_status(cp); con3270_read_tasklet()
328 con3270_set_timer(cp, 1); con3270_read_tasklet()
330 spin_unlock_irqrestore(&cp->view.lock, flags); con3270_read_tasklet()
333 raw3270_request_reset(cp->kreset); con3270_read_tasklet()
334 raw3270_request_set_cmd(cp->kreset, TC_WRITE); con3270_read_tasklet()
335 raw3270_request_add_data(cp->kreset, &kreset_data, 1); con3270_read_tasklet()
336 raw3270_start(&cp->view, cp->kreset); con3270_read_tasklet()
339 raw3270_deactivate_view(&cp->view); con3270_read_tasklet()
342 xchg(&cp->read, rrq); con3270_read_tasklet()
343 raw3270_put_view(&cp->view); con3270_read_tasklet()
361 con3270_issue_read(struct con3270 *cp) con3270_issue_read() argument
366 rrq = xchg(&cp->read, 0); con3270_issue_read()
371 rrq->callback_data = cp; con3270_issue_read()
373 raw3270_request_set_data(rrq, cp->input->string, cp->input->len); con3270_issue_read()
375 rc = raw3270_start_irq(&cp->view, rrq); con3270_issue_read()
386 struct con3270 *cp; con3270_activate() local
388 cp = (struct con3270 *) view; con3270_activate()
389 cp->update_flags = CON_UPDATE_ALL; con3270_activate()
390 con3270_set_timer(cp, 1); con3270_activate()
397 struct con3270 *cp; con3270_deactivate() local
399 cp = (struct con3270 *) view; con3270_deactivate()
400 del_timer(&cp->timer); con3270_deactivate()
404 con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) con3270_irq() argument
408 con3270_issue_read(cp); con3270_irq()
428 con3270_cline_add(struct con3270 *cp) con3270_cline_add() argument
430 if (!list_empty(&cp->cline->list)) con3270_cline_add()
433 list_add_tail(&cp->cline->list, &cp->lines); con3270_cline_add()
434 cp->nr_lines++; con3270_cline_add()
435 con3270_rebuild_update(cp); con3270_cline_add()
439 con3270_cline_insert(struct con3270 *cp, unsigned char c) con3270_cline_insert() argument
441 cp->cline->string[cp->cline->len++] = con3270_cline_insert()
442 cp->view.ascebc[(c < ' ') ? ' ' : c]; con3270_cline_insert()
443 if (list_empty(&cp->cline->update)) { con3270_cline_insert()
444 list_add_tail(&cp->cline->update, &cp->update); con3270_cline_insert()
445 cp->update_flags |= CON_UPDATE_LIST; con3270_cline_insert()
450 con3270_cline_end(struct con3270 *cp) con3270_cline_end() argument
456 size = (cp->cline->len < cp->view.cols - 5) ? con3270_cline_end()
457 cp->cline->len + 4 : cp->view.cols; con3270_cline_end()
458 s = con3270_alloc_string(cp, size); con3270_cline_end()
459 memcpy(s->string, cp->cline->string, cp->cline->len); con3270_cline_end()
460 if (s->len < cp->view.cols - 5) { con3270_cline_end()
464 while (--size > cp->cline->len) con3270_cline_end()
465 s->string[size] = cp->view.ascebc[' ']; con3270_cline_end()
468 list_add(&s->list, &cp->cline->list); con3270_cline_end()
469 list_del_init(&cp->cline->list); con3270_cline_end()
470 if (!list_empty(&cp->cline->update)) { con3270_cline_end()
471 list_add(&s->update, &cp->cline->update); con3270_cline_end()
472 list_del_init(&cp->cline->update); con3270_cline_end()
474 cp->cline->len = 0; con3270_cline_end()
483 struct con3270 *cp; con3270_write() local
487 cp = condev; con3270_write()
488 spin_lock_irqsave(&cp->view.lock, flags); con3270_write()
491 if (cp->cline->len == 0) con3270_write()
492 con3270_cline_add(cp); con3270_write()
494 con3270_cline_insert(cp, c); con3270_write()
495 if (c == '\n' || cp->cline->len >= cp->view.cols) con3270_write()
496 con3270_cline_end(cp); con3270_write()
499 cp->nr_up = 0; con3270_write()
500 if (cp->view.dev && !timer_pending(&cp->timer)) con3270_write()
501 con3270_set_timer(cp, HZ/10); con3270_write()
502 spin_unlock_irqrestore(&cp->view.lock,flags); con3270_write()
516 con3270_wait_write(struct con3270 *cp) con3270_wait_write() argument
518 while (!cp->write) { con3270_wait_write()
519 raw3270_wait_cons_dev(cp->view.dev); con3270_wait_write()
531 struct con3270 *cp; con3270_flush() local
534 cp = condev; con3270_flush()
535 if (!cp->view.dev) con3270_flush()
537 raw3270_pm_unfreeze(&cp->view); con3270_flush()
538 raw3270_activate_view(&cp->view); con3270_flush()
539 spin_lock_irqsave(&cp->view.lock, flags); con3270_flush()
540 con3270_wait_write(cp); con3270_flush()
541 cp->nr_up = 0; con3270_flush()
542 con3270_rebuild_update(cp); con3270_flush()
543 con3270_update_status(cp); con3270_flush()
544 while (cp->update_flags != 0) { con3270_flush()
545 spin_unlock_irqrestore(&cp->view.lock, flags); con3270_flush()
546 con3270_update(cp); con3270_flush()
547 spin_lock_irqsave(&cp->view.lock, flags); con3270_flush()
548 con3270_wait_write(cp); con3270_flush()
550 spin_unlock_irqrestore(&cp->view.lock, flags); con3270_flush()
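
Note: con3270 never writes to the device from the output path. con3270_write() edits the line buffers, sets bits in cp->update_flags, and arms a short timer (con3270_set_timer) so bursts of console output coalesce into one channel program built later by con3270_update(). A simplified, non-kernel sketch of that dirty-flag/timer coalescing (names and the timer stub are illustrative; the driver uses mod_timer/del_timer under view.lock):

    #define UPD_STATUS 0x01            /* status line needs rewriting */
    #define UPD_LIST   0x02            /* line list needs rewriting */

    struct console_state {
        unsigned int update_flags;     /* which screen parts are dirty */
        int timer_armed;               /* stand-in for timer_pending() */
    };

    /* stand-in for con3270_set_timer(): schedule flush_update() */
    static void arm_timer(struct console_state *c)
    {
        c->timer_armed = 1;
    }

    /* writer side: mark dirty, ensure exactly one flush is pending */
    static void mark_dirty(struct console_state *c, unsigned int what)
    {
        c->update_flags |= what;
        if (!c->timer_armed)
            arm_timer(c);
    }

    /* timer side: one write covers everything dirtied since last time */
    static void flush_update(struct console_state *c)
    {
        unsigned int pending = c->update_flags;

        c->update_flags = 0;
        c->timer_armed = 0;
        (void)pending;                 /* ...build and start one I/O... */
    }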
keyboard.h
51 kbd_puts_queue(struct tty_port *port, char *cp) kbd_puts_queue() argument
53 while (*cp) kbd_puts_queue()
54 tty_insert_flip_char(port, *cp++, 0); kbd_puts_queue()
fs3270.c
121 char *cp; fs3270_activate() local
139 cp = fp->rdbuf->data[0]; fs3270_activate()
140 cp[0] = TW_KR; fs3270_activate()
141 cp[1] = TO_SBA; fs3270_activate()
142 cp[2] = cp[6]; fs3270_activate()
143 cp[3] = cp[7]; fs3270_activate()
144 cp[4] = TO_IC; fs3270_activate()
145 cp[5] = TO_SBA; fs3270_activate()
146 cp[6] = 0x40; fs3270_activate()
147 cp[7] = 0x40; fs3270_activate()
tty3270.c
229 unsigned char *cp; tty3270_update_string() local
233 cp = line->string + line->len - 4; tty3270_update_string()
234 if (*cp == TO_RA) tty3270_update_string()
235 raw3270_buffer_address(tp->view.dev, cp + 1, tty3270_update_string()
1069 char *cp; tty3270_convert_line() local
1115 cp = s->string; tty3270_convert_line()
1116 *cp++ = TO_SBA; tty3270_convert_line()
1117 *cp++ = 0; tty3270_convert_line()
1118 *cp++ = 0; tty3270_convert_line()
1124 *cp++ = TO_SA; tty3270_convert_line()
1125 *cp++ = TAT_EXTHI; tty3270_convert_line()
1126 *cp++ = cell->highlight; tty3270_convert_line()
1130 *cp++ = TO_SA; tty3270_convert_line()
1131 *cp++ = TAT_COLOR; tty3270_convert_line()
1132 *cp++ = cell->f_color; tty3270_convert_line()
1135 *cp++ = cell->character; tty3270_convert_line()
1138 *cp++ = TO_SA; tty3270_convert_line()
1139 *cp++ = TAT_EXTHI; tty3270_convert_line()
1140 *cp++ = TAX_RESET; tty3270_convert_line()
1143 *cp++ = TO_SA; tty3270_convert_line()
1144 *cp++ = TAT_COLOR; tty3270_convert_line()
1145 *cp++ = TAC_RESET; tty3270_convert_line()
1148 *cp++ = TO_RA; tty3270_convert_line()
1149 *cp++ = 0; tty3270_convert_line()
1150 *cp++ = 0; tty3270_convert_line()
1151 *cp++ = 0; tty3270_convert_line()
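
Note: tty3270_convert_line() above serializes one screen line into a 3270 data stream by appending single-byte "orders" and their operands through a moving write pointer (*cp++ = ...). The emitter shape, with placeholder order codes (the real TO_SA/TO_SBA/TO_RA values come from the driver's headers and are not shown in this excerpt):

    enum { ORD_SA, ORD_SBA, ORD_RA };  /* placeholders for TO_SA etc. */

    /* emit a Set Attribute triple, as the highlight/color cases do */
    static unsigned char *emit_sa(unsigned char *cp,
                                  unsigned char attr, unsigned char val)
    {
        *cp++ = ORD_SA;                /* order byte */
        *cp++ = attr;                  /* attribute selector */
        *cp++ = val;                   /* value, or a reset code */
        return cp;
    }

The driver brackets each styled cell with such triples and emits matching reset triples before closing the line with a Repeat-to-Address order.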
/linux-4.1.27/drivers/net/ethernet/sun/
cassini.c
123 * also, we need to make cp->lock finer-grained.
173 #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
242 static void cas_set_link_modes(struct cas *cp);
244 static inline void cas_lock_tx(struct cas *cp) cas_lock_tx() argument
249 spin_lock_nested(&cp->tx_lock[i], i); cas_lock_tx()
252 static inline void cas_lock_all(struct cas *cp) cas_lock_all() argument
254 spin_lock_irq(&cp->lock); cas_lock_all()
255 cas_lock_tx(cp); cas_lock_all()
266 #define cas_lock_all_save(cp, flags) \
268 struct cas *xxxcp = (cp); \
273 static inline void cas_unlock_tx(struct cas *cp) cas_unlock_tx() argument
278 spin_unlock(&cp->tx_lock[i - 1]); cas_unlock_tx()
281 static inline void cas_unlock_all(struct cas *cp) cas_unlock_all() argument
283 cas_unlock_tx(cp); cas_unlock_all()
284 spin_unlock_irq(&cp->lock); cas_unlock_all()
287 #define cas_unlock_all_restore(cp, flags) \
289 struct cas *xxxcp = (cp); \
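
Note: cas_lock_tx() takes every per-ring TX lock in ascending index order and passes the index to spin_lock_nested() as the lockdep subclass, so the deliberate nesting of same-class locks is not flagged as a self-deadlock; cas_unlock_tx() releases them in reverse. The pattern in isolation (kernel-style sketch; the ring count and structure are assumptions):

    #include <linux/spinlock.h>

    #define N_RINGS 4                  /* assumption for the sketch */

    struct dev_state {
        spinlock_t ring_lock[N_RINGS];
    };

    static void lock_all_rings(struct dev_state *d)
    {
        int i;

        /* fixed ascending order prevents ABBA deadlocks; the subclass
         * tells lockdep each level is a distinct nesting depth */
        for (i = 0; i < N_RINGS; i++)
            spin_lock_nested(&d->ring_lock[i], i);
    }

    static void unlock_all_rings(struct dev_state *d)
    {
        int i;

        for (i = N_RINGS; i > 0; i--)
            spin_unlock(&d->ring_lock[i - 1]);
    }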
294 static void cas_disable_irq(struct cas *cp, const int ring) cas_disable_irq() argument
298 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); cas_disable_irq()
303 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { cas_disable_irq()
316 cp->regs + REG_PLUS_INTRN_MASK(ring)); cas_disable_irq()
320 writel(INTRN_MASK_CLEAR_ALL, cp->regs + cas_disable_irq()
327 static inline void cas_mask_intr(struct cas *cp) cas_mask_intr() argument
332 cas_disable_irq(cp, i); cas_mask_intr()
335 static void cas_enable_irq(struct cas *cp, const int ring) cas_enable_irq() argument
338 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); cas_enable_irq()
342 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { cas_enable_irq()
354 writel(INTRN_MASK_RX_EN, cp->regs + cas_enable_irq()
364 static inline void cas_unmask_intr(struct cas *cp) cas_unmask_intr() argument
369 cas_enable_irq(cp, i); cas_unmask_intr()
372 static inline void cas_entropy_gather(struct cas *cp) cas_entropy_gather() argument
375 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) cas_entropy_gather()
378 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV), cas_entropy_gather()
379 readl(cp->regs + REG_ENTROPY_IV), cas_entropy_gather()
384 static inline void cas_entropy_reset(struct cas *cp) cas_entropy_reset() argument
387 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) cas_entropy_reset()
391 cp->regs + REG_BIM_LOCAL_DEV_EN); cas_entropy_reset()
392 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET); cas_entropy_reset()
393 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG); cas_entropy_reset()
396 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0) cas_entropy_reset()
397 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV; cas_entropy_reset()
404 static u16 cas_phy_read(struct cas *cp, int reg) cas_phy_read() argument
410 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); cas_phy_read()
413 writel(cmd, cp->regs + REG_MIF_FRAME); cas_phy_read()
418 cmd = readl(cp->regs + REG_MIF_FRAME); cas_phy_read()
425 static int cas_phy_write(struct cas *cp, int reg, u16 val) cas_phy_write() argument
431 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); cas_phy_write()
435 writel(cmd, cp->regs + REG_MIF_FRAME); cas_phy_write()
440 cmd = readl(cp->regs + REG_MIF_FRAME); cas_phy_write()
447 static void cas_phy_powerup(struct cas *cp) cas_phy_powerup() argument
449 u16 ctl = cas_phy_read(cp, MII_BMCR); cas_phy_powerup()
454 cas_phy_write(cp, MII_BMCR, ctl); cas_phy_powerup()
457 static void cas_phy_powerdown(struct cas *cp) cas_phy_powerdown() argument
459 u16 ctl = cas_phy_read(cp, MII_BMCR); cas_phy_powerdown()
464 cas_phy_write(cp, MII_BMCR, ctl); cas_phy_powerdown()
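
Note: cas_phy_read()/cas_phy_write() above build an MDIO frame word, write it to the MIF frame register, and re-read the register in a bounded loop; completion is signalled in the frame's turnaround bits (the check itself is elided from this excerpt). A kernel-style sketch of that poll, with the register layout left symbolic:

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    /* symbolic stand-ins for the MIF frame register layout */
    #define FRAME_TA_BIT   0x00020000u /* illustrative turnaround bit */
    #define FRAME_DATA     0x0000ffffu /* 16-bit PHY data field */

    static int mif_frame_poll(void __iomem *frame_reg, u32 cmd, u16 *data)
    {
        int limit = 4096;              /* bounded busy-wait */
        u32 v;

        writel(cmd, frame_reg);
        while (limit-- > 0) {
            udelay(10);
            v = readl(frame_reg);
            if (v & FRAME_TA_BIT) {    /* PHY drove the turnaround */
                *data = v & FRAME_DATA;
                return 0;
            }
        }
        return -ETIMEDOUT;
    }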
467 /* cp->lock held. note: the last put_page will free the buffer */ cas_page_free()
468 static int cas_page_free(struct cas *cp, cas_page_t *page) cas_page_free() argument
470 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, cas_page_free()
472 __free_pages(page->buffer, cp->page_order); cas_page_free()
488 static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) cas_page_alloc() argument
498 page->buffer = alloc_pages(flags, cp->page_order); cas_page_alloc()
501 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, cas_page_alloc()
502 cp->page_size, PCI_DMA_FROMDEVICE); cas_page_alloc()
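
Note: cas_page_alloc()/cas_page_free() pair alloc_pages() with pci_map_page()/pci_unmap_page() so the NIC can DMA received frames into whole page runs (cp->page_size is effectively PAGE_SIZE << cp->page_order). The lifecycle reduced to its essentials (kernel-style sketch; the driver's struct and error handling are trimmed):

    #include <linux/pci.h>
    #include <linux/gfp.h>

    struct rx_page {
        struct page *buffer;
        dma_addr_t dma_addr;
    };

    static int rx_page_map(struct pci_dev *pdev, struct rx_page *p,
                           int order, gfp_t flags)
    {
        p->buffer = alloc_pages(flags, order);
        if (!p->buffer)
            return -ENOMEM;
        /* hand the device a bus address for the whole page run */
        p->dma_addr = pci_map_page(pdev, p->buffer, 0,
                                   PAGE_SIZE << order,
                                   PCI_DMA_FROMDEVICE);
        return 0;
    }

    static void rx_page_unmap(struct pci_dev *pdev, struct rx_page *p,
                              int order)
    {
        pci_unmap_page(pdev, p->dma_addr, PAGE_SIZE << order,
                       PCI_DMA_FROMDEVICE);
        __free_pages(p->buffer, order);
    }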
511 static void cas_spare_init(struct cas *cp) cas_spare_init() argument
513 spin_lock(&cp->rx_inuse_lock); cas_spare_init()
514 INIT_LIST_HEAD(&cp->rx_inuse_list); cas_spare_init()
515 spin_unlock(&cp->rx_inuse_lock); cas_spare_init()
517 spin_lock(&cp->rx_spare_lock); cas_spare_init()
518 INIT_LIST_HEAD(&cp->rx_spare_list); cas_spare_init()
519 cp->rx_spares_needed = RX_SPARE_COUNT; cas_spare_init()
520 spin_unlock(&cp->rx_spare_lock); cas_spare_init()
524 static void cas_spare_free(struct cas *cp) cas_spare_free() argument
530 spin_lock(&cp->rx_spare_lock); cas_spare_free()
531 list_splice_init(&cp->rx_spare_list, &list); cas_spare_free()
532 spin_unlock(&cp->rx_spare_lock); cas_spare_free()
534 cas_page_free(cp, list_entry(elem, cas_page_t, list)); cas_spare_free()
543 spin_lock(&cp->rx_inuse_lock); cas_spare_free()
544 list_splice_init(&cp->rx_inuse_list, &list); cas_spare_free()
545 spin_unlock(&cp->rx_inuse_lock); cas_spare_free()
547 spin_lock(&cp->rx_spare_lock); cas_spare_free()
548 list_splice_init(&cp->rx_inuse_list, &list); cas_spare_free()
549 spin_unlock(&cp->rx_spare_lock); cas_spare_free()
552 cas_page_free(cp, list_entry(elem, cas_page_t, list)); cas_spare_free()
557 static void cas_spare_recover(struct cas *cp, const gfp_t flags) cas_spare_recover() argument
568 spin_lock(&cp->rx_inuse_lock); cas_spare_recover()
569 list_splice_init(&cp->rx_inuse_list, &list); cas_spare_recover()
570 spin_unlock(&cp->rx_inuse_lock); cas_spare_recover()
591 spin_lock(&cp->rx_spare_lock); cas_spare_recover()
592 if (cp->rx_spares_needed > 0) { cas_spare_recover()
593 list_add(elem, &cp->rx_spare_list); cas_spare_recover()
594 cp->rx_spares_needed--; cas_spare_recover()
595 spin_unlock(&cp->rx_spare_lock); cas_spare_recover()
597 spin_unlock(&cp->rx_spare_lock); cas_spare_recover()
598 cas_page_free(cp, page); cas_spare_recover()
604 spin_lock(&cp->rx_inuse_lock); cas_spare_recover()
605 list_splice(&list, &cp->rx_inuse_list); cas_spare_recover()
606 spin_unlock(&cp->rx_inuse_lock); cas_spare_recover()
609 spin_lock(&cp->rx_spare_lock); cas_spare_recover()
610 needed = cp->rx_spares_needed; cas_spare_recover()
611 spin_unlock(&cp->rx_spare_lock); cas_spare_recover()
619 cas_page_t *spare = cas_page_alloc(cp, flags); cas_spare_recover()
626 spin_lock(&cp->rx_spare_lock); cas_spare_recover()
627 list_splice(&list, &cp->rx_spare_list); cas_spare_recover()
628 cp->rx_spares_needed -= i; cas_spare_recover()
629 spin_unlock(&cp->rx_spare_lock); cas_spare_recover()
633 static cas_page_t *cas_page_dequeue(struct cas *cp) cas_page_dequeue() argument
638 spin_lock(&cp->rx_spare_lock); cas_page_dequeue()
639 if (list_empty(&cp->rx_spare_list)) { cas_page_dequeue()
641 spin_unlock(&cp->rx_spare_lock); cas_page_dequeue()
642 cas_spare_recover(cp, GFP_ATOMIC); cas_page_dequeue()
643 spin_lock(&cp->rx_spare_lock); cas_page_dequeue()
644 if (list_empty(&cp->rx_spare_list)) { cas_page_dequeue()
645 netif_err(cp, rx_err, cp->dev, cas_page_dequeue()
647 spin_unlock(&cp->rx_spare_lock); cas_page_dequeue()
652 entry = cp->rx_spare_list.next; cas_page_dequeue()
654 recover = ++cp->rx_spares_needed; cas_page_dequeue()
655 spin_unlock(&cp->rx_spare_lock); cas_page_dequeue()
660 atomic_inc(&cp->reset_task_pending); cas_page_dequeue()
661 atomic_inc(&cp->reset_task_pending_spare); cas_page_dequeue()
662 schedule_work(&cp->reset_task); cas_page_dequeue()
664 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE); cas_page_dequeue()
665 schedule_work(&cp->reset_task); cas_page_dequeue()
672 static void cas_mif_poll(struct cas *cp, const int enable) cas_mif_poll() argument
676 cfg = readl(cp->regs + REG_MIF_CFG); cas_mif_poll()
679 if (cp->phy_type & CAS_PHY_MII_MDIO1) cas_mif_poll()
686 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr); cas_mif_poll()
689 cp->regs + REG_MIF_MASK); cas_mif_poll()
690 writel(cfg, cp->regs + REG_MIF_CFG); cas_mif_poll()
693 /* Must be invoked under cp->lock */ cas_begin_auto_negotiation()
694 static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep) cas_begin_auto_negotiation() argument
700 int oldstate = cp->lstate; cas_begin_auto_negotiation()
706 lcntl = cp->link_cntl; cas_begin_auto_negotiation()
708 cp->link_cntl = BMCR_ANENABLE; cas_begin_auto_negotiation()
711 cp->link_cntl = 0; cas_begin_auto_negotiation()
713 cp->link_cntl |= BMCR_SPEED100; cas_begin_auto_negotiation()
715 cp->link_cntl |= CAS_BMCR_SPEED1000; cas_begin_auto_negotiation()
717 cp->link_cntl |= BMCR_FULLDPLX; cas_begin_auto_negotiation()
720 changed = (lcntl != cp->link_cntl); cas_begin_auto_negotiation()
723 if (cp->lstate == link_up) { cas_begin_auto_negotiation()
724 netdev_info(cp->dev, "PCS link down\n"); cas_begin_auto_negotiation()
727 netdev_info(cp->dev, "link configuration changed\n"); cas_begin_auto_negotiation()
730 cp->lstate = link_down; cas_begin_auto_negotiation()
731 cp->link_transition = LINK_TRANSITION_LINK_DOWN; cas_begin_auto_negotiation()
732 if (!cp->hw_running) cas_begin_auto_negotiation()
741 netif_carrier_off(cp->dev); cas_begin_auto_negotiation()
748 atomic_inc(&cp->reset_task_pending); cas_begin_auto_negotiation()
749 atomic_inc(&cp->reset_task_pending_all); cas_begin_auto_negotiation()
750 schedule_work(&cp->reset_task); cas_begin_auto_negotiation()
751 cp->timer_ticks = 0; cas_begin_auto_negotiation()
752 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); cas_begin_auto_negotiation()
756 if (cp->phy_type & CAS_PHY_SERDES) { cas_begin_auto_negotiation()
757 u32 val = readl(cp->regs + REG_PCS_MII_CTRL); cas_begin_auto_negotiation()
759 if (cp->link_cntl & BMCR_ANENABLE) { cas_begin_auto_negotiation()
761 cp->lstate = link_aneg; cas_begin_auto_negotiation()
763 if (cp->link_cntl & BMCR_FULLDPLX) cas_begin_auto_negotiation()
766 cp->lstate = link_force_ok; cas_begin_auto_negotiation()
768 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; cas_begin_auto_negotiation()
769 writel(val, cp->regs + REG_PCS_MII_CTRL); cas_begin_auto_negotiation()
772 cas_mif_poll(cp, 0); cas_begin_auto_negotiation()
773 ctl = cas_phy_read(cp, MII_BMCR); cas_begin_auto_negotiation()
776 ctl |= cp->link_cntl; cas_begin_auto_negotiation()
779 cp->lstate = link_aneg; cas_begin_auto_negotiation()
781 cp->lstate = link_force_ok; cas_begin_auto_negotiation()
783 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; cas_begin_auto_negotiation()
784 cas_phy_write(cp, MII_BMCR, ctl); cas_begin_auto_negotiation()
785 cas_mif_poll(cp, 1); cas_begin_auto_negotiation()
788 cp->timer_ticks = 0; cas_begin_auto_negotiation()
789 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); cas_begin_auto_negotiation()
792 /* Must be invoked under cp->lock. */ cas_reset_mii_phy()
793 static int cas_reset_mii_phy(struct cas *cp) cas_reset_mii_phy() argument
798 cas_phy_write(cp, MII_BMCR, BMCR_RESET); cas_reset_mii_phy()
801 val = cas_phy_read(cp, MII_BMCR); cas_reset_mii_phy()
809 static void cas_saturn_firmware_init(struct cas *cp) cas_saturn_firmware_init() argument
815 if (PHY_NS_DP83065 != cp->phy_id) cas_saturn_firmware_init()
818 err = request_firmware(&fw, fw_name, &cp->pdev->dev); cas_saturn_firmware_init()
829 cp->fw_load_addr= fw->data[1] << 8 | fw->data[0]; cas_saturn_firmware_init()
830 cp->fw_size = fw->size - 2; cas_saturn_firmware_init()
831 cp->fw_data = vmalloc(cp->fw_size); cas_saturn_firmware_init()
832 if (!cp->fw_data) cas_saturn_firmware_init()
834 memcpy(cp->fw_data, &fw->data[2], cp->fw_size); cas_saturn_firmware_init()
839 static void cas_saturn_firmware_load(struct cas *cp) cas_saturn_firmware_load() argument
843 if (!cp->fw_data) cas_saturn_firmware_load()
846 cas_phy_powerdown(cp); cas_saturn_firmware_load()
849 cas_phy_write(cp, DP83065_MII_MEM, 0x0); cas_saturn_firmware_load()
852 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); cas_saturn_firmware_load()
853 cas_phy_write(cp, DP83065_MII_REGD, 0xbd); cas_saturn_firmware_load()
854 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); cas_saturn_firmware_load()
855 cas_phy_write(cp, DP83065_MII_REGD, 0x82); cas_saturn_firmware_load()
856 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); cas_saturn_firmware_load()
857 cas_phy_write(cp, DP83065_MII_REGD, 0x0); cas_saturn_firmware_load()
858 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); cas_saturn_firmware_load()
859 cas_phy_write(cp, DP83065_MII_REGD, 0x39); cas_saturn_firmware_load()
862 cas_phy_write(cp, DP83065_MII_MEM, 0x1); cas_saturn_firmware_load()
863 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr); cas_saturn_firmware_load()
864 for (i = 0; i < cp->fw_size; i++) cas_saturn_firmware_load()
865 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]); cas_saturn_firmware_load()
868 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); cas_saturn_firmware_load()
869 cas_phy_write(cp, DP83065_MII_REGD, 0x1); cas_saturn_firmware_load()
874 static void cas_phy_init(struct cas *cp) cas_phy_init() argument
879 if (CAS_PHY_MII(cp->phy_type)) { cas_phy_init()
881 cp->regs + REG_PCS_DATAPATH_MODE); cas_phy_init()
883 cas_mif_poll(cp, 0); cas_phy_init()
884 cas_reset_mii_phy(cp); /* take out of isolate mode */ cas_phy_init()
886 if (PHY_LUCENT_B0 == cp->phy_id) { cas_phy_init()
888 cas_phy_write(cp, LUCENT_MII_REG, 0x8000); cas_phy_init()
889 cas_phy_write(cp, MII_BMCR, 0x00f1); cas_phy_init()
890 cas_phy_write(cp, LUCENT_MII_REG, 0x0); cas_phy_init()
892 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) { cas_phy_init()
894 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20); cas_phy_init()
895 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012); cas_phy_init()
896 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804); cas_phy_init()
897 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013); cas_phy_init()
898 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204); cas_phy_init()
899 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); cas_phy_init()
900 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132); cas_phy_init()
901 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); cas_phy_init()
902 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232); cas_phy_init()
903 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F); cas_phy_init()
904 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20); cas_phy_init()
906 } else if (PHY_BROADCOM_5411 == cp->phy_id) { cas_phy_init()
907 val = cas_phy_read(cp, BROADCOM_MII_REG4); cas_phy_init()
908 val = cas_phy_read(cp, BROADCOM_MII_REG4); cas_phy_init()
911 cas_phy_write(cp, BROADCOM_MII_REG4, cas_phy_init()
915 } else if (cp->cas_flags & CAS_FLAG_SATURN) { cas_phy_init()
916 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? cas_phy_init()
918 cp->regs + REG_SATURN_PCFG); cas_phy_init()
924 if (PHY_NS_DP83065 == cp->phy_id) { cas_phy_init()
925 cas_saturn_firmware_load(cp); cas_phy_init()
927 cas_phy_powerup(cp); cas_phy_init()
931 val = cas_phy_read(cp, MII_BMCR); cas_phy_init()
933 cas_phy_write(cp, MII_BMCR, val); cas_phy_init()
936 cas_phy_write(cp, MII_ADVERTISE, cas_phy_init()
937 cas_phy_read(cp, MII_ADVERTISE) | cas_phy_init()
943 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { cas_phy_init()
947 val = cas_phy_read(cp, CAS_MII_1000_CTRL); cas_phy_init()
950 cas_phy_write(cp, CAS_MII_1000_CTRL, val); cas_phy_init()
959 cp->regs + REG_PCS_DATAPATH_MODE); cas_phy_init()
962 if (cp->cas_flags & CAS_FLAG_SATURN) cas_phy_init()
963 writel(0, cp->regs + REG_SATURN_PCFG); cas_phy_init()
966 val = readl(cp->regs + REG_PCS_MII_CTRL); cas_phy_init()
968 writel(val, cp->regs + REG_PCS_MII_CTRL); cas_phy_init()
973 if ((readl(cp->regs + REG_PCS_MII_CTRL) & cas_phy_init()
978 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n", cas_phy_init()
979 readl(cp->regs + REG_PCS_STATE_MACHINE)); cas_phy_init()
984 writel(0x0, cp->regs + REG_PCS_CFG); cas_phy_init()
987 val = readl(cp->regs + REG_PCS_MII_ADVERT); cas_phy_init()
991 writel(val, cp->regs + REG_PCS_MII_ADVERT); cas_phy_init()
994 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG); cas_phy_init()
998 cp->regs + REG_PCS_SERDES_CTRL); cas_phy_init()
1003 static int cas_pcs_link_check(struct cas *cp) cas_pcs_link_check() argument
1012 stat = readl(cp->regs + REG_PCS_MII_STATUS); cas_pcs_link_check()
1014 stat = readl(cp->regs + REG_PCS_MII_STATUS); cas_pcs_link_check()
1022 netif_info(cp, link, cp->dev, "PCS RemoteFault\n"); cas_pcs_link_check()
1027 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE); cas_pcs_link_check()
1035 if (cp->lstate != link_up) { cas_pcs_link_check()
1036 if (cp->opened) { cas_pcs_link_check()
1037 cp->lstate = link_up; cas_pcs_link_check()
1038 cp->link_transition = LINK_TRANSITION_LINK_UP; cas_pcs_link_check()
1040 cas_set_link_modes(cp); cas_pcs_link_check()
1041 netif_carrier_on(cp->dev); cas_pcs_link_check()
1044 } else if (cp->lstate == link_up) { cas_pcs_link_check()
1045 cp->lstate = link_down; cas_pcs_link_check()
1047 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && cas_pcs_link_check()
1048 !cp->link_transition_jiffies_valid) { cas_pcs_link_check()
1062 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; cas_pcs_link_check()
1063 cp->link_transition_jiffies = jiffies; cas_pcs_link_check()
1064 cp->link_transition_jiffies_valid = 1; cas_pcs_link_check()
1066 cp->link_transition = LINK_TRANSITION_ON_FAILURE; cas_pcs_link_check()
1068 netif_carrier_off(cp->dev); cas_pcs_link_check()
1069 if (cp->opened) cas_pcs_link_check()
1070 netif_info(cp, link, cp->dev, "PCS link down\n"); cas_pcs_link_check()
1080 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) { cas_pcs_link_check()
1082 stat = readl(cp->regs + REG_PCS_SERDES_STATE); cas_pcs_link_check()
1086 } else if (cp->lstate == link_down) { cas_pcs_link_check()
1088 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && cas_pcs_link_check()
1089 !cp->link_transition_jiffies_valid) { cas_pcs_link_check()
1096 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; cas_pcs_link_check()
1097 cp->link_transition_jiffies = jiffies; cas_pcs_link_check()
1098 cp->link_transition_jiffies_valid = 1; cas_pcs_link_check()
1100 cp->link_transition = LINK_TRANSITION_STILL_FAILED; cas_pcs_link_check()
1108 struct cas *cp, u32 status) cas_pcs_interrupt()
1110 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS); cas_pcs_interrupt()
1114 return cas_pcs_link_check(cp); cas_pcs_interrupt()
1118 struct cas *cp, u32 status) cas_txmac_interrupt()
1120 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS); cas_txmac_interrupt()
1125 netif_printk(cp, intr, KERN_DEBUG, cp->dev, cas_txmac_interrupt()
1135 spin_lock(&cp->stat_lock[0]); cas_txmac_interrupt()
1138 cp->net_stats[0].tx_fifo_errors++; cas_txmac_interrupt()
1143 cp->net_stats[0].tx_errors++; cas_txmac_interrupt()
1150 cp->net_stats[0].collisions += 0x10000; cas_txmac_interrupt()
1153 cp->net_stats[0].tx_aborted_errors += 0x10000; cas_txmac_interrupt()
1154 cp->net_stats[0].collisions += 0x10000; cas_txmac_interrupt()
1158 cp->net_stats[0].tx_aborted_errors += 0x10000; cas_txmac_interrupt()
1159 cp->net_stats[0].collisions += 0x10000; cas_txmac_interrupt()
1161 spin_unlock(&cp->stat_lock[0]); cas_txmac_interrupt()
1169 static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) cas_load_firmware() argument
1177 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR); cas_load_firmware()
1181 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI); cas_load_firmware()
1190 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID); cas_load_firmware()
1196 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW); cas_load_firmware()
1202 static void cas_init_rx_dma(struct cas *cp) cas_init_rx_dma() argument
1204 u64 desc_dma = cp->block_dvma; cas_init_rx_dma()
1213 (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */ cas_init_rx_dma()
1215 writel(val, cp->regs + REG_RX_CFG); cas_init_rx_dma()
1217 val = (unsigned long) cp->init_rxds[0] - cas_init_rx_dma()
1218 (unsigned long) cp->init_block; cas_init_rx_dma()
1219 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI); cas_init_rx_dma()
1220 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW); cas_init_rx_dma()
1221 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); cas_init_rx_dma()
1223 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { cas_init_rx_dma()
1227 val = (unsigned long) cp->init_rxds[1] - cas_init_rx_dma()
1228 (unsigned long) cp->init_block; cas_init_rx_dma()
1229 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); cas_init_rx_dma()
1230 writel((desc_dma + val) & 0xffffffff, cp->regs + cas_init_rx_dma()
1232 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + cas_init_rx_dma()
1237 val = (unsigned long) cp->init_rxcs[0] - cas_init_rx_dma()
1238 (unsigned long) cp->init_block; cas_init_rx_dma()
1239 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); cas_init_rx_dma()
1240 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); cas_init_rx_dma()
1242 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { cas_init_rx_dma()
1245 val = (unsigned long) cp->init_rxcs[i] - cas_init_rx_dma()
1246 (unsigned long) cp->init_block; cas_init_rx_dma()
1247 writel((desc_dma + val) >> 32, cp->regs + cas_init_rx_dma()
1249 writel((desc_dma + val) & 0xffffffff, cp->regs + cas_init_rx_dma()
1258 readl(cp->regs + REG_INTR_STATUS_ALIAS); cas_init_rx_dma()
1259 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); cas_init_rx_dma()
1260 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { cas_init_rx_dma()
1262 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i)); cas_init_rx_dma()
1267 cp->regs + REG_PLUS_ALIASN_CLEAR(1)); cas_init_rx_dma()
1271 cp->regs + REG_PLUS_ALIASN_CLEAR(i)); cas_init_rx_dma()
1276 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); cas_init_rx_dma()
1278 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); cas_init_rx_dma()
1279 writel(val, cp->regs + REG_RX_PAUSE_THRESH); cas_init_rx_dma()
1283 writel(i, cp->regs + REG_RX_TABLE_ADDR); cas_init_rx_dma()
1284 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); cas_init_rx_dma()
1285 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); cas_init_rx_dma()
1286 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); cas_init_rx_dma()
1290 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); cas_init_rx_dma()
1291 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); cas_init_rx_dma()
1297 writel(val, cp->regs + REG_RX_BLANK); cas_init_rx_dma()
1299 writel(0x0, cp->regs + REG_RX_BLANK); cas_init_rx_dma()
1309 writel(val, cp->regs + REG_RX_AE_THRESH); cas_init_rx_dma()
1310 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { cas_init_rx_dma()
1312 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); cas_init_rx_dma()
1318 writel(0x0, cp->regs + REG_RX_RED); cas_init_rx_dma()
1322 if (cp->page_size == 0x1000) cas_init_rx_dma()
1324 else if (cp->page_size == 0x2000) cas_init_rx_dma()
1326 else if (cp->page_size == 0x4000) cas_init_rx_dma()
1330 size = cp->dev->mtu + 64; cas_init_rx_dma()
1331 if (size > cp->page_size) cas_init_rx_dma()
1332 size = cp->page_size; cas_init_rx_dma()
1343 cp->mtu_stride = 1 << (i + 10); cas_init_rx_dma()
1346 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); cas_init_rx_dma()
1348 writel(val, cp->regs + REG_RX_PAGE_SIZE); cas_init_rx_dma()
1357 writel(val, cp->regs + REG_HP_CFG); cas_init_rx_dma()
1370 static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) cas_page_spare() argument
1372 cas_page_t *page = cp->rx_pages[1][index]; cas_page_spare()
1378 new = cas_page_dequeue(cp); cas_page_spare()
1380 spin_lock(&cp->rx_inuse_lock); cas_page_spare()
1381 list_add(&page->list, &cp->rx_inuse_list); cas_page_spare()
1382 spin_unlock(&cp->rx_inuse_lock); cas_page_spare()
1388 static cas_page_t *cas_page_swap(struct cas *cp, const int ring, cas_page_swap() argument
1391 cas_page_t **page0 = cp->rx_pages[0]; cas_page_swap()
1392 cas_page_t **page1 = cp->rx_pages[1]; cas_page_swap()
1396 cas_page_t *new = cas_page_spare(cp, index); cas_page_swap()
1406 static void cas_clean_rxds(struct cas *cp) cas_clean_rxds() argument
1409 struct cas_rx_desc *rxd = cp->init_rxds[0]; cas_clean_rxds()
1415 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { cas_clean_rxds()
1423 cas_page_t *page = cas_page_swap(cp, 0, i); cas_clean_rxds()
1429 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; cas_clean_rxds()
1430 cp->rx_last[0] = 0; cas_clean_rxds()
1431 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); cas_clean_rxds()
1434 static void cas_clean_rxcs(struct cas *cp) cas_clean_rxcs() argument
1439 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); cas_clean_rxcs()
1440 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); cas_clean_rxcs()
1442 struct cas_rx_comp *rxc = cp->init_rxcs[i]; cas_clean_rxcs()
1456 static int cas_rxmac_reset(struct cas *cp)
1458 struct net_device *dev = cp->dev;
1463 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1465 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1475 writel(0, cp->regs + REG_RX_CFG);
1477 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1489 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1491 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1501 cas_clean_rxds(cp);
1502 cas_clean_rxcs(cp);
1505 cas_init_rx_dma(cp);
1508 val = readl(cp->regs + REG_RX_CFG);
1509 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1510 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1511 val = readl(cp->regs + REG_MAC_RX_CFG);
1512 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1517 static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, cas_rxmac_interrupt() argument
1520 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); cas_rxmac_interrupt()
1525 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat); cas_rxmac_interrupt()
1528 spin_lock(&cp->stat_lock[0]); cas_rxmac_interrupt()
1530 cp->net_stats[0].rx_frame_errors += 0x10000; cas_rxmac_interrupt()
1533 cp->net_stats[0].rx_crc_errors += 0x10000; cas_rxmac_interrupt()
1536 cp->net_stats[0].rx_length_errors += 0x10000; cas_rxmac_interrupt()
1539 cp->net_stats[0].rx_over_errors++; cas_rxmac_interrupt()
1540 cp->net_stats[0].rx_fifo_errors++; cas_rxmac_interrupt()
1546 spin_unlock(&cp->stat_lock[0]); cas_rxmac_interrupt()
1550 static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, cas_mac_interrupt() argument
1553 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); cas_mac_interrupt()
1558 netif_printk(cp, intr, KERN_DEBUG, cp->dev, cas_mac_interrupt()
1566 cp->pause_entered++; cas_mac_interrupt()
1569 cp->pause_last_time_recvd = (stat >> 16); cas_mac_interrupt()
1575 /* Must be invoked under cp->lock. */ cas_mdio_link_not_up()
1576 static inline int cas_mdio_link_not_up(struct cas *cp) cas_mdio_link_not_up() argument
1580 switch (cp->lstate) { cas_mdio_link_not_up()
1582 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n"); cas_mdio_link_not_up()
1583 cas_phy_write(cp, MII_BMCR, cp->link_fcntl); cas_mdio_link_not_up()
1584 cp->timer_ticks = 5; cas_mdio_link_not_up()
1585 cp->lstate = link_force_ok; cas_mdio_link_not_up()
1586 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; cas_mdio_link_not_up()
1590 val = cas_phy_read(cp, MII_BMCR); cas_mdio_link_not_up()
1597 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? cas_mdio_link_not_up()
1599 cas_phy_write(cp, MII_BMCR, val); cas_mdio_link_not_up()
1600 cp->timer_ticks = 5; cas_mdio_link_not_up()
1601 cp->lstate = link_force_try; cas_mdio_link_not_up()
1602 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; cas_mdio_link_not_up()
1607 val = cas_phy_read(cp, MII_BMCR); cas_mdio_link_not_up()
1608 cp->timer_ticks = 5; cas_mdio_link_not_up()
1612 cas_phy_write(cp, MII_BMCR, val); cas_mdio_link_not_up()
1622 cas_phy_write(cp, MII_BMCR, val); cas_mdio_link_not_up()
1632 /* must be invoked with cp->lock held */ cas_mii_link_check()
1633 static int cas_mii_link_check(struct cas *cp, const u16 bmsr) cas_mii_link_check() argument
1643 if ((cp->lstate == link_force_try) && cas_mii_link_check()
1644 (cp->link_cntl & BMCR_ANENABLE)) { cas_mii_link_check()
1645 cp->lstate = link_force_ret; cas_mii_link_check()
1646 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; cas_mii_link_check()
1647 cas_mif_poll(cp, 0); cas_mii_link_check()
1648 cp->link_fcntl = cas_phy_read(cp, MII_BMCR); cas_mii_link_check()
1649 cp->timer_ticks = 5; cas_mii_link_check()
1650 if (cp->opened) cas_mii_link_check()
1651 netif_info(cp, link, cp->dev, cas_mii_link_check()
1653 cas_phy_write(cp, MII_BMCR, cas_mii_link_check()
1654 cp->link_fcntl | BMCR_ANENABLE | cas_mii_link_check()
1656 cas_mif_poll(cp, 1); cas_mii_link_check()
1658 } else if (cp->lstate != link_up) { cas_mii_link_check()
1659 cp->lstate = link_up; cas_mii_link_check()
1660 cp->link_transition = LINK_TRANSITION_LINK_UP; cas_mii_link_check()
1662 if (cp->opened) { cas_mii_link_check()
1663 cas_set_link_modes(cp); cas_mii_link_check()
1664 netif_carrier_on(cp->dev); cas_mii_link_check()
1674 if (cp->lstate == link_up) { cas_mii_link_check()
1675 cp->lstate = link_down; cas_mii_link_check()
1676 cp->link_transition = LINK_TRANSITION_LINK_DOWN; cas_mii_link_check()
1678 netif_carrier_off(cp->dev); cas_mii_link_check()
1679 if (cp->opened) cas_mii_link_check()
1680 netif_info(cp, link, cp->dev, "Link down\n"); cas_mii_link_check()
1683 } else if (++cp->timer_ticks > 10) cas_mii_link_check()
1684 cas_mdio_link_not_up(cp); cas_mii_link_check()
1689 static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, cas_mif_interrupt() argument
1692 u32 stat = readl(cp->regs + REG_MIF_STATUS); cas_mif_interrupt()
1700 return cas_mii_link_check(cp, bmsr); cas_mif_interrupt()
1703 static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, cas_pci_interrupt() argument
1706 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); cas_pci_interrupt()
1712 stat, readl(cp->regs + REG_BIM_DIAG)); cas_pci_interrupt()
1716 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) cas_pci_interrupt()
1735 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg); cas_pci_interrupt()
1757 pci_write_config_word(cp->pdev, PCI_STATUS, cfg); cas_pci_interrupt()
1769 static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, cas_abnormal_irq() argument
1774 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, cas_abnormal_irq()
1776 spin_lock(&cp->stat_lock[0]); cas_abnormal_irq()
1777 cp->net_stats[0].rx_errors++; cas_abnormal_irq()
1778 spin_unlock(&cp->stat_lock[0]); cas_abnormal_irq()
1784 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, cas_abnormal_irq()
1786 spin_lock(&cp->stat_lock[0]); cas_abnormal_irq()
1787 cp->net_stats[0].rx_errors++; cas_abnormal_irq()
1788 spin_unlock(&cp->stat_lock[0]); cas_abnormal_irq()
1793 if (cas_pcs_interrupt(dev, cp, status)) cas_abnormal_irq()
1798 if (cas_txmac_interrupt(dev, cp, status)) cas_abnormal_irq()
1803 if (cas_rxmac_interrupt(dev, cp, status)) cas_abnormal_irq()
1808 if (cas_mac_interrupt(dev, cp, status)) cas_abnormal_irq()
1813 if (cas_mif_interrupt(dev, cp, status)) cas_abnormal_irq()
1818 if (cas_pci_interrupt(dev, cp, status)) cas_abnormal_irq()
1825 atomic_inc(&cp->reset_task_pending); cas_abnormal_irq()
1826 atomic_inc(&cp->reset_task_pending_all); cas_abnormal_irq()
1828 schedule_work(&cp->reset_task); cas_abnormal_irq()
1830 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); cas_abnormal_irq()
1832 schedule_work(&cp->reset_task); cas_abnormal_irq()
1842 static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, cas_calc_tabort() argument
1847 if (CAS_TABORT(cp) == 1) cas_calc_tabort()
1854 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) cas_tx_ringN() argument
1858 struct net_device *dev = cp->dev; cas_tx_ringN()
1861 spin_lock(&cp->tx_lock[ring]); cas_tx_ringN()
1862 txds = cp->init_txds[ring]; cas_tx_ringN()
1863 skbs = cp->tx_skbs[ring]; cas_tx_ringN()
1864 entry = cp->tx_old[ring]; cas_tx_ringN()
1881 + cp->tx_tiny_use[ring][entry].nbufs + 1; cas_tx_ringN()
1885 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev, cas_tx_ringN()
1889 cp->tx_tiny_use[ring][entry].nbufs = 0; cas_tx_ringN()
1897 pci_unmap_page(cp->pdev, daddr, dlen, cas_tx_ringN()
1902 if (cp->tx_tiny_use[ring][entry].used) { cas_tx_ringN()
1903 cp->tx_tiny_use[ring][entry].used = 0; cas_tx_ringN()
1908 spin_lock(&cp->stat_lock[ring]); cas_tx_ringN()
1909 cp->net_stats[ring].tx_packets++; cas_tx_ringN()
1910 cp->net_stats[ring].tx_bytes += skb->len; cas_tx_ringN()
1911 spin_unlock(&cp->stat_lock[ring]); cas_tx_ringN()
1914 cp->tx_old[ring] = entry; cas_tx_ringN()
1921 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) cas_tx_ringN()
1923 spin_unlock(&cp->tx_lock[ring]); cas_tx_ringN()
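
Note: the cas_tx_ringN() fragment above is a classic completion-ring consumer: it walks entries from the driver's tx_old index up to the completion index the hardware reports, unmaps and frees each transmitted skb, then publishes the new tx_old. A minimal userspace sketch of that consumer pattern, assuming an illustrative ring size and using free() as a stand-in for the driver's unmap-and-free step:

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8   /* assumed power-of-two ring size, not the driver's */

static void *ring[RING_SIZE]; /* stand-in for cp->tx_skbs[ring] */

/* Consume entries from 'old' up to (but not including) 'limit', the
 * index the hardware reports as completed. Returns the new 'old'. */
static int tx_ring_reap(int old, int limit)
{
        while (old != limit) {
                free(ring[old]);          /* driver would unmap + free the skb */
                ring[old] = NULL;
                old = (old + 1) & (RING_SIZE - 1);
        }
        return old;                       /* becomes the new tx_old */
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                ring[i] = malloc(16);     /* pretend 5 packets were queued */
        int tx_old = tx_ring_reap(0, 5);  /* hardware says 5 are done */
        printf("tx_old advanced to %d\n", tx_old);
        return 0;
}

In the driver the same walk also wakes the transmit queue once enough descriptors have been reclaimed, as the TX_BUFFS_AVAIL check above shows.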
1926 static void cas_tx(struct net_device *dev, struct cas *cp, cas_tx() argument
1931 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); cas_tx()
1933 netif_printk(cp, intr, KERN_DEBUG, cp->dev, cas_tx()
1944 limit = readl(cp->regs + REG_TX_COMPN(ring)); cas_tx()
1946 if (cp->tx_old[ring] != limit) cas_tx()
1947 cas_tx_ringN(cp, ring, limit); cas_tx()
1952 static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, cas_rx_process_pkt() argument
1973 skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size); cas_rx_process_pkt()
1984 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; cas_rx_process_pkt()
1990 i += cp->crc_size; cas_rx_process_pkt()
1991 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, cas_rx_process_pkt()
1995 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, cas_rx_process_pkt()
2009 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; cas_rx_process_pkt()
2012 hlen = min(cp->page_size - off, dlen); cas_rx_process_pkt()
2014 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, cas_rx_process_pkt()
2021 i += cp->crc_size; cas_rx_process_pkt()
2022 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, cas_rx_process_pkt()
2030 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, cas_rx_process_pkt()
2035 RX_USED_ADD(page, cp->mtu_stride); cas_rx_process_pkt()
2057 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; cas_rx_process_pkt()
2058 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, cas_rx_process_pkt()
2059 hlen + cp->crc_size, cas_rx_process_pkt()
2061 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, cas_rx_process_pkt()
2062 hlen + cp->crc_size, cas_rx_process_pkt()
2074 RX_USED_ADD(page, hlen + cp->crc_size); cas_rx_process_pkt()
2077 if (cp->crc_size) { cas_rx_process_pkt()
2088 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; cas_rx_process_pkt()
2090 hlen = min(cp->page_size - off, dlen); cas_rx_process_pkt()
2092 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, cas_rx_process_pkt()
2099 i += cp->crc_size; cas_rx_process_pkt()
2100 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, cas_rx_process_pkt()
2104 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, cas_rx_process_pkt()
2108 RX_USED_ADD(page, cp->mtu_stride); cas_rx_process_pkt()
2116 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; cas_rx_process_pkt()
2117 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, cas_rx_process_pkt()
2118 dlen + cp->crc_size, cas_rx_process_pkt()
2121 memcpy(p, addr, dlen + cp->crc_size); cas_rx_process_pkt()
2122 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, cas_rx_process_pkt()
2123 dlen + cp->crc_size, cas_rx_process_pkt()
2126 RX_USED_ADD(page, dlen + cp->crc_size); cas_rx_process_pkt()
2129 if (cp->crc_size) { cas_rx_process_pkt()
2137 if (cp->crc_size) { cas_rx_process_pkt()
2139 csum = csum_fold(csum_partial(crcaddr, cp->crc_size, cas_rx_process_pkt()
2144 skb->protocol = eth_type_trans(skb, cp->dev); cas_rx_process_pkt()
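
Note: the recurring hlen = min(cp->page_size - off, dlen) computation above handles a packet that starts at offset off inside one receive page and spills into the next. A tiny sketch of that split arithmetic, with illustrative values:

#include <stdio.h>

/* How a packet of dlen bytes starting at 'off' within a page of
 * 'page_size' bytes is split: 'hlen' bytes come from the first page,
 * the remainder from the next one. */
int main(void)
{
        int page_size = 4096, off = 3000, dlen = 1500;
        int hlen = page_size - off < dlen ? page_size - off : dlen;
        printf("first page: %d bytes, second page: %d bytes\n",
               hlen, dlen - hlen);
        return 0;
}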
2168 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, cas_rx_flow_pkt() argument
2172 struct sk_buff_head *flow = &cp->rx_flows[flowid]; cas_rx_flow_pkt()
2189 static void cas_post_page(struct cas *cp, const int ring, const int index) cas_post_page() argument
2194 entry = cp->rx_old[ring]; cas_post_page()
2196 new = cas_page_swap(cp, ring, index); cas_post_page()
2197 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); cas_post_page()
2198 cp->init_rxds[ring][entry].index = cas_post_page()
2203 cp->rx_old[ring] = entry; cas_post_page()
2209 writel(entry, cp->regs + REG_RX_KICK); cas_post_page()
2211 (cp->cas_flags & CAS_FLAG_REG_PLUS)) cas_post_page()
2212 writel(entry, cp->regs + REG_PLUS_RX_KICK1); cas_post_page()
2217 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) cas_post_rxds_ringN() argument
2221 cas_page_t **page = cp->rx_pages[ring]; cas_post_rxds_ringN()
2223 entry = cp->rx_old[ring]; cas_post_rxds_ringN()
2225 netif_printk(cp, intr, KERN_DEBUG, cp->dev, cas_post_rxds_ringN()
2235 cas_page_t *new = cas_page_dequeue(cp); cas_post_rxds_ringN()
2240 cp->cas_flags |= CAS_FLAG_RXD_POST(ring); cas_post_rxds_ringN()
2241 if (!timer_pending(&cp->link_timer)) cas_post_rxds_ringN()
2242 mod_timer(&cp->link_timer, jiffies + cas_post_rxds_ringN()
2244 cp->rx_old[ring] = entry; cas_post_rxds_ringN()
2245 cp->rx_last[ring] = num ? num - released : 0; cas_post_rxds_ringN()
2248 spin_lock(&cp->rx_inuse_lock); cas_post_rxds_ringN()
2249 list_add(&page[entry]->list, &cp->rx_inuse_list); cas_post_rxds_ringN()
2250 spin_unlock(&cp->rx_inuse_lock); cas_post_rxds_ringN()
2251 cp->init_rxds[ring][entry].buffer = cas_post_rxds_ringN()
2264 cp->rx_old[ring] = entry; cas_post_rxds_ringN()
2270 writel(cluster, cp->regs + REG_RX_KICK); cas_post_rxds_ringN()
2272 (cp->cas_flags & CAS_FLAG_REG_PLUS)) cas_post_rxds_ringN()
2273 writel(cluster, cp->regs + REG_PLUS_RX_KICK1); cas_post_rxds_ringN()
2290 static int cas_rx_ringN(struct cas *cp, int ring, int budget) cas_rx_ringN() argument
2292 struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; cas_rx_ringN()
2296 netif_printk(cp, intr, KERN_DEBUG, cp->dev, cas_rx_ringN()
2299 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]); cas_rx_ringN()
2301 entry = cp->rx_new[ring]; cas_rx_ringN()
2327 spin_lock(&cp->stat_lock[ring]); cas_rx_ringN()
2328 cp->net_stats[ring].rx_errors++; cas_rx_ringN()
2330 cp->net_stats[ring].rx_length_errors++; cas_rx_ringN()
2332 cp->net_stats[ring].rx_crc_errors++; cas_rx_ringN()
2333 spin_unlock(&cp->stat_lock[ring]); cas_rx_ringN()
2337 spin_lock(&cp->stat_lock[ring]); cas_rx_ringN()
2338 ++cp->net_stats[ring].rx_dropped; cas_rx_ringN()
2339 spin_unlock(&cp->stat_lock[ring]); cas_rx_ringN()
2343 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); cas_rx_ringN()
2356 cas_rx_flow_pkt(cp, words, skb); cas_rx_ringN()
2359 spin_lock(&cp->stat_lock[ring]); cas_rx_ringN()
2360 cp->net_stats[ring].rx_packets++; cas_rx_ringN()
2361 cp->net_stats[ring].rx_bytes += len; cas_rx_ringN()
2362 spin_unlock(&cp->stat_lock[ring]); cas_rx_ringN()
2372 cas_post_page(cp, dring, i); cas_rx_ringN()
2379 cas_post_page(cp, dring, i); cas_rx_ringN()
2386 cas_post_page(cp, dring, i); cas_rx_ringN()
2397 cp->rx_new[ring] = entry; cas_rx_ringN()
2400 netdev_info(cp->dev, "Memory squeeze, deferring packet\n"); cas_rx_ringN()
2407 struct cas *cp, int ring) cas_post_rxcs_ringN()
2409 struct cas_rx_comp *rxc = cp->init_rxcs[ring]; cas_post_rxcs_ringN()
2412 last = cp->rx_cur[ring]; cas_post_rxcs_ringN()
2413 entry = cp->rx_new[ring]; cas_post_rxcs_ringN()
2414 netif_printk(cp, intr, KERN_DEBUG, dev, cas_post_rxcs_ringN()
2416 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry); cas_post_rxcs_ringN()
2423 cp->rx_cur[ring] = last; cas_post_rxcs_ringN()
2426 writel(last, cp->regs + REG_RX_COMP_TAIL); cas_post_rxcs_ringN()
2427 else if (cp->cas_flags & CAS_FLAG_REG_PLUS) cas_post_rxcs_ringN()
2428 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); cas_post_rxcs_ringN()
2438 struct cas *cp, const u32 status, cas_handle_irqN()
2442 cas_post_rxcs_ringN(dev, cp, ring); cas_handle_irqN()
2448 struct cas *cp = netdev_priv(dev); cas_interruptN() local
2450 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; cas_interruptN()
2451 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); cas_interruptN()
2457 spin_lock_irqsave(&cp->lock, flags); cas_interruptN()
2460 cas_mask_intr(cp); cas_interruptN()
2461 napi_schedule(&cp->napi); cas_interruptN()
2463 cas_rx_ringN(cp, ring, 0); cas_interruptN()
2469 cas_handle_irqN(dev, cp, status, ring); cas_interruptN()
2470 spin_unlock_irqrestore(&cp->lock, flags); cas_interruptN()
2477 static inline void cas_handle_irq1(struct cas *cp, const u32 status) cas_handle_irq1() argument
2482 cas_post_rxds_ringN(cp, 1, 0); cas_handle_irq1()
2483 spin_lock(&cp->stat_lock[1]); cas_handle_irq1()
2484 cp->net_stats[1].rx_dropped++; cas_handle_irq1()
2485 spin_unlock(&cp->stat_lock[1]); cas_handle_irq1()
2489 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - cas_handle_irq1()
2493 cas_post_rxcs_ringN(cp, 1); cas_handle_irq1()
2500 struct cas *cp = netdev_priv(dev); cas_interrupt1() local
2502 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); cas_interrupt1()
2508 spin_lock_irqsave(&cp->lock, flags); cas_interrupt1()
2511 cas_mask_intr(cp); cas_interrupt1()
2512 napi_schedule(&cp->napi); cas_interrupt1()
2514 cas_rx_ringN(cp, 1, 0); cas_interrupt1()
2519 cas_handle_irq1(cp, status); cas_interrupt1()
2520 spin_unlock_irqrestore(&cp->lock, flags); cas_interrupt1()
2526 struct cas *cp, const u32 status) cas_handle_irq()
2530 cas_abnormal_irq(dev, cp, status); cas_handle_irq()
2536 cas_post_rxds_ringN(cp, 0, 0); cas_handle_irq()
2537 spin_lock(&cp->stat_lock[0]); cas_handle_irq()
2538 cp->net_stats[0].rx_dropped++; cas_handle_irq()
2539 spin_unlock(&cp->stat_lock[0]); cas_handle_irq()
2541 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - cas_handle_irq()
2546 cas_post_rxcs_ringN(dev, cp, 0); cas_handle_irq()
2552 struct cas *cp = netdev_priv(dev); cas_interrupt() local
2554 u32 status = readl(cp->regs + REG_INTR_STATUS); cas_interrupt()
2559 spin_lock_irqsave(&cp->lock, flags); cas_interrupt()
2561 cas_tx(dev, cp, status); cas_interrupt()
2567 cas_mask_intr(cp); cas_interrupt()
2568 napi_schedule(&cp->napi); cas_interrupt()
2570 cas_rx_ringN(cp, 0, 0); cas_interrupt()
2576 cas_handle_irq(dev, cp, status); cas_interrupt()
2577 spin_unlock_irqrestore(&cp->lock, flags); cas_interrupt()
2585 struct cas *cp = container_of(napi, struct cas, napi); cas_poll() local
2586 struct net_device *dev = cp->dev; cas_poll()
2588 u32 status = readl(cp->regs + REG_INTR_STATUS); cas_poll()
2591 spin_lock_irqsave(&cp->lock, flags); cas_poll()
2592 cas_tx(dev, cp, status); cas_poll()
2593 spin_unlock_irqrestore(&cp->lock, flags); cas_poll()
2607 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); cas_poll()
2617 spin_lock_irqsave(&cp->lock, flags); cas_poll()
2619 cas_handle_irq(dev, cp, status); cas_poll()
2623 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); cas_poll()
2625 cas_handle_irq1(cp, status); cas_poll()
2631 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); cas_poll()
2633 cas_handle_irqN(dev, cp, status, 2); cas_poll()
2639 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); cas_poll()
2641 cas_handle_irqN(dev, cp, status, 3); cas_poll()
2644 spin_unlock_irqrestore(&cp->lock, flags); cas_poll()
2647 cas_unmask_intr(cp); cas_poll()
2656 struct cas *cp = netdev_priv(dev); cas_netpoll() local
2658 cas_disable_irq(cp, 0); cas_netpoll()
2659 cas_interrupt(cp->pdev->irq, dev); cas_netpoll()
2660 cas_enable_irq(cp, 0); cas_netpoll()
2682 struct cas *cp = netdev_priv(dev); cas_tx_timeout() local
2685 if (!cp->hw_running) { cas_tx_timeout()
2691 readl(cp->regs + REG_MIF_STATE_MACHINE)); cas_tx_timeout()
2694 readl(cp->regs + REG_MAC_STATE_MACHINE)); cas_tx_timeout()
2697 readl(cp->regs + REG_TX_CFG), cas_tx_timeout()
2698 readl(cp->regs + REG_MAC_TX_STATUS), cas_tx_timeout()
2699 readl(cp->regs + REG_MAC_TX_CFG), cas_tx_timeout()
2700 readl(cp->regs + REG_TX_FIFO_PKT_CNT), cas_tx_timeout()
2701 readl(cp->regs + REG_TX_FIFO_WRITE_PTR), cas_tx_timeout()
2702 readl(cp->regs + REG_TX_FIFO_READ_PTR), cas_tx_timeout()
2703 readl(cp->regs + REG_TX_SM_1), cas_tx_timeout()
2704 readl(cp->regs + REG_TX_SM_2)); cas_tx_timeout()
2707 readl(cp->regs + REG_RX_CFG), cas_tx_timeout()
2708 readl(cp->regs + REG_MAC_RX_STATUS), cas_tx_timeout()
2709 readl(cp->regs + REG_MAC_RX_CFG)); cas_tx_timeout()
2712 readl(cp->regs + REG_HP_STATE_MACHINE), cas_tx_timeout()
2713 readl(cp->regs + REG_HP_STATUS0), cas_tx_timeout()
2714 readl(cp->regs + REG_HP_STATUS1), cas_tx_timeout()
2715 readl(cp->regs + REG_HP_STATUS2)); cas_tx_timeout()
2718 atomic_inc(&cp->reset_task_pending); cas_tx_timeout()
2719 atomic_inc(&cp->reset_task_pending_all); cas_tx_timeout()
2720 schedule_work(&cp->reset_task); cas_tx_timeout()
2722 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); cas_tx_timeout()
2723 schedule_work(&cp->reset_task); cas_tx_timeout()
2736 static void cas_write_txd(struct cas *cp, int ring, int entry, cas_write_txd() argument
2739 struct cas_tx_desc *txd = cp->init_txds[ring] + entry; cas_write_txd()
2750 static inline void *tx_tiny_buf(struct cas *cp, const int ring, tx_tiny_buf() argument
2753 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; tx_tiny_buf()
2756 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, tx_tiny_map() argument
2759 cp->tx_tiny_use[ring][tentry].nbufs++; tx_tiny_map()
2760 cp->tx_tiny_use[ring][entry].used = 1; tx_tiny_map()
2761 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; tx_tiny_map()
2764 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, cas_xmit_tx_ringN() argument
2767 struct net_device *dev = cp->dev; cas_xmit_tx_ringN()
2774 spin_lock_irqsave(&cp->tx_lock[ring], flags); cas_xmit_tx_ringN()
2777 if (TX_BUFFS_AVAIL(cp, ring) <= cas_xmit_tx_ringN()
2778 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { cas_xmit_tx_ringN()
2780 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); cas_xmit_tx_ringN()
2795 entry = cp->tx_new[ring]; cas_xmit_tx_ringN()
2796 cp->tx_skbs[ring][entry] = skb; cas_xmit_tx_ringN()
2800 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), cas_xmit_tx_ringN()
2805 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); cas_xmit_tx_ringN()
2808 cas_write_txd(cp, ring, entry, mapping, len - tabort, cas_xmit_tx_ringN()
2813 tx_tiny_buf(cp, ring, entry), tabort); cas_xmit_tx_ringN()
2814 mapping = tx_tiny_map(cp, ring, entry, tentry); cas_xmit_tx_ringN()
2815 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, cas_xmit_tx_ringN()
2818 cas_write_txd(cp, ring, entry, mapping, len, ctrl | cas_xmit_tx_ringN()
2827 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, cas_xmit_tx_ringN()
2830 tabort = cas_calc_tabort(cp, fragp->page_offset, len); cas_xmit_tx_ringN()
2835 cas_write_txd(cp, ring, entry, mapping, len - tabort, cas_xmit_tx_ringN()
2840 memcpy(tx_tiny_buf(cp, ring, entry), cas_xmit_tx_ringN()
2844 mapping = tx_tiny_map(cp, ring, entry, tentry); cas_xmit_tx_ringN()
2848 cas_write_txd(cp, ring, entry, mapping, len, ctrl, cas_xmit_tx_ringN()
2853 cp->tx_new[ring] = entry; cas_xmit_tx_ringN()
2854 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) cas_xmit_tx_ringN()
2857 netif_printk(cp, tx_queued, KERN_DEBUG, dev, cas_xmit_tx_ringN()
2859 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring)); cas_xmit_tx_ringN()
2860 writel(entry, cp->regs + REG_TX_KICKN(ring)); cas_xmit_tx_ringN()
2861 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); cas_xmit_tx_ringN()
2867 struct cas *cp = netdev_priv(dev); cas_start_xmit() local
2874 if (skb_padto(skb, cp->min_frame_size)) cas_start_xmit()
2880 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) cas_start_xmit()
2885 static void cas_init_tx_dma(struct cas *cp) cas_init_tx_dma() argument
2887 u64 desc_dma = cp->block_dvma; cas_init_tx_dma()
2895 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); cas_init_tx_dma()
2896 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); cas_init_tx_dma()
2909 off = (unsigned long) cp->init_txds[i] - cas_init_tx_dma()
2910 (unsigned long) cp->init_block; cas_init_tx_dma()
2913 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); cas_init_tx_dma()
2914 writel((desc_dma + off) & 0xffffffff, cp->regs + cas_init_tx_dma()
2920 writel(val, cp->regs + REG_TX_CFG); cas_init_tx_dma()
2926 writel(0x800, cp->regs + REG_TX_MAXBURST_0); cas_init_tx_dma()
2927 writel(0x1600, cp->regs + REG_TX_MAXBURST_1); cas_init_tx_dma()
2928 writel(0x2400, cp->regs + REG_TX_MAXBURST_2); cas_init_tx_dma()
2929 writel(0x4800, cp->regs + REG_TX_MAXBURST_3); cas_init_tx_dma()
2931 writel(0x800, cp->regs + REG_TX_MAXBURST_0); cas_init_tx_dma()
2932 writel(0x800, cp->regs + REG_TX_MAXBURST_1); cas_init_tx_dma()
2933 writel(0x800, cp->regs + REG_TX_MAXBURST_2); cas_init_tx_dma()
2934 writel(0x800, cp->regs + REG_TX_MAXBURST_3); cas_init_tx_dma()
2938 /* Must be invoked under cp->lock. */ cas_init_dma()
2939 static inline void cas_init_dma(struct cas *cp) cas_init_dma() argument
2941 cas_init_tx_dma(cp); cas_init_dma()
2942 cas_init_rx_dma(cp); cas_init_dma()
2945 static void cas_process_mc_list(struct cas *cp) cas_process_mc_list() argument
2953 netdev_for_each_mc_addr(ha, cp->dev) { cas_process_mc_list()
2959 cp->regs + REG_MAC_ADDRN(i*3 + 0)); cas_process_mc_list()
2961 cp->regs + REG_MAC_ADDRN(i*3 + 1)); cas_process_mc_list()
2963 cp->regs + REG_MAC_ADDRN(i*3 + 2)); cas_process_mc_list()
2976 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i)); cas_process_mc_list()
2979 /* Must be invoked under cp->lock. */ cas_setup_multicast()
2980 static u32 cas_setup_multicast(struct cas *cp) cas_setup_multicast() argument
2985 if (cp->dev->flags & IFF_PROMISC) { cas_setup_multicast()
2988 } else if (cp->dev->flags & IFF_ALLMULTI) { cas_setup_multicast()
2990 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); cas_setup_multicast()
2994 cas_process_mc_list(cp); cas_setup_multicast()
3001 /* must be invoked under cp->stat_lock[N_TX_RINGS] */ cas_clear_mac_err()
3002 static void cas_clear_mac_err(struct cas *cp) cas_clear_mac_err() argument
3004 writel(0, cp->regs + REG_MAC_COLL_NORMAL); cas_clear_mac_err()
3005 writel(0, cp->regs + REG_MAC_COLL_FIRST); cas_clear_mac_err()
3006 writel(0, cp->regs + REG_MAC_COLL_EXCESS); cas_clear_mac_err()
3007 writel(0, cp->regs + REG_MAC_COLL_LATE); cas_clear_mac_err()
3008 writel(0, cp->regs + REG_MAC_TIMER_DEFER); cas_clear_mac_err()
3009 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); cas_clear_mac_err()
3010 writel(0, cp->regs + REG_MAC_RECV_FRAME); cas_clear_mac_err()
3011 writel(0, cp->regs + REG_MAC_LEN_ERR); cas_clear_mac_err()
3012 writel(0, cp->regs + REG_MAC_ALIGN_ERR); cas_clear_mac_err()
3013 writel(0, cp->regs + REG_MAC_FCS_ERR); cas_clear_mac_err()
3014 writel(0, cp->regs + REG_MAC_RX_CODE_ERR); cas_clear_mac_err()
3018 static void cas_mac_reset(struct cas *cp) cas_mac_reset() argument
3023 writel(0x1, cp->regs + REG_MAC_TX_RESET); cas_mac_reset()
3024 writel(0x1, cp->regs + REG_MAC_RX_RESET); cas_mac_reset()
3029 if (readl(cp->regs + REG_MAC_TX_RESET) == 0) cas_mac_reset()
3037 if (readl(cp->regs + REG_MAC_RX_RESET) == 0) cas_mac_reset()
3042 if (readl(cp->regs + REG_MAC_TX_RESET) | cas_mac_reset()
3043 readl(cp->regs + REG_MAC_RX_RESET)) cas_mac_reset()
3044 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n", cas_mac_reset()
3045 readl(cp->regs + REG_MAC_TX_RESET), cas_mac_reset()
3046 readl(cp->regs + REG_MAC_RX_RESET), cas_mac_reset()
3047 readl(cp->regs + REG_MAC_STATE_MACHINE)); cas_mac_reset()
3051 /* Must be invoked under cp->lock. */ cas_init_mac()
3052 static void cas_init_mac(struct cas *cp) cas_init_mac() argument
3054 unsigned char *e = &cp->dev->dev_addr[0]; cas_init_mac()
3056 cas_mac_reset(cp); cas_init_mac()
3059 writel(CAWR_RR_DIS, cp->regs + REG_CAWR); cas_init_mac()
3066 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) cas_init_mac()
3067 writel(INF_BURST_EN, cp->regs + REG_INF_BURST); cas_init_mac()
3070 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); cas_init_mac()
3072 writel(0x00, cp->regs + REG_MAC_IPG0); cas_init_mac()
3073 writel(0x08, cp->regs + REG_MAC_IPG1); cas_init_mac()
3074 writel(0x04, cp->regs + REG_MAC_IPG2); cas_init_mac()
3077 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); cas_init_mac()
3080 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); cas_init_mac()
3089 cp->regs + REG_MAC_FRAMESIZE_MAX); cas_init_mac()
3095 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) cas_init_mac()
3096 writel(0x41, cp->regs + REG_MAC_PA_SIZE); cas_init_mac()
3098 writel(0x07, cp->regs + REG_MAC_PA_SIZE); cas_init_mac()
3099 writel(0x04, cp->regs + REG_MAC_JAM_SIZE); cas_init_mac()
3100 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); cas_init_mac()
3101 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); cas_init_mac()
3103 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); cas_init_mac()
3105 writel(0, cp->regs + REG_MAC_ADDR_FILTER0); cas_init_mac()
3106 writel(0, cp->regs + REG_MAC_ADDR_FILTER1); cas_init_mac()
3107 writel(0, cp->regs + REG_MAC_ADDR_FILTER2); cas_init_mac()
3108 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); cas_init_mac()
3109 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); cas_init_mac()
3113 writel(0x0, cp->regs + REG_MAC_ADDRN(i)); cas_init_mac()
3115 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); cas_init_mac()
3116 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); cas_init_mac()
3117 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); cas_init_mac()
3119 writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); cas_init_mac()
3120 writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); cas_init_mac()
3121 writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); cas_init_mac()
3123 cp->mac_rx_cfg = cas_setup_multicast(cp); cas_init_mac()
3125 spin_lock(&cp->stat_lock[N_TX_RINGS]); cas_init_mac()
3126 cas_clear_mac_err(cp); cas_init_mac()
3127 spin_unlock(&cp->stat_lock[N_TX_RINGS]); cas_init_mac()
3133 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); cas_init_mac()
3134 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); cas_init_mac()
3139 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); cas_init_mac()
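
Note: the REG_MAC_ADDRN(0..2) writes above program the 6-byte station address as three 16-bit words, low half first. A small sketch of that split, using an arbitrary example address:

#include <stdio.h>

/* Split a MAC address into the three 16-bit register words used by
 * the REG_MAC_ADDRN(0..2) writes above; the address is an example. */
int main(void)
{
        unsigned char e[6] = {0x00, 0x03, 0xba, 0x12, 0x34, 0x56};
        unsigned w0 = (e[4] << 8) | e[5];   /* low  16 bits -> ADDRN(0) */
        unsigned w1 = (e[2] << 8) | e[3];   /* mid  16 bits -> ADDRN(1) */
        unsigned w2 = (e[0] << 8) | e[1];   /* high 16 bits -> ADDRN(2) */
        printf("ADDRN(0)=%04x ADDRN(1)=%04x ADDRN(2)=%04x\n", w0, w1, w2);
        return 0;
}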
3142 /* Must be invoked under cp->lock. */ cas_init_pause_thresholds()
3143 static void cas_init_pause_thresholds(struct cas *cp) cas_init_pause_thresholds() argument
3148 if (cp->rx_fifo_size <= (2 * 1024)) { cas_init_pause_thresholds()
3149 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; cas_init_pause_thresholds()
3151 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; cas_init_pause_thresholds()
3152 if (max_frame * 3 > cp->rx_fifo_size) { cas_init_pause_thresholds()
3153 cp->rx_pause_off = 7104; cas_init_pause_thresholds()
3154 cp->rx_pause_on = 960; cas_init_pause_thresholds()
3156 int off = (cp->rx_fifo_size - (max_frame * 2)); cas_init_pause_thresholds()
3158 cp->rx_pause_off = off; cas_init_pause_thresholds()
3159 cp->rx_pause_on = on; cas_init_pause_thresholds()
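
Note: the max_frame expression above sizes the worst-case frame (MTU plus Ethernet header, VLAN tag, FCS, and 64 bytes of slack) and masks it to a 64-byte multiple with the add-then-mask idiom. A sketch of the arithmetic for a standard MTU:

#include <stdio.h>

/* (x + 64) & ~63 lands on a 64-byte boundary at or above x + 1,
 * i.e. it rounds up with some built-in headroom. */
int main(void)
{
        int mtu = 1500, ETH_HLEN = 14;
        int max_frame = (mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
        printf("max_frame = %d\n", max_frame);   /* 1536 for MTU 1500 */
        return 0;
}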
3188 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, cas_get_vpd_info() argument
3191 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; cas_get_vpd_info()
3207 cp->regs + REG_BIM_LOCAL_DEV_EN); cas_get_vpd_info()
3312 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; cas_get_vpd_info()
3353 addr = of_get_property(cp->of_node, "local-mac-address", NULL); cas_get_vpd_info()
3368 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); cas_get_vpd_info()
3373 static void cas_check_pci_invariants(struct cas *cp) cas_check_pci_invariants() argument
3375 struct pci_dev *pdev = cp->pdev; cas_check_pci_invariants()
3377 cp->cas_flags = 0; cas_check_pci_invariants()
3381 cp->cas_flags |= CAS_FLAG_REG_PLUS; cas_check_pci_invariants()
3383 cp->cas_flags |= CAS_FLAG_TARGET_ABORT; cas_check_pci_invariants()
3389 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; cas_check_pci_invariants()
3392 cp->cas_flags |= CAS_FLAG_REG_PLUS; cas_check_pci_invariants()
3399 cp->cas_flags |= CAS_FLAG_SATURN; cas_check_pci_invariants()
3404 static int cas_check_invariants(struct cas *cp) cas_check_invariants() argument
3406 struct pci_dev *pdev = cp->pdev; cas_check_invariants()
3411 cp->page_order = 0; cas_check_invariants()
3420 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; cas_check_invariants()
3426 cp->page_size = (PAGE_SIZE << cp->page_order); cas_check_invariants()
3429 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; cas_check_invariants()
3430 cp->rx_fifo_size = RX_FIFO_SIZE; cas_check_invariants()
3435 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, cas_check_invariants()
3437 if (cp->phy_type & CAS_PHY_SERDES) { cas_check_invariants()
3438 cp->cas_flags |= CAS_FLAG_1000MB_CAP; cas_check_invariants()
3443 cfg = readl(cp->regs + REG_MIF_CFG); cas_check_invariants()
3445 cp->phy_type = CAS_PHY_MII_MDIO1; cas_check_invariants()
3447 cp->phy_type = CAS_PHY_MII_MDIO0; cas_check_invariants()
3450 cas_mif_poll(cp, 0); cas_check_invariants()
3451 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); cas_check_invariants()
3458 cp->phy_addr = i; cas_check_invariants()
3459 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; cas_check_invariants()
3460 phy_id |= cas_phy_read(cp, MII_PHYSID2); cas_check_invariants()
3462 cp->phy_id = phy_id; cas_check_invariants()
3468 readl(cp->regs + REG_MIF_STATE_MACHINE)); cas_check_invariants()
3473 cfg = cas_phy_read(cp, MII_BMSR); cas_check_invariants()
3475 cas_phy_read(cp, CAS_MII_1000_EXTEND)) cas_check_invariants()
3476 cp->cas_flags |= CAS_FLAG_1000MB_CAP; cas_check_invariants()
3480 /* Must be invoked under cp->lock. */ cas_start_dma()
3481 static inline void cas_start_dma(struct cas *cp) cas_start_dma() argument
3488 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; cas_start_dma()
3489 writel(val, cp->regs + REG_TX_CFG); cas_start_dma()
3490 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; cas_start_dma()
3491 writel(val, cp->regs + REG_RX_CFG); cas_start_dma()
3494 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; cas_start_dma()
3495 writel(val, cp->regs + REG_MAC_TX_CFG); cas_start_dma()
3496 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; cas_start_dma()
3497 writel(val, cp->regs + REG_MAC_RX_CFG); cas_start_dma()
3501 val = readl(cp->regs + REG_MAC_TX_CFG); cas_start_dma()
3509 val = readl(cp->regs + REG_MAC_RX_CFG); cas_start_dma()
3512 netdev_err(cp->dev, cas_start_dma()
3514 readl(cp->regs + REG_MIF_STATE_MACHINE), cas_start_dma()
3515 readl(cp->regs + REG_MAC_STATE_MACHINE)); cas_start_dma()
3521 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n", cas_start_dma()
3523 readl(cp->regs + REG_MIF_STATE_MACHINE), cas_start_dma()
3524 readl(cp->regs + REG_MAC_STATE_MACHINE)); cas_start_dma()
3527 cas_unmask_intr(cp); /* enable interrupts */ cas_start_dma()
3528 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); cas_start_dma()
3529 writel(0, cp->regs + REG_RX_COMP_TAIL); cas_start_dma()
3531 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { cas_start_dma()
3534 cp->regs + REG_PLUS_RX_KICK1); cas_start_dma()
3537 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); cas_start_dma()
3541 /* Must be invoked under cp->lock. */ cas_read_pcs_link_mode()
3542 static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, cas_read_pcs_link_mode() argument
3545 u32 val = readl(cp->regs + REG_PCS_MII_LPA); cas_read_pcs_link_mode()
3553 /* Must be invoked under cp->lock. */ cas_read_mii_link_mode()
3554 static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, cas_read_mii_link_mode() argument
3564 val = cas_phy_read(cp, MII_LPA); cas_read_mii_link_mode()
3576 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { cas_read_mii_link_mode()
3577 val = cas_phy_read(cp, CAS_MII_1000_STATUS); cas_read_mii_link_mode()
3588 * Must be invoked under cp->lock.
3590 static void cas_set_link_modes(struct cas *cp) cas_set_link_modes() argument
3599 if (CAS_PHY_MII(cp->phy_type)) { cas_set_link_modes()
3600 cas_mif_poll(cp, 0); cas_set_link_modes()
3601 val = cas_phy_read(cp, MII_BMCR); cas_set_link_modes()
3603 cas_read_mii_link_mode(cp, &full_duplex, &speed, cas_set_link_modes()
3612 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? cas_set_link_modes()
3615 cas_mif_poll(cp, 1); cas_set_link_modes()
3618 val = readl(cp->regs + REG_PCS_MII_CTRL); cas_set_link_modes()
3619 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); cas_set_link_modes()
3626 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n", cas_set_link_modes()
3630 if (CAS_PHY_MII(cp->phy_type)) { cas_set_link_modes()
3639 writel(val, cp->regs + REG_MAC_XIF_CFG); cas_set_link_modes()
3661 cp->regs + REG_MAC_TX_CFG); cas_set_link_modes()
3663 val = readl(cp->regs + REG_MAC_RX_CFG); cas_set_link_modes()
3666 cp->regs + REG_MAC_RX_CFG); cas_set_link_modes()
3668 writel(0x200, cp->regs + REG_MAC_SLOT_TIME); cas_set_link_modes()
3670 cp->crc_size = 4; cas_set_link_modes()
3672 cp->min_frame_size = CAS_1000MB_MIN_FRAME; cas_set_link_modes()
3675 writel(val, cp->regs + REG_MAC_TX_CFG); cas_set_link_modes()
3680 val = readl(cp->regs + REG_MAC_RX_CFG); cas_set_link_modes()
3683 cp->crc_size = 0; cas_set_link_modes()
3684 cp->min_frame_size = CAS_MIN_MTU; cas_set_link_modes()
3687 cp->crc_size = 4; cas_set_link_modes()
3688 cp->min_frame_size = CAS_MIN_FRAME; cas_set_link_modes()
3691 cp->regs + REG_MAC_RX_CFG); cas_set_link_modes()
3692 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); cas_set_link_modes()
3695 if (netif_msg_link(cp)) { cas_set_link_modes()
3697 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", cas_set_link_modes()
3698 cp->rx_fifo_size, cas_set_link_modes()
3699 cp->rx_pause_off, cas_set_link_modes()
3700 cp->rx_pause_on); cas_set_link_modes()
3702 netdev_info(cp->dev, "TX pause enabled\n"); cas_set_link_modes()
3704 netdev_info(cp->dev, "Pause is disabled\n"); cas_set_link_modes()
3708 val = readl(cp->regs + REG_MAC_CTRL_CFG); cas_set_link_modes()
3716 writel(val, cp->regs + REG_MAC_CTRL_CFG); cas_set_link_modes()
3717 cas_start_dma(cp); cas_set_link_modes()
3720 /* Must be invoked under cp->lock. */ cas_init_hw()
3721 static void cas_init_hw(struct cas *cp, int restart_link) cas_init_hw() argument
3724 cas_phy_init(cp); cas_init_hw()
3726 cas_init_pause_thresholds(cp); cas_init_hw()
3727 cas_init_mac(cp); cas_init_hw()
3728 cas_init_dma(cp); cas_init_hw()
3732 cp->timer_ticks = 0; cas_init_hw()
3733 cas_begin_auto_negotiation(cp, NULL); cas_init_hw()
3734 } else if (cp->lstate == link_up) { cas_init_hw()
3735 cas_set_link_modes(cp); cas_init_hw()
3736 netif_carrier_on(cp->dev); cas_init_hw()
3740 /* Must be invoked under cp->lock. on earlier cassini boards,
3744 static void cas_hard_reset(struct cas *cp) cas_hard_reset() argument
3746 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); cas_hard_reset()
3748 pci_restore_state(cp->pdev); cas_hard_reset()
3752 static void cas_global_reset(struct cas *cp, int blkflag) cas_global_reset() argument
3757 if (blkflag && !CAS_PHY_MII(cp->phy_type)) { cas_global_reset()
3765 cp->regs + REG_SW_RESET); cas_global_reset()
3767 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); cas_global_reset()
3775 u32 val = readl(cp->regs + REG_SW_RESET); cas_global_reset()
3780 netdev_err(cp->dev, "sw reset failed\n"); cas_global_reset()
3785 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); cas_global_reset()
3793 PCI_ERR_BIM_DMA_READ), cp->regs + cas_global_reset()
3799 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); cas_global_reset()
3802 static void cas_reset(struct cas *cp, int blkflag) cas_reset() argument
3806 cas_mask_intr(cp); cas_reset()
3807 cas_global_reset(cp, blkflag); cas_reset()
3808 cas_mac_reset(cp); cas_reset()
3809 cas_entropy_reset(cp); cas_reset()
3812 val = readl(cp->regs + REG_TX_CFG); cas_reset()
3814 writel(val, cp->regs + REG_TX_CFG); cas_reset()
3816 val = readl(cp->regs + REG_RX_CFG); cas_reset()
3818 writel(val, cp->regs + REG_RX_CFG); cas_reset()
3821 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || cas_reset()
3823 cas_load_firmware(cp, CAS_HP_FIRMWARE); cas_reset()
3825 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); cas_reset()
3829 spin_lock(&cp->stat_lock[N_TX_RINGS]); cas_reset()
3830 cas_clear_mac_err(cp); cas_reset()
3831 spin_unlock(&cp->stat_lock[N_TX_RINGS]); cas_reset()
3835 static void cas_shutdown(struct cas *cp) cas_shutdown() argument
3840 cp->hw_running = 0; cas_shutdown()
3842 del_timer_sync(&cp->link_timer); cas_shutdown()
3846 while (atomic_read(&cp->reset_task_pending_mtu) || cas_shutdown()
3847 atomic_read(&cp->reset_task_pending_spare) || cas_shutdown()
3848 atomic_read(&cp->reset_task_pending_all)) cas_shutdown()
3852 while (atomic_read(&cp->reset_task_pending)) cas_shutdown()
3856 cas_lock_all_save(cp, flags); cas_shutdown()
3857 cas_reset(cp, 0); cas_shutdown()
3858 if (cp->cas_flags & CAS_FLAG_SATURN) cas_shutdown()
3859 cas_phy_powerdown(cp); cas_shutdown()
3860 cas_unlock_all_restore(cp, flags); cas_shutdown()
3865 struct cas *cp = netdev_priv(dev); cas_change_mtu() local
3876 atomic_inc(&cp->reset_task_pending); cas_change_mtu()
3877 if ((cp->phy_type & CAS_PHY_SERDES)) { cas_change_mtu()
3878 atomic_inc(&cp->reset_task_pending_all); cas_change_mtu()
3880 atomic_inc(&cp->reset_task_pending_mtu); cas_change_mtu()
3882 schedule_work(&cp->reset_task); cas_change_mtu()
3884 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? cas_change_mtu()
3887 schedule_work(&cp->reset_task); cas_change_mtu()
3890 flush_work(&cp->reset_task); cas_change_mtu()
3894 static void cas_clean_txd(struct cas *cp, int ring) cas_clean_txd() argument
3896 struct cas_tx_desc *txd = cp->init_txds[ring]; cas_clean_txd()
3897 struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; cas_clean_txd()
3920 pci_unmap_page(cp->pdev, daddr, dlen, cas_clean_txd()
3930 if (cp->tx_tiny_use[ring][ent].used) cas_clean_txd()
3938 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); cas_clean_txd()
3942 static inline void cas_free_rx_desc(struct cas *cp, int ring) cas_free_rx_desc() argument
3944 cas_page_t **page = cp->rx_pages[ring]; cas_free_rx_desc()
3950 cas_page_free(cp, page[i]); cas_free_rx_desc()
3956 static void cas_free_rxds(struct cas *cp) cas_free_rxds() argument
3961 cas_free_rx_desc(cp, i); cas_free_rxds()
3964 /* Must be invoked under cp->lock. */ cas_clean_rings()
3965 static void cas_clean_rings(struct cas *cp) cas_clean_rings() argument
3970 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); cas_clean_rings()
3971 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); cas_clean_rings()
3973 cas_clean_txd(cp, i); cas_clean_rings()
3976 memset(cp->init_block, 0, sizeof(struct cas_init_block)); cas_clean_rings()
3977 cas_clean_rxds(cp); cas_clean_rings()
3978 cas_clean_rxcs(cp); cas_clean_rings()
3982 static inline int cas_alloc_rx_desc(struct cas *cp, int ring) cas_alloc_rx_desc() argument
3984 cas_page_t **page = cp->rx_pages[ring]; cas_alloc_rx_desc()
3989 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) cas_alloc_rx_desc()
3995 static int cas_alloc_rxds(struct cas *cp) cas_alloc_rxds() argument
4000 if (cas_alloc_rx_desc(cp, i) < 0) { cas_alloc_rxds()
4001 cas_free_rxds(cp); cas_alloc_rxds()
4010 struct cas *cp = container_of(work, struct cas, reset_task); cas_reset_task() local
4012 int pending = atomic_read(&cp->reset_task_pending); cas_reset_task()
4014 int pending_all = atomic_read(&cp->reset_task_pending_all); cas_reset_task()
4015 int pending_spare = atomic_read(&cp->reset_task_pending_spare); cas_reset_task()
4016 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); cas_reset_task()
4022 atomic_dec(&cp->reset_task_pending); cas_reset_task()
4030 if (cp->hw_running) { cas_reset_task()
4034 netif_device_detach(cp->dev); cas_reset_task()
4035 cas_lock_all_save(cp, flags); cas_reset_task()
4037 if (cp->opened) { cas_reset_task()
4042 cas_spare_recover(cp, GFP_ATOMIC); cas_reset_task()
4060 cas_reset(cp, !(pending_all > 0)); cas_reset_task()
4061 if (cp->opened) cas_reset_task()
4062 cas_clean_rings(cp); cas_reset_task()
4063 cas_init_hw(cp, (pending_all > 0)); cas_reset_task()
4065 cas_reset(cp, !(pending == CAS_RESET_ALL)); cas_reset_task()
4066 if (cp->opened) cas_reset_task()
4067 cas_clean_rings(cp); cas_reset_task()
4068 cas_init_hw(cp, pending == CAS_RESET_ALL); cas_reset_task()
4072 cas_unlock_all_restore(cp, flags); cas_reset_task()
4073 netif_device_attach(cp->dev); cas_reset_task()
4076 atomic_sub(pending_all, &cp->reset_task_pending_all); cas_reset_task()
4077 atomic_sub(pending_spare, &cp->reset_task_pending_spare); cas_reset_task()
4078 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); cas_reset_task()
4079 atomic_dec(&cp->reset_task_pending); cas_reset_task()
4081 atomic_set(&cp->reset_task_pending, 0); cas_reset_task()
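
Note: cas_reset_task() above samples each reset_task_pending_* counter once, performs a single reset covering everything it saw, then subtracts exactly the sampled amounts, so requests that arrive during the reset stay pending for the next pass. A userspace sketch of that coalescing pattern, condensed to two counters:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending_all, pending_mtu;

static void request_reset_all(void) { atomic_fetch_add(&pending_all, 1); }
static void request_reset_mtu(void) { atomic_fetch_add(&pending_mtu, 1); }

/* One worker pass: sample, act once, then subtract only what was
 * sampled, so requests arriving mid-reset are not lost. */
static void reset_worker(void)
{
        int all = atomic_load(&pending_all);
        int mtu = atomic_load(&pending_mtu);
        if (!all && !mtu)
                return;
        printf("performing %s reset\n", all ? "full" : "mtu-only");
        atomic_fetch_sub(&pending_all, all);
        atomic_fetch_sub(&pending_mtu, mtu);
}

int main(void)
{
        request_reset_mtu();
        request_reset_all();
        reset_worker();   /* handles both requests in one reset */
        reset_worker();   /* nothing left to do */
        return 0;
}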
4087 struct cas *cp = (struct cas *) data; cas_link_timer() local
4092 cp->link_transition_jiffies_valid && cas_link_timer()
4093 ((jiffies - cp->link_transition_jiffies) > cas_link_timer()
4099 cp->link_transition_jiffies_valid = 0; cas_link_timer()
4102 if (!cp->hw_running) cas_link_timer()
4105 spin_lock_irqsave(&cp->lock, flags); cas_link_timer()
4106 cas_lock_tx(cp); cas_link_timer()
4107 cas_entropy_gather(cp); cas_link_timer()
4113 if (atomic_read(&cp->reset_task_pending_all) || cas_link_timer()
4114 atomic_read(&cp->reset_task_pending_spare) || cas_link_timer()
4115 atomic_read(&cp->reset_task_pending_mtu)) cas_link_timer()
4118 if (atomic_read(&cp->reset_task_pending)) cas_link_timer()
4123 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { cas_link_timer()
4132 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { cas_link_timer()
4136 cp->cas_flags &= ~rmask; cas_link_timer()
4140 if (CAS_PHY_MII(cp->phy_type)) { cas_link_timer()
4142 cas_mif_poll(cp, 0); cas_link_timer()
4143 bmsr = cas_phy_read(cp, MII_BMSR); cas_link_timer()
4149 bmsr = cas_phy_read(cp, MII_BMSR); cas_link_timer()
4150 cas_mif_poll(cp, 1); cas_link_timer()
4151 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ cas_link_timer()
4152 reset = cas_mii_link_check(cp, bmsr); cas_link_timer()
4154 reset = cas_pcs_link_check(cp); cas_link_timer()
4161 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { cas_link_timer()
4162 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); cas_link_timer()
4168 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, cas_link_timer()
4174 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); cas_link_timer()
4175 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); cas_link_timer()
4176 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); cas_link_timer()
4178 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, cas_link_timer()
4185 cas_hard_reset(cp); cas_link_timer()
4191 atomic_inc(&cp->reset_task_pending); cas_link_timer()
4192 atomic_inc(&cp->reset_task_pending_all); cas_link_timer()
4193 schedule_work(&cp->reset_task); cas_link_timer()
4195 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); cas_link_timer()
4197 schedule_work(&cp->reset_task); cas_link_timer()
4202 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); cas_link_timer()
4203 cas_unlock_tx(cp); cas_link_timer()
4204 spin_unlock_irqrestore(&cp->lock, flags); cas_link_timer()
4210 static void cas_tx_tiny_free(struct cas *cp) cas_tx_tiny_free() argument
4212 struct pci_dev *pdev = cp->pdev; cas_tx_tiny_free()
4216 if (!cp->tx_tiny_bufs[i]) cas_tx_tiny_free()
4220 cp->tx_tiny_bufs[i], cas_tx_tiny_free()
4221 cp->tx_tiny_dvma[i]); cas_tx_tiny_free()
4222 cp->tx_tiny_bufs[i] = NULL; cas_tx_tiny_free()
4226 static int cas_tx_tiny_alloc(struct cas *cp) cas_tx_tiny_alloc() argument
4228 struct pci_dev *pdev = cp->pdev; cas_tx_tiny_alloc()
4232 cp->tx_tiny_bufs[i] = cas_tx_tiny_alloc()
4234 &cp->tx_tiny_dvma[i]); cas_tx_tiny_alloc()
4235 if (!cp->tx_tiny_bufs[i]) { cas_tx_tiny_alloc()
4236 cas_tx_tiny_free(cp); cas_tx_tiny_alloc()
4246 struct cas *cp = netdev_priv(dev); cas_open() local
4250 mutex_lock(&cp->pm_mutex); cas_open()
4252 hw_was_up = cp->hw_running; cas_open()
4255 * etc. state so it is safe to do this bit without cp->lock cas_open()
4257 if (!cp->hw_running) { cas_open()
4259 cas_lock_all_save(cp, flags); cas_open()
4265 cas_reset(cp, 0); cas_open()
4266 cp->hw_running = 1; cas_open()
4267 cas_unlock_all_restore(cp, flags); cas_open()
4271 if (cas_tx_tiny_alloc(cp) < 0) cas_open()
4275 if (cas_alloc_rxds(cp) < 0) cas_open()
4279 cas_spare_init(cp); cas_open()
4280 cas_spare_recover(cp, GFP_KERNEL); cas_open()
4287 if (request_irq(cp->pdev->irq, cas_interrupt, cas_open()
4289 netdev_err(cp->dev, "failed to request irq !\n"); cas_open()
4295 napi_enable(&cp->napi); cas_open()
4298 cas_lock_all_save(cp, flags); cas_open()
4299 cas_clean_rings(cp); cas_open()
4300 cas_init_hw(cp, !hw_was_up); cas_open()
4301 cp->opened = 1; cas_open()
4302 cas_unlock_all_restore(cp, flags); cas_open()
4305 mutex_unlock(&cp->pm_mutex); cas_open()
4309 cas_spare_free(cp); cas_open()
4310 cas_free_rxds(cp); cas_open()
4312 cas_tx_tiny_free(cp); cas_open()
4314 mutex_unlock(&cp->pm_mutex); cas_open()
4321 struct cas *cp = netdev_priv(dev); cas_close() local
4324 napi_disable(&cp->napi); cas_close()
4327 mutex_lock(&cp->pm_mutex); cas_close()
4332 cas_lock_all_save(cp, flags); cas_close()
4333 cp->opened = 0; cas_close()
4334 cas_reset(cp, 0); cas_close()
4335 cas_phy_init(cp); cas_close()
4336 cas_begin_auto_negotiation(cp, NULL); cas_close()
4337 cas_clean_rings(cp); cas_close()
4338 cas_unlock_all_restore(cp, flags); cas_close()
4340 free_irq(cp->pdev->irq, (void *) dev); cas_close()
4341 cas_spare_free(cp); cas_close()
4342 cas_free_rxds(cp); cas_close()
4343 cas_tx_tiny_free(cp); cas_close()
4344 mutex_unlock(&cp->pm_mutex); cas_close()
4395 static void cas_read_regs(struct cas *cp, u8 *ptr, int len) cas_read_regs() argument
4401 spin_lock_irqsave(&cp->lock, flags); cas_read_regs()
4406 hval = cas_phy_read(cp, cas_read_regs()
4410 val= readl(cp->regs+ethtool_register_table[i].offsets); cas_read_regs()
4414 spin_unlock_irqrestore(&cp->lock, flags); cas_read_regs()
4419 struct cas *cp = netdev_priv(dev); cas_get_stats() local
4420 struct net_device_stats *stats = cp->net_stats; cas_get_stats()
4426 if (!cp->hw_running) cas_get_stats()
4437 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); cas_get_stats()
4439 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; cas_get_stats()
4441 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; cas_get_stats()
4443 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; cas_get_stats()
4445 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + cas_get_stats()
4446 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); cas_get_stats()
4449 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); cas_get_stats()
4452 readl(cp->regs + REG_MAC_COLL_EXCESS); cas_get_stats()
4453 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + cas_get_stats()
4454 readl(cp->regs + REG_MAC_COLL_LATE); cas_get_stats()
4456 cas_clear_mac_err(cp); cas_get_stats()
4459 spin_lock(&cp->stat_lock[0]); cas_get_stats()
4466 spin_unlock(&cp->stat_lock[0]); cas_get_stats()
4469 spin_lock(&cp->stat_lock[i]); cas_get_stats()
4482 spin_unlock(&cp->stat_lock[i]); cas_get_stats()
4484 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); cas_get_stats()
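
Note: cas_get_stats() above folds the per-ring counters into the summary slot at index N_TX_RINGS. A sketch of that aggregation, with illustrative per-ring values and the locking and MAC counter reads omitted:

#include <stdio.h>

#define N_RINGS 4

struct ring_stats { unsigned long rx_packets, rx_bytes; };

/* Fold per-ring counters into the summary slot, as cas_get_stats()
 * does into stats[N_TX_RINGS]. */
int main(void)
{
        struct ring_stats s[N_RINGS + 1] = {
                {10, 1000}, {5, 500}, {0, 0}, {2, 200}, {0, 0}
        };
        for (int i = 0; i < N_RINGS; i++) {
                s[N_RINGS].rx_packets += s[i].rx_packets;
                s[N_RINGS].rx_bytes   += s[i].rx_bytes;
        }
        printf("total: %lu packets, %lu bytes\n",
               s[N_RINGS].rx_packets, s[N_RINGS].rx_bytes);
        return 0;
}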
4491 struct cas *cp = netdev_priv(dev); cas_set_multicast() local
4496 if (!cp->hw_running) cas_set_multicast()
4499 spin_lock_irqsave(&cp->lock, flags); cas_set_multicast()
4500 rxcfg = readl(cp->regs + REG_MAC_RX_CFG); cas_set_multicast()
4503 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); cas_set_multicast()
4504 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { cas_set_multicast()
4513 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); cas_set_multicast()
4514 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { cas_set_multicast()
4521 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); cas_set_multicast()
4523 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); cas_set_multicast()
4524 spin_unlock_irqrestore(&cp->lock, flags); cas_set_multicast()
4529 struct cas *cp = netdev_priv(dev); cas_get_drvinfo() local
4532 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); cas_get_drvinfo()
4533 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? cas_get_drvinfo()
4534 cp->casreg_len : CAS_MAX_REGS; cas_get_drvinfo()
4540 struct cas *cp = netdev_priv(dev); cas_get_settings() local
4548 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { cas_get_settings()
4554 spin_lock_irqsave(&cp->lock, flags); cas_get_settings()
4556 linkstate = cp->lstate; cas_get_settings()
4557 if (CAS_PHY_MII(cp->phy_type)) { cas_get_settings()
4559 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? cas_get_settings()
4561 cmd->phy_address = cp->phy_addr; cas_get_settings()
4575 if (cp->hw_running) { cas_get_settings()
4576 cas_mif_poll(cp, 0); cas_get_settings()
4577 bmcr = cas_phy_read(cp, MII_BMCR); cas_get_settings()
4578 cas_read_mii_link_mode(cp, &full_duplex, cas_get_settings()
4580 cas_mif_poll(cp, 1); cas_get_settings()
4590 if (cp->hw_running) { cas_get_settings()
4592 bmcr = readl(cp->regs + REG_PCS_MII_CTRL); cas_get_settings()
4593 cas_read_pcs_link_mode(cp, &full_duplex, cas_get_settings()
4597 spin_unlock_irqrestore(&cp->lock, flags); cas_get_settings()
4628 if (cp->link_cntl & BMCR_ANENABLE) { cas_get_settings()
4633 if (cp->link_cntl & BMCR_SPEED100) { cas_get_settings()
4635 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { cas_get_settings()
4638 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? cas_get_settings()
4647 struct cas *cp = netdev_priv(dev); cas_set_settings() local
4665 spin_lock_irqsave(&cp->lock, flags); cas_set_settings()
4666 cas_begin_auto_negotiation(cp, cmd); cas_set_settings()
4667 spin_unlock_irqrestore(&cp->lock, flags); cas_set_settings()
4673 struct cas *cp = netdev_priv(dev); cas_nway_reset() local
4676 if ((cp->link_cntl & BMCR_ANENABLE) == 0) cas_nway_reset()
4680 spin_lock_irqsave(&cp->lock, flags); cas_nway_reset()
4681 cas_begin_auto_negotiation(cp, NULL); cas_nway_reset()
4682 spin_unlock_irqrestore(&cp->lock, flags); cas_nway_reset()
4689 struct cas *cp = netdev_priv(dev); cas_get_link() local
4690 return cp->lstate == link_up; cas_get_link()
4695 struct cas *cp = netdev_priv(dev); cas_get_msglevel() local
4696 return cp->msg_enable; cas_get_msglevel()
4701 struct cas *cp = netdev_priv(dev); cas_set_msglevel() local
4702 cp->msg_enable = value; cas_set_msglevel()
4707 struct cas *cp = netdev_priv(dev); cas_get_regs_len() local
4708 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS; cas_get_regs_len()
4714 struct cas *cp = netdev_priv(dev); cas_get_regs() local
4716 /* cas_read_regs handles locks (cp->lock). */ cas_get_regs()
4717 cas_read_regs(cp, p, regs->len / sizeof(u32)); cas_get_regs()
4739 struct cas *cp = netdev_priv(dev); cas_get_ethtool_stats() local
4740 struct net_device_stats *stats = cas_get_stats(cp->dev); cas_get_ethtool_stats()
4778 struct cas *cp = netdev_priv(dev); cas_ioctl() local
4786 mutex_lock(&cp->pm_mutex); cas_ioctl()
4789 data->phy_id = cp->phy_addr; cas_ioctl()
4793 spin_lock_irqsave(&cp->lock, flags); cas_ioctl()
4794 cas_mif_poll(cp, 0); cas_ioctl()
4795 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); cas_ioctl()
4796 cas_mif_poll(cp, 1); cas_ioctl()
4797 spin_unlock_irqrestore(&cp->lock, flags); cas_ioctl()
4802 spin_lock_irqsave(&cp->lock, flags); cas_ioctl()
4803 cas_mif_poll(cp, 0); cas_ioctl()
4804 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); cas_ioctl()
4805 cas_mif_poll(cp, 1); cas_ioctl()
4806 spin_unlock_irqrestore(&cp->lock, flags); cas_ioctl()
4812 mutex_unlock(&cp->pm_mutex); cas_ioctl()
4921 struct cas *cp; cas_init_one() local
4942 dev = alloc_etherdev(sizeof(*cp)); cas_init_one()
5016 cp = netdev_priv(dev); cas_init_one()
5017 cp->pdev = pdev; cas_init_one()
5020 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0; cas_init_one()
5022 cp->dev = dev; cas_init_one()
5023 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : cas_init_one()
5027 cp->of_node = pci_device_to_OF_node(pdev); cas_init_one()
5030 cp->link_transition = LINK_TRANSITION_UNKNOWN; cas_init_one()
5031 cp->link_transition_jiffies_valid = 0; cas_init_one()
5033 spin_lock_init(&cp->lock); cas_init_one()
5034 spin_lock_init(&cp->rx_inuse_lock); cas_init_one()
5035 spin_lock_init(&cp->rx_spare_lock); cas_init_one()
5037 spin_lock_init(&cp->stat_lock[i]); cas_init_one()
5038 spin_lock_init(&cp->tx_lock[i]); cas_init_one()
5040 spin_lock_init(&cp->stat_lock[N_TX_RINGS]); cas_init_one()
5041 mutex_init(&cp->pm_mutex); cas_init_one()
5043 init_timer(&cp->link_timer); cas_init_one()
5044 cp->link_timer.function = cas_link_timer; cas_init_one()
5045 cp->link_timer.data = (unsigned long) cp; cas_init_one()
5051 atomic_set(&cp->reset_task_pending, 0); cas_init_one()
5052 atomic_set(&cp->reset_task_pending_all, 0); cas_init_one()
5053 atomic_set(&cp->reset_task_pending_spare, 0); cas_init_one()
5054 atomic_set(&cp->reset_task_pending_mtu, 0); cas_init_one()
5056 INIT_WORK(&cp->reset_task, cas_reset_task); cas_init_one()
5060 cp->link_cntl = link_modes[link_mode]; cas_init_one()
5062 cp->link_cntl = BMCR_ANENABLE; cas_init_one()
5063 cp->lstate = link_down; cas_init_one()
5064 cp->link_transition = LINK_TRANSITION_LINK_DOWN; cas_init_one()
5065 netif_carrier_off(cp->dev); cas_init_one()
5066 cp->timer_ticks = 0; cas_init_one()
5069 cp->regs = pci_iomap(pdev, 0, casreg_len); cas_init_one()
5070 if (!cp->regs) { cas_init_one()
5074 cp->casreg_len = casreg_len; cas_init_one()
5077 cas_check_pci_invariants(cp); cas_init_one()
5078 cas_hard_reset(cp); cas_init_one()
5079 cas_reset(cp, 0); cas_init_one()
5080 if (cas_check_invariants(cp)) cas_init_one()
5082 if (cp->cas_flags & CAS_FLAG_SATURN) cas_init_one()
5083 cas_saturn_firmware_init(cp); cas_init_one()
5085 cp->init_block = (struct cas_init_block *) cas_init_one()
5087 &cp->block_dvma); cas_init_one()
5088 if (!cp->init_block) { cas_init_one()
5094 cp->init_txds[i] = cp->init_block->txds[i]; cas_init_one()
5097 cp->init_rxds[i] = cp->init_block->rxds[i]; cas_init_one()
5100 cp->init_rxcs[i] = cp->init_block->rxcs[i]; cas_init_one()
5103 skb_queue_head_init(&cp->rx_flows[i]); cas_init_one()
5110 netif_napi_add(dev, &cp->napi, cas_poll, 64); cas_init_one()
5116 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) cas_init_one()
5127 i = readl(cp->regs + REG_BIM_CFG); cas_init_one()
5129 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", cas_init_one()
5132 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, cas_init_one()
5136 cp->hw_running = 1; cas_init_one()
5137 cas_entropy_reset(cp); cas_init_one()
5138 cas_phy_init(cp); cas_init_one()
5139 cas_begin_auto_negotiation(cp, NULL); cas_init_one()
5144 cp->init_block, cp->block_dvma); cas_init_one()
5147 mutex_lock(&cp->pm_mutex); cas_init_one()
5148 if (cp->hw_running) cas_init_one()
5149 cas_shutdown(cp); cas_init_one()
5150 mutex_unlock(&cp->pm_mutex); cas_init_one()
5152 pci_iounmap(pdev, cp->regs); cas_init_one()
5175 struct cas *cp; cas_remove_one() local
5179 cp = netdev_priv(dev); cas_remove_one()
5182 vfree(cp->fw_data); cas_remove_one()
5184 mutex_lock(&cp->pm_mutex); cas_remove_one()
5185 cancel_work_sync(&cp->reset_task); cas_remove_one()
5186 if (cp->hw_running) cas_remove_one()
5187 cas_shutdown(cp); cas_remove_one()
5188 mutex_unlock(&cp->pm_mutex); cas_remove_one()
5191 if (cp->orig_cacheline_size) { cas_remove_one()
5196 cp->orig_cacheline_size); cas_remove_one()
5200 cp->init_block, cp->block_dvma); cas_remove_one()
5201 pci_iounmap(pdev, cp->regs); cas_remove_one()
5211 struct cas *cp = netdev_priv(dev); cas_suspend() local
5214 mutex_lock(&cp->pm_mutex); cas_suspend()
5217 if (cp->opened) { cas_suspend()
5220 cas_lock_all_save(cp, flags); cas_suspend()
5227 cas_reset(cp, 0); cas_suspend()
5228 cas_clean_rings(cp); cas_suspend()
5229 cas_unlock_all_restore(cp, flags); cas_suspend()
5232 if (cp->hw_running) cas_suspend()
5233 cas_shutdown(cp); cas_suspend()
5234 mutex_unlock(&cp->pm_mutex); cas_suspend()
5242 struct cas *cp = netdev_priv(dev); cas_resume() local
5246 mutex_lock(&cp->pm_mutex); cas_resume()
5247 cas_hard_reset(cp); cas_resume()
5248 if (cp->opened) { cas_resume()
5250 cas_lock_all_save(cp, flags); cas_resume()
5251 cas_reset(cp, 0); cas_resume()
5252 cp->hw_running = 1; cas_resume()
5253 cas_clean_rings(cp); cas_resume()
5254 cas_init_hw(cp, 1); cas_resume()
5255 cas_unlock_all_restore(cp, flags); cas_resume()
5259 mutex_unlock(&cp->pm_mutex); cas_resume()
1107 cas_pcs_interrupt(struct net_device *dev, struct cas *cp, u32 status) cas_pcs_interrupt() argument
1117 cas_txmac_interrupt(struct net_device *dev, struct cas *cp, u32 status) cas_txmac_interrupt() argument
2406 cas_post_rxcs_ringN(struct net_device *dev, struct cas *cp, int ring) cas_post_rxcs_ringN() argument
2437 cas_handle_irqN(struct net_device *dev, struct cas *cp, const u32 status, const int ring) cas_handle_irqN() argument
2525 cas_handle_irq(struct net_device *dev, struct cas *cp, const u32 status) cas_handle_irq() argument
/linux-4.1.27/net/netfilter/ipvs/
H A Dip_vs_conn.c147 static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp) ip_vs_conn_hashkey_conn() argument
151 ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol, ip_vs_conn_hashkey_conn()
152 &cp->caddr, cp->cport, NULL, 0, &p); ip_vs_conn_hashkey_conn()
154 if (cp->pe) { ip_vs_conn_hashkey_conn()
155 p.pe = cp->pe; ip_vs_conn_hashkey_conn()
156 p.pe_data = cp->pe_data; ip_vs_conn_hashkey_conn()
157 p.pe_data_len = cp->pe_data_len; ip_vs_conn_hashkey_conn()
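
Note: ip_vs_conn_hashkey_conn() above derives the bucket from the client side of the tuple (address family, protocol, caddr, cport). The kernel's actual key is jhash-based; the sketch below is only a simplified stand-in showing how such fields get mixed and masked to a table index, with an assumed table size and an invented mixing function:

#include <stdint.h>
#include <stdio.h>

#define CONN_TAB_SIZE 4096   /* assumed; the real table size is configurable */

/* Simplified stand-in for the jhash-based key IPVS computes from
 * (af, protocol, caddr, cport). Not the real IPVS mixing function. */
static unsigned int conn_hashkey(uint16_t af, uint8_t proto,
                                 uint32_t caddr, uint16_t cport)
{
        uint32_t h = af;
        h = h * 31 + proto;
        h = h * 31 + caddr;
        h = h * 31 + cport;
        h ^= h >> 16;                 /* fold high bits into the low ones */
        return h & (CONN_TAB_SIZE - 1);
}

int main(void)
{
        printf("bucket %u\n", conn_hashkey(2 /* AF_INET */, 6 /* TCP */,
                                           0xc0a80001 /* 192.168.0.1 */, 8080));
        return 0;
}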
167 static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) ip_vs_conn_hash() argument
172 if (cp->flags & IP_VS_CONN_F_ONE_PACKET) ip_vs_conn_hash()
176 hash = ip_vs_conn_hashkey_conn(cp); ip_vs_conn_hash()
179 spin_lock(&cp->lock); ip_vs_conn_hash()
181 if (!(cp->flags & IP_VS_CONN_F_HASHED)) { ip_vs_conn_hash()
182 cp->flags |= IP_VS_CONN_F_HASHED; ip_vs_conn_hash()
183 atomic_inc(&cp->refcnt); ip_vs_conn_hash()
184 hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]); ip_vs_conn_hash()
192 spin_unlock(&cp->lock); ip_vs_conn_hash()
203 static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) ip_vs_conn_unhash() argument
209 hash = ip_vs_conn_hashkey_conn(cp); ip_vs_conn_unhash()
212 spin_lock(&cp->lock); ip_vs_conn_unhash()
214 if (cp->flags & IP_VS_CONN_F_HASHED) { ip_vs_conn_unhash()
215 hlist_del_rcu(&cp->c_list); ip_vs_conn_unhash()
216 cp->flags &= ~IP_VS_CONN_F_HASHED; ip_vs_conn_unhash()
217 atomic_dec(&cp->refcnt); ip_vs_conn_unhash()
222 spin_unlock(&cp->lock); ip_vs_conn_unhash()
231 static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp) ip_vs_conn_unlink() argument
236 hash = ip_vs_conn_hashkey_conn(cp); ip_vs_conn_unlink()
239 spin_lock(&cp->lock); ip_vs_conn_unlink()
241 if (cp->flags & IP_VS_CONN_F_HASHED) { ip_vs_conn_unlink()
244 if (atomic_cmpxchg(&cp->refcnt, 1, 0) == 1) { ip_vs_conn_unlink()
245 hlist_del_rcu(&cp->c_list); ip_vs_conn_unlink()
246 cp->flags &= ~IP_VS_CONN_F_HASHED; ip_vs_conn_unlink()
250 ret = atomic_read(&cp->refcnt) ? false : true; ip_vs_conn_unlink()
252 spin_unlock(&cp->lock); ip_vs_conn_unlink()
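
Note: ip_vs_conn_unlink() above only removes the entry when the caller holds the last reference: atomic_cmpxchg(&cp->refcnt, 1, 0) performs the 1 -> 0 drop in one step, so a concurrent lookup that already bumped the count makes the exchange fail and the entry stays hashed. A userspace sketch of that idiom with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Unlink succeeds only if we hold the sole reference: the 1 -> 0
 * transition is a single compare-and-swap, mirroring
 * atomic_cmpxchg(&cp->refcnt, 1, 0) above. */
static bool try_unlink(atomic_int *refcnt)
{
        int expected = 1;
        return atomic_compare_exchange_strong(refcnt, &expected, 0);
}

int main(void)
{
        atomic_int refcnt = 1;
        printf("sole owner: %s\n", try_unlink(&refcnt) ? "unlinked" : "busy");
        atomic_store(&refcnt, 2);          /* a lookup holds a reference */
        printf("shared:     %s\n", try_unlink(&refcnt) ? "unlinked" : "busy");
        return 0;
}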
269 struct ip_vs_conn *cp; __ip_vs_conn_in_get() local
275 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { __ip_vs_conn_in_get()
276 if (p->cport == cp->cport && p->vport == cp->vport && __ip_vs_conn_in_get()
277 cp->af == p->af && __ip_vs_conn_in_get()
278 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && __ip_vs_conn_in_get()
279 ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) && __ip_vs_conn_in_get()
280 ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && __ip_vs_conn_in_get()
281 p->protocol == cp->protocol && __ip_vs_conn_in_get()
282 ip_vs_conn_net_eq(cp, p->net)) { __ip_vs_conn_in_get()
283 if (!__ip_vs_conn_get(cp)) __ip_vs_conn_in_get()
287 return cp; __ip_vs_conn_in_get()
298 struct ip_vs_conn *cp; ip_vs_conn_in_get() local
300 cp = __ip_vs_conn_in_get(p); ip_vs_conn_in_get()
301 if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) { ip_vs_conn_in_get()
304 cp = __ip_vs_conn_in_get(&cport_zero_p); ip_vs_conn_in_get()
311 cp ? "hit" : "not hit"); ip_vs_conn_in_get()
313 return cp; ip_vs_conn_in_get()
354 struct ip_vs_conn *cp; ip_vs_ct_in_get() local
360 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { ip_vs_ct_in_get()
362 if (!ip_vs_conn_net_eq(cp, p->net)) ip_vs_ct_in_get()
364 if (p->pe == cp->pe && p->pe->ct_match(p, cp)) { ip_vs_ct_in_get()
365 if (__ip_vs_conn_get(cp)) ip_vs_ct_in_get()
371 if (cp->af == p->af && ip_vs_ct_in_get()
372 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && ip_vs_ct_in_get()
376 p->af, p->vaddr, &cp->vaddr) && ip_vs_ct_in_get()
377 p->vport == cp->vport && p->cport == cp->cport && ip_vs_ct_in_get()
378 cp->flags & IP_VS_CONN_F_TEMPLATE && ip_vs_ct_in_get()
379 p->protocol == cp->protocol && ip_vs_ct_in_get()
380 ip_vs_conn_net_eq(cp, p->net)) { ip_vs_ct_in_get()
381 if (__ip_vs_conn_get(cp)) ip_vs_ct_in_get()
385 cp = NULL; ip_vs_ct_in_get()
394 cp ? "hit" : "not hit"); ip_vs_ct_in_get()
396 return cp; ip_vs_ct_in_get()
406 struct ip_vs_conn *cp, *ret=NULL; ip_vs_conn_out_get() local
415 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { ip_vs_conn_out_get()
416 if (p->vport == cp->cport && p->cport == cp->dport && ip_vs_conn_out_get()
417 cp->af == p->af && ip_vs_conn_out_get()
418 ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && ip_vs_conn_out_get()
419 ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) && ip_vs_conn_out_get()
420 p->protocol == cp->protocol && ip_vs_conn_out_get()
421 ip_vs_conn_net_eq(cp, p->net)) { ip_vs_conn_out_get()
422 if (!__ip_vs_conn_get(cp)) ip_vs_conn_out_get()
425 ret = cp; ip_vs_conn_out_get()
457 void ip_vs_conn_put(struct ip_vs_conn *cp) ip_vs_conn_put() argument
459 unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ? ip_vs_conn_put()
460 0 : cp->timeout; ip_vs_conn_put()
461 mod_timer(&cp->timer, jiffies+t); ip_vs_conn_put()
463 __ip_vs_conn_put(cp); ip_vs_conn_put()
470 void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport) ip_vs_conn_fill_cport() argument
472 if (ip_vs_conn_unhash(cp)) { ip_vs_conn_fill_cport()
473 spin_lock_bh(&cp->lock); ip_vs_conn_fill_cport()
474 if (cp->flags & IP_VS_CONN_F_NO_CPORT) { ip_vs_conn_fill_cport()
476 cp->flags &= ~IP_VS_CONN_F_NO_CPORT; ip_vs_conn_fill_cport()
477 cp->cport = cport; ip_vs_conn_fill_cport()
479 spin_unlock_bh(&cp->lock); ip_vs_conn_fill_cport()
482 ip_vs_conn_hash(cp); ip_vs_conn_fill_cport()
491 static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp) ip_vs_bind_xmit() argument
493 switch (IP_VS_FWD_METHOD(cp)) { ip_vs_bind_xmit()
495 cp->packet_xmit = ip_vs_nat_xmit; ip_vs_bind_xmit()
500 if (cp->daf == AF_INET6) ip_vs_bind_xmit()
501 cp->packet_xmit = ip_vs_tunnel_xmit_v6; ip_vs_bind_xmit()
504 cp->packet_xmit = ip_vs_tunnel_xmit; ip_vs_bind_xmit()
508 cp->packet_xmit = ip_vs_dr_xmit; ip_vs_bind_xmit()
512 cp->packet_xmit = ip_vs_null_xmit; ip_vs_bind_xmit()
516 cp->packet_xmit = ip_vs_bypass_xmit; ip_vs_bind_xmit()
522 static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp) ip_vs_bind_xmit_v6() argument
524 switch (IP_VS_FWD_METHOD(cp)) { ip_vs_bind_xmit_v6()
526 cp->packet_xmit = ip_vs_nat_xmit_v6; ip_vs_bind_xmit_v6()
530 if (cp->daf == AF_INET6) ip_vs_bind_xmit_v6()
531 cp->packet_xmit = ip_vs_tunnel_xmit_v6; ip_vs_bind_xmit_v6()
533 cp->packet_xmit = ip_vs_tunnel_xmit; ip_vs_bind_xmit_v6()
537 cp->packet_xmit = ip_vs_dr_xmit_v6; ip_vs_bind_xmit_v6()
541 cp->packet_xmit = ip_vs_null_xmit; ip_vs_bind_xmit_v6()
545 cp->packet_xmit = ip_vs_bypass_xmit_v6; ip_vs_bind_xmit_v6()
563 ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) ip_vs_bind_dest() argument
576 if (cp->protocol != IPPROTO_UDP) ip_vs_bind_dest()
578 flags = cp->flags; ip_vs_bind_dest()
590 cp->flags = flags; ip_vs_bind_dest()
591 cp->dest = dest; ip_vs_bind_dest()
596 ip_vs_proto_name(cp->protocol), ip_vs_bind_dest()
597 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), ip_vs_bind_dest()
598 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), ip_vs_bind_dest()
599 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), ip_vs_bind_dest()
600 ip_vs_fwd_tag(cp), cp->state, ip_vs_bind_dest()
601 cp->flags, atomic_read(&cp->refcnt), ip_vs_bind_dest()
630 void ip_vs_try_bind_dest(struct ip_vs_conn *cp) ip_vs_try_bind_dest() argument
641 dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, cp->af, &cp->daddr, ip_vs_try_bind_dest()
642 cp->dport, &cp->vaddr, cp->vport, ip_vs_try_bind_dest()
643 cp->protocol, cp->fwmark, cp->flags); ip_vs_try_bind_dest()
647 spin_lock_bh(&cp->lock); ip_vs_try_bind_dest()
648 if (cp->dest) { ip_vs_try_bind_dest()
649 spin_unlock_bh(&cp->lock); ip_vs_try_bind_dest()
656 if (cp->app) ip_vs_try_bind_dest()
657 ip_vs_unbind_app(cp); ip_vs_try_bind_dest()
659 ip_vs_bind_dest(cp, dest); ip_vs_try_bind_dest()
660 spin_unlock_bh(&cp->lock); ip_vs_try_bind_dest()
663 cp->packet_xmit = NULL; ip_vs_try_bind_dest()
665 if (cp->af == AF_INET6) ip_vs_try_bind_dest()
666 ip_vs_bind_xmit_v6(cp); ip_vs_try_bind_dest()
669 ip_vs_bind_xmit(cp); ip_vs_try_bind_dest()
671 pd = ip_vs_proto_data_get(ip_vs_conn_net(cp), cp->protocol); ip_vs_try_bind_dest()
673 ip_vs_bind_app(cp, pd->pp); ip_vs_try_bind_dest()
683 static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp) ip_vs_unbind_dest() argument
685 struct ip_vs_dest *dest = cp->dest; ip_vs_unbind_dest()
693 ip_vs_proto_name(cp->protocol), ip_vs_unbind_dest()
694 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), ip_vs_unbind_dest()
695 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), ip_vs_unbind_dest()
696 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), ip_vs_unbind_dest()
697 ip_vs_fwd_tag(cp), cp->state, ip_vs_unbind_dest()
698 cp->flags, atomic_read(&cp->refcnt), ip_vs_unbind_dest()
702 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { ip_vs_unbind_dest()
705 if (cp->flags & IP_VS_CONN_F_INACTIVE) { ip_vs_unbind_dest()
792 struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn, ip_vs_conn_rcu_free() local
795 ip_vs_pe_put(cp->pe); ip_vs_conn_rcu_free()
796 kfree(cp->pe_data); ip_vs_conn_rcu_free()
797 kmem_cache_free(ip_vs_conn_cachep, cp); ip_vs_conn_rcu_free()
802 struct ip_vs_conn *cp = (struct ip_vs_conn *)data; ip_vs_conn_expire() local
803 struct net *net = ip_vs_conn_net(cp); ip_vs_conn_expire()
809 if (atomic_read(&cp->n_control)) ip_vs_conn_expire()
813 if (likely(ip_vs_conn_unlink(cp))) { ip_vs_conn_expire()
815 del_timer(&cp->timer); ip_vs_conn_expire()
818 if (cp->control) ip_vs_conn_expire()
819 ip_vs_control_del(cp); ip_vs_conn_expire()
821 if (cp->flags & IP_VS_CONN_F_NFCT) { ip_vs_conn_expire()
828 ip_vs_conn_drop_conntrack(cp); ip_vs_conn_expire()
831 if (unlikely(cp->app != NULL)) ip_vs_conn_expire()
832 ip_vs_unbind_app(cp); ip_vs_conn_expire()
833 ip_vs_unbind_dest(cp); ip_vs_conn_expire()
834 if (cp->flags & IP_VS_CONN_F_NO_CPORT) ip_vs_conn_expire()
836 call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free); ip_vs_conn_expire()
843 atomic_read(&cp->refcnt), ip_vs_conn_expire()
844 atomic_read(&cp->n_control)); ip_vs_conn_expire()
846 atomic_inc(&cp->refcnt); ip_vs_conn_expire()
847 cp->timeout = 60*HZ; ip_vs_conn_expire()
850 ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs)); ip_vs_conn_expire()
852 ip_vs_conn_put(cp); ip_vs_conn_expire()
858 void ip_vs_conn_expire_now(struct ip_vs_conn *cp) ip_vs_conn_expire_now() argument
863 if (timer_pending(&cp->timer) && ip_vs_conn_expire_now()
864 time_after(cp->timer.expires, jiffies)) ip_vs_conn_expire_now()
865 mod_timer_pending(&cp->timer, jiffies); ip_vs_conn_expire_now()
877 struct ip_vs_conn *cp; ip_vs_conn_new() local
882 cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC); ip_vs_conn_new()
883 if (cp == NULL) { ip_vs_conn_new()
888 INIT_HLIST_NODE(&cp->c_list); ip_vs_conn_new()
889 setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); ip_vs_conn_new()
890 ip_vs_conn_net_set(cp, p->net); ip_vs_conn_new()
891 cp->af = p->af; ip_vs_conn_new()
892 cp->daf = dest_af; ip_vs_conn_new()
893 cp->protocol = p->protocol; ip_vs_conn_new()
894 ip_vs_addr_set(p->af, &cp->caddr, p->caddr); ip_vs_conn_new()
895 cp->cport = p->cport; ip_vs_conn_new()
898 &cp->vaddr, p->vaddr); ip_vs_conn_new()
899 cp->vport = p->vport; ip_vs_conn_new()
900 ip_vs_addr_set(cp->daf, &cp->daddr, daddr); ip_vs_conn_new()
901 cp->dport = dport; ip_vs_conn_new()
902 cp->flags = flags; ip_vs_conn_new()
903 cp->fwmark = fwmark; ip_vs_conn_new()
906 cp->pe = p->pe; ip_vs_conn_new()
907 cp->pe_data = p->pe_data; ip_vs_conn_new()
908 cp->pe_data_len = p->pe_data_len; ip_vs_conn_new()
910 cp->pe = NULL; ip_vs_conn_new()
911 cp->pe_data = NULL; ip_vs_conn_new()
912 cp->pe_data_len = 0; ip_vs_conn_new()
914 spin_lock_init(&cp->lock); ip_vs_conn_new()
921 atomic_set(&cp->refcnt, 1); ip_vs_conn_new()
923 cp->control = NULL; ip_vs_conn_new()
924 atomic_set(&cp->n_control, 0); ip_vs_conn_new()
925 atomic_set(&cp->in_pkts, 0); ip_vs_conn_new()
927 cp->packet_xmit = NULL; ip_vs_conn_new()
928 cp->app = NULL; ip_vs_conn_new()
929 cp->app_data = NULL; ip_vs_conn_new()
931 cp->in_seq.delta = 0; ip_vs_conn_new()
932 cp->out_seq.delta = 0; ip_vs_conn_new()
939 cp->dest = NULL; ip_vs_conn_new()
940 ip_vs_bind_dest(cp, dest); ip_vs_conn_new()
943 cp->state = 0; ip_vs_conn_new()
944 cp->old_state = 0; ip_vs_conn_new()
945 cp->timeout = 3*HZ; ip_vs_conn_new()
946 cp->sync_endtime = jiffies & ~3UL; ip_vs_conn_new()
951 ip_vs_bind_xmit_v6(cp); ip_vs_conn_new()
954 ip_vs_bind_xmit(cp); ip_vs_conn_new()
957 ip_vs_bind_app(cp, pd->pp); ip_vs_conn_new()
967 cp->flags |= IP_VS_CONN_F_NFCT; ip_vs_conn_new()
970 ip_vs_conn_hash(cp); ip_vs_conn_new()
972 return cp; ip_vs_conn_new()
987 struct ip_vs_conn *cp; ip_vs_conn_array() local
991 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) { ip_vs_conn_array()
997 return cp; ip_vs_conn_array()
1018 struct ip_vs_conn *cp = v; ip_vs_conn_seq_next() local
1029 e = rcu_dereference(hlist_next_rcu(&cp->c_list)); ip_vs_conn_seq_next()
1035 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) { ip_vs_conn_seq_next()
1037 return cp; ip_vs_conn_seq_next()
1058 const struct ip_vs_conn *cp = v; ip_vs_conn_seq_show() local
1064 if (!ip_vs_conn_net_eq(cp, net)) ip_vs_conn_seq_show()
1066 if (cp->pe_data) { ip_vs_conn_seq_show()
1068 len = strlen(cp->pe->name); ip_vs_conn_seq_show()
1069 memcpy(pe_data + 1, cp->pe->name, len); ip_vs_conn_seq_show()
1072 len += cp->pe->show_pe_data(cp, pe_data + len); ip_vs_conn_seq_show()
1077 if (cp->daf == AF_INET6) ip_vs_conn_seq_show()
1078 snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6); ip_vs_conn_seq_show()
1082 ntohl(cp->daddr.ip)); ip_vs_conn_seq_show()
1085 if (cp->af == AF_INET6) ip_vs_conn_seq_show()
1088 ip_vs_proto_name(cp->protocol), ip_vs_conn_seq_show()
1089 &cp->caddr.in6, ntohs(cp->cport), ip_vs_conn_seq_show()
1090 &cp->vaddr.in6, ntohs(cp->vport), ip_vs_conn_seq_show()
1091 dbuf, ntohs(cp->dport), ip_vs_conn_seq_show()
1092 ip_vs_state_name(cp->protocol, cp->state), ip_vs_conn_seq_show()
1093 (cp->timer.expires-jiffies)/HZ, pe_data); ip_vs_conn_seq_show()
1099 ip_vs_proto_name(cp->protocol), ip_vs_conn_seq_show()
1100 ntohl(cp->caddr.ip), ntohs(cp->cport), ip_vs_conn_seq_show()
1101 ntohl(cp->vaddr.ip), ntohs(cp->vport), ip_vs_conn_seq_show()
1102 dbuf, ntohs(cp->dport), ip_vs_conn_seq_show()
1103 ip_vs_state_name(cp->protocol, cp->state), ip_vs_conn_seq_show()
1104 (cp->timer.expires-jiffies)/HZ, pe_data); ip_vs_conn_seq_show()
1146 const struct ip_vs_conn *cp = v; ip_vs_conn_sync_seq_show() local
1149 if (!ip_vs_conn_net_eq(cp, net)) ip_vs_conn_sync_seq_show()
1153 if (cp->daf == AF_INET6) ip_vs_conn_sync_seq_show()
1154 snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6); ip_vs_conn_sync_seq_show()
1158 ntohl(cp->daddr.ip)); ip_vs_conn_sync_seq_show()
1161 if (cp->af == AF_INET6) ip_vs_conn_sync_seq_show()
1164 ip_vs_proto_name(cp->protocol), ip_vs_conn_sync_seq_show()
1165 &cp->caddr.in6, ntohs(cp->cport), ip_vs_conn_sync_seq_show()
1166 &cp->vaddr.in6, ntohs(cp->vport), ip_vs_conn_sync_seq_show()
1167 dbuf, ntohs(cp->dport), ip_vs_conn_sync_seq_show()
1168 ip_vs_state_name(cp->protocol, cp->state), ip_vs_conn_sync_seq_show()
1169 ip_vs_origin_name(cp->flags), ip_vs_conn_sync_seq_show()
1170 (cp->timer.expires-jiffies)/HZ); ip_vs_conn_sync_seq_show()
1176 ip_vs_proto_name(cp->protocol), ip_vs_conn_sync_seq_show()
1177 ntohl(cp->caddr.ip), ntohs(cp->cport), ip_vs_conn_sync_seq_show()
1178 ntohl(cp->vaddr.ip), ntohs(cp->vport), ip_vs_conn_sync_seq_show()
1179 dbuf, ntohs(cp->dport), ip_vs_conn_sync_seq_show()
1180 ip_vs_state_name(cp->protocol, cp->state), ip_vs_conn_sync_seq_show()
1181 ip_vs_origin_name(cp->flags), ip_vs_conn_sync_seq_show()
1182 (cp->timer.expires-jiffies)/HZ); ip_vs_conn_sync_seq_show()
1214 static inline int todrop_entry(struct ip_vs_conn *cp) todrop_entry() argument
1227 if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ)) todrop_entry()
1232 i = atomic_read(&cp->in_pkts); todrop_entry()
1246 struct ip_vs_conn *cp, *cp_c; ip_vs_random_dropentry() local
1255 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { ip_vs_random_dropentry()
1256 if (cp->flags & IP_VS_CONN_F_TEMPLATE) ip_vs_random_dropentry()
1259 if (!ip_vs_conn_net_eq(cp, net)) ip_vs_random_dropentry()
1261 if (cp->protocol == IPPROTO_TCP) { ip_vs_random_dropentry()
1262 switch(cp->state) { ip_vs_random_dropentry()
1268 if (todrop_entry(cp)) ip_vs_random_dropentry()
1275 } else if (cp->protocol == IPPROTO_SCTP) { ip_vs_random_dropentry()
1276 switch (cp->state) { ip_vs_random_dropentry()
1281 if (todrop_entry(cp)) ip_vs_random_dropentry()
1288 if (!todrop_entry(cp)) ip_vs_random_dropentry()
1293 ip_vs_conn_expire_now(cp); ip_vs_random_dropentry()
1294 cp_c = cp->control; ip_vs_random_dropentry()
1295 /* cp->control is valid only with reference to cp */ ip_vs_random_dropentry()
1296 if (cp_c && __ip_vs_conn_get(cp)) { ip_vs_random_dropentry()
1299 __ip_vs_conn_put(cp); ip_vs_random_dropentry()
1314 struct ip_vs_conn *cp, *cp_c; ip_vs_conn_flush() local
1321 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) { ip_vs_conn_flush()
1322 if (!ip_vs_conn_net_eq(cp, net)) ip_vs_conn_flush()
1325 ip_vs_conn_expire_now(cp); ip_vs_conn_flush()
1326 cp_c = cp->control; ip_vs_conn_flush()
1327 /* cp->control is valid only with reference to cp */ ip_vs_conn_flush()
1328 if (cp_c && __ip_vs_conn_get(cp)) { ip_vs_conn_flush()
1331 __ip_vs_conn_put(cp); ip_vs_conn_flush()
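The hits above cluster around one ownership rule: the connection table holds one reference on every hashed entry, and ip_vs_conn_unlink() may remove an entry only when that table reference is the last one left, which it proves with the atomic_cmpxchg(&cp->refcnt, 1, 0) seen above. A minimal userspace sketch of that rule follows; the names are hypothetical, and the per-connection spinlock and RCU list primitives of the original are elided.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define F_HASHED 0x1
    #define TAB_SIZE 256

    struct conn {
        struct conn *next;      /* bucket chain, stands in for c_list */
        unsigned int flags;
        atomic_int refcnt;      /* the table itself owns one reference */
    };

    static struct conn *tab[TAB_SIZE];

    /* Hash the connection and take a reference on behalf of the table. */
    static bool conn_hash(struct conn *cp, unsigned int hash)
    {
        if (cp->flags & F_HASHED)
            return false;                   /* already in the table */
        cp->flags |= F_HASHED;
        atomic_fetch_add(&cp->refcnt, 1);
        cp->next = tab[hash % TAB_SIZE];
        tab[hash % TAB_SIZE] = cp;
        return true;
    }

    /* Unlink only if ours is the last reference, like ip_vs_conn_unlink(). */
    static bool conn_unlink(struct conn *cp, unsigned int hash)
    {
        int one = 1;

        if ((cp->flags & F_HASHED) &&
            atomic_compare_exchange_strong(&cp->refcnt, &one, 0)) {
            struct conn **pp = &tab[hash % TAB_SIZE];

            while (*pp && *pp != cp)
                pp = &(*pp)->next;
            if (*pp)
                *pp = cp->next;             /* hlist_del_rcu() upstream */
            cp->flags &= ~F_HASHED;
            return true;                    /* caller may now free cp */
        }
        return false;                       /* another CPU still holds it */
    }

Failing the compare-exchange is the common case under load: a packet path still holds a reference, so ip_vs_conn_expire() simply re-arms the timer (the cp->timeout = 60*HZ hit above) and retries later.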
H A Dip_vs_proto_udp.c128 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) udp_snat_handler()
136 if (cp->af == AF_INET6 && iph->fragoffs) udp_snat_handler()
145 if (unlikely(cp->app != NULL)) { udp_snat_handler()
149 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) udp_snat_handler()
155 if (!(ret = ip_vs_app_pkt_out(cp, skb))) udp_snat_handler()
165 udph->source = cp->vport; udp_snat_handler()
171 udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, udp_snat_handler()
176 udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, udp_snat_handler()
177 cp->dport, cp->vport); udp_snat_handler()
179 skb->ip_summed = (cp->app && pp->csum_check) ? udp_snat_handler()
186 if (cp->af == AF_INET6) udp_snat_handler()
187 udph->check = csum_ipv6_magic(&cp->vaddr.in6, udp_snat_handler()
188 &cp->caddr.in6, udp_snat_handler()
190 cp->protocol, skb->csum); udp_snat_handler()
193 udph->check = csum_tcpudp_magic(cp->vaddr.ip, udp_snat_handler()
194 cp->caddr.ip, udp_snat_handler()
196 cp->protocol, udp_snat_handler()
211 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) udp_dnat_handler()
219 if (cp->af == AF_INET6 && iph->fragoffs) udp_dnat_handler()
228 if (unlikely(cp->app != NULL)) { udp_dnat_handler()
232 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) udp_dnat_handler()
239 if (!(ret = ip_vs_app_pkt_in(cp, skb))) udp_dnat_handler()
249 udph->dest = cp->dport; udp_dnat_handler()
255 udp_partial_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr, udp_dnat_handler()
260 udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr, udp_dnat_handler()
261 cp->vport, cp->dport); udp_dnat_handler()
263 skb->ip_summed = (cp->app && pp->csum_check) ? udp_dnat_handler()
270 if (cp->af == AF_INET6) udp_dnat_handler()
271 udph->check = csum_ipv6_magic(&cp->caddr.in6, udp_dnat_handler()
272 &cp->daddr.in6, udp_dnat_handler()
274 cp->protocol, skb->csum); udp_dnat_handler()
277 udph->check = csum_tcpudp_magic(cp->caddr.ip, udp_dnat_handler()
278 cp->daddr.ip, udp_dnat_handler()
280 cp->protocol, udp_dnat_handler()
386 static int udp_app_conn_bind(struct ip_vs_conn *cp) udp_app_conn_bind() argument
388 struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); udp_app_conn_bind()
394 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) udp_app_conn_bind()
398 hash = udp_app_hashkey(cp->vport); udp_app_conn_bind()
402 if (inc->port == cp->vport) { udp_app_conn_bind()
410 IP_VS_DBG_ADDR(cp->af, &cp->caddr), udp_app_conn_bind()
411 ntohs(cp->cport), udp_app_conn_bind()
412 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), udp_app_conn_bind()
413 ntohs(cp->vport), udp_app_conn_bind()
416 cp->app = inc; udp_app_conn_bind()
418 result = inc->init_conn(inc, cp); udp_app_conn_bind()
447 udp_state_transition(struct ip_vs_conn *cp, int direction, udp_state_transition() argument
456 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; udp_state_transition()
127 udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) udp_snat_handler() argument
210 udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) udp_dnat_handler() argument
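udp_fast_csum_update() avoids re-summing the whole packet when only a port changes: it applies the RFC 1624 incremental update to the existing checksum. A standalone sketch of that arithmetic, with hypothetical names:

    #include <stdint.h>

    /* Fold a 32-bit accumulator back into 16 bits (end-around carry). */
    static uint16_t csum_fold32(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* RFC 1624: HC' = ~(~HC + ~m + m') for one changed 16-bit field. */
    static uint16_t csum_update16(uint16_t check, uint16_t oldv, uint16_t newv)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~oldv;
        sum += newv;
        return (uint16_t)~csum_fold32(sum);
    }

For UDP the kernel adds one twist: a checksum of 0 means "no checksum" on the wire, so a freshly computed 0 is written out as 0xffff instead.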
H A Dip_vs_proto_tcp.c132 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) tcp_snat_handler()
140 if (cp->af == AF_INET6 && iph->fragoffs) tcp_snat_handler()
149 if (unlikely(cp->app != NULL)) { tcp_snat_handler()
153 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) tcp_snat_handler()
157 if (!(ret = ip_vs_app_pkt_out(cp, skb))) tcp_snat_handler()
167 tcph->source = cp->vport; tcp_snat_handler()
171 tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, tcp_snat_handler()
176 tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, tcp_snat_handler()
177 cp->dport, cp->vport); tcp_snat_handler()
179 skb->ip_summed = (cp->app && pp->csum_check) ? tcp_snat_handler()
186 if (cp->af == AF_INET6) tcp_snat_handler()
187 tcph->check = csum_ipv6_magic(&cp->vaddr.in6, tcp_snat_handler()
188 &cp->caddr.in6, tcp_snat_handler()
190 cp->protocol, skb->csum); tcp_snat_handler()
193 tcph->check = csum_tcpudp_magic(cp->vaddr.ip, tcp_snat_handler()
194 cp->caddr.ip, tcp_snat_handler()
196 cp->protocol, tcp_snat_handler()
210 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) tcp_dnat_handler()
218 if (cp->af == AF_INET6 && iph->fragoffs) tcp_dnat_handler()
227 if (unlikely(cp->app != NULL)) { tcp_dnat_handler()
231 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) tcp_dnat_handler()
238 if (!(ret = ip_vs_app_pkt_in(cp, skb))) tcp_dnat_handler()
248 tcph->dest = cp->dport; tcp_dnat_handler()
254 tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, tcp_dnat_handler()
259 tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, tcp_dnat_handler()
260 cp->vport, cp->dport); tcp_dnat_handler()
262 skb->ip_summed = (cp->app && pp->csum_check) ? tcp_dnat_handler()
269 if (cp->af == AF_INET6) tcp_dnat_handler()
270 tcph->check = csum_ipv6_magic(&cp->caddr.in6, tcp_dnat_handler()
271 &cp->daddr.in6, tcp_dnat_handler()
273 cp->protocol, skb->csum); tcp_dnat_handler()
276 tcph->check = csum_tcpudp_magic(cp->caddr.ip, tcp_dnat_handler()
277 cp->daddr.ip, tcp_dnat_handler()
279 cp->protocol, tcp_dnat_handler()
474 set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, set_tcp_state() argument
485 if (cp->flags & IP_VS_CONN_F_NOOUTPUT) { set_tcp_state()
487 cp->flags &= ~IP_VS_CONN_F_NOOUTPUT; set_tcp_state()
498 pd->tcp_state_table[state_off+state_idx].next_state[cp->state]; set_tcp_state()
501 if (new_state != cp->state) { set_tcp_state()
502 struct ip_vs_dest *dest = cp->dest; set_tcp_state()
513 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), set_tcp_state()
514 ntohs(cp->dport), set_tcp_state()
515 IP_VS_DBG_ADDR(cp->af, &cp->caddr), set_tcp_state()
516 ntohs(cp->cport), set_tcp_state()
517 tcp_state_name(cp->state), set_tcp_state()
519 atomic_read(&cp->refcnt)); set_tcp_state()
522 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && set_tcp_state()
526 cp->flags |= IP_VS_CONN_F_INACTIVE; set_tcp_state()
527 } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && set_tcp_state()
531 cp->flags &= ~IP_VS_CONN_F_INACTIVE; set_tcp_state()
537 cp->timeout = pd->timeout_table[cp->state = new_state]; set_tcp_state()
539 cp->timeout = tcp_timeouts[cp->state = new_state]; set_tcp_state()
546 tcp_state_transition(struct ip_vs_conn *cp, int direction, tcp_state_transition() argument
553 int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); tcp_state_transition()
562 spin_lock_bh(&cp->lock); tcp_state_transition()
563 set_tcp_state(pd, cp, direction, th); tcp_state_transition()
564 spin_unlock_bh(&cp->lock); tcp_state_transition()
610 tcp_app_conn_bind(struct ip_vs_conn *cp) tcp_app_conn_bind() argument
612 struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); tcp_app_conn_bind()
618 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) tcp_app_conn_bind()
622 hash = tcp_app_hashkey(cp->vport); tcp_app_conn_bind()
626 if (inc->port == cp->vport) { tcp_app_conn_bind()
634 IP_VS_DBG_ADDR(cp->af, &cp->caddr), tcp_app_conn_bind()
635 ntohs(cp->cport), tcp_app_conn_bind()
636 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), tcp_app_conn_bind()
637 ntohs(cp->vport), tcp_app_conn_bind()
640 cp->app = inc; tcp_app_conn_bind()
642 result = inc->init_conn(inc, cp); tcp_app_conn_bind()
656 void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp) ip_vs_tcp_conn_listen() argument
660 spin_lock_bh(&cp->lock); ip_vs_tcp_conn_listen()
661 cp->state = IP_VS_TCP_S_LISTEN; ip_vs_tcp_conn_listen()
662 cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN] ip_vs_tcp_conn_listen()
664 spin_unlock_bh(&cp->lock); ip_vs_tcp_conn_listen()
131 tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) tcp_snat_handler() argument
209 tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) tcp_dnat_handler() argument
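set_tcp_state() is table driven: the TCP flags select an event, the packet direction offsets the event index, and next_state[] maps the current state to the next one. A toy version, just to show the lookup shape (the states, events, and transitions here are made up, not the kernel's tables):

    enum tcp_state { S_NONE, S_ESTABLISHED, S_FIN_WAIT, S_CLOSE, S_NR };
    enum tcp_event { E_SYN, E_FIN, E_ACK, E_RST, E_NR };

    /* Next-state table indexed by [event][current state]; the kernel
     * version additionally offsets the event index by direction. */
    static const enum tcp_state next_state[E_NR][S_NR] = {
        [E_SYN] = { S_ESTABLISHED, S_ESTABLISHED, S_FIN_WAIT,    S_CLOSE },
        [E_FIN] = { S_CLOSE,       S_FIN_WAIT,    S_FIN_WAIT,    S_CLOSE },
        [E_ACK] = { S_NONE,        S_ESTABLISHED, S_CLOSE,       S_CLOSE },
        [E_RST] = { S_CLOSE,       S_CLOSE,       S_CLOSE,       S_CLOSE },
    };

    static enum tcp_state transition(enum tcp_state cur, enum tcp_event ev)
    {
        return next_state[ev][cur];
    }

The lookup runs under spin_lock_bh(&cp->lock) in tcp_state_transition() because cp->flags and cp->timeout are updated together with cp->state.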
H A Dip_vs_nfct.c82 ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) ip_vs_update_conntrack() argument
93 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) ip_vs_update_conntrack()
101 if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP && ip_vs_update_conntrack()
116 new_tuple.src.u3 = cp->daddr; ip_vs_update_conntrack()
119 new_tuple.src.u.tcp.port = cp->dport; ip_vs_update_conntrack()
121 new_tuple.dst.u3 = cp->vaddr; ip_vs_update_conntrack()
124 new_tuple.dst.u.tcp.port = cp->vport; ip_vs_update_conntrack()
128 ", new reply=" FMT_TUPLE ", cp=" FMT_CONN "\n", ip_vs_update_conntrack()
131 ARG_TUPLE(&new_tuple), ARG_CONN(cp)); ip_vs_update_conntrack()
147 struct ip_vs_conn *cp; ip_vs_nfct_expect_callback() local
167 cp = ip_vs_conn_out_get(&p); ip_vs_nfct_expect_callback()
168 if (cp) { ip_vs_nfct_expect_callback()
172 FMT_TUPLE ", found inout cp=" FMT_CONN "\n", ip_vs_nfct_expect_callback()
175 ARG_CONN(cp)); ip_vs_nfct_expect_callback()
176 new_reply.dst.u3 = cp->vaddr; ip_vs_nfct_expect_callback()
177 new_reply.dst.u.tcp.port = cp->vport; ip_vs_nfct_expect_callback()
179 ", inout cp=" FMT_CONN "\n", ip_vs_nfct_expect_callback()
182 ARG_CONN(cp)); ip_vs_nfct_expect_callback()
187 cp = ip_vs_conn_in_get(&p); ip_vs_nfct_expect_callback()
188 if (cp) { ip_vs_nfct_expect_callback()
192 FMT_TUPLE ", found outin cp=" FMT_CONN "\n", ip_vs_nfct_expect_callback()
195 ARG_CONN(cp)); ip_vs_nfct_expect_callback()
196 new_reply.src.u3 = cp->daddr; ip_vs_nfct_expect_callback()
197 new_reply.src.u.tcp.port = cp->dport; ip_vs_nfct_expect_callback()
199 FMT_TUPLE ", outin cp=" FMT_CONN "\n", ip_vs_nfct_expect_callback()
202 ARG_CONN(cp)); ip_vs_nfct_expect_callback()
213 if (IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ) ip_vs_nfct_expect_callback()
215 ip_vs_conn_put(cp); ip_vs_nfct_expect_callback()
226 struct ip_vs_conn *cp, u_int8_t proto, ip_vs_nfct_expect_related()
239 from_rs ? &cp->daddr : &cp->caddr, ip_vs_nfct_expect_related()
240 from_rs ? &cp->caddr : &cp->vaddr, ip_vs_nfct_expect_related()
242 from_rs ? &cp->cport : &cp->vport); ip_vs_nfct_expect_related()
256 void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) ip_vs_conn_drop_conntrack() argument
262 if (!cp->cport) ip_vs_conn_drop_conntrack()
266 .dst = { .protonum = cp->protocol, .dir = IP_CT_DIR_ORIGINAL } }; ip_vs_conn_drop_conntrack()
267 tuple.src.u3 = cp->caddr; ip_vs_conn_drop_conntrack()
268 tuple.src.u.all = cp->cport; ip_vs_conn_drop_conntrack()
269 tuple.src.l3num = cp->af; ip_vs_conn_drop_conntrack()
270 tuple.dst.u3 = cp->vaddr; ip_vs_conn_drop_conntrack()
271 tuple.dst.u.all = cp->vport; ip_vs_conn_drop_conntrack()
275 __func__, ARG_TUPLE(&tuple), ARG_CONN(cp)); ip_vs_conn_drop_conntrack()
277 h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE, ip_vs_conn_drop_conntrack()
225 ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct, struct ip_vs_conn *cp, u_int8_t proto, const __be16 port, int from_rs) ip_vs_nfct_expect_related() argument
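ip_vs_conn_drop_conntrack() has to rebuild the original-direction conntrack tuple purely from connection fields: client address/port as source, virtual address/port as destination. A small sketch of that mapping (hypothetical struct, IPv4 only for brevity; the real tuple also carries the address family in src.l3num):

    #include <stdint.h>
    #include <string.h>

    struct tuple {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
        uint8_t  protonum;
    };

    /* Original-direction tuple as the hits above assemble it:
     * caddr/cport as source, vaddr/vport as destination. */
    static void tuple_from_conn(struct tuple *t,
                                uint32_t caddr, uint16_t cport,
                                uint32_t vaddr, uint16_t vport,
                                uint8_t proto)
    {
        memset(t, 0, sizeof(*t));
        t->saddr = caddr;  t->sport = cport;
        t->daddr = vaddr;  t->dport = vport;
        t->protonum = proto;
    }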
H A Dip_vs_xmit.c511 struct ip_vs_conn *cp) ip_vs_tunnel_xmit_prepare()
516 if (unlikely(cp->flags & IP_VS_CONN_F_NFCT)) ip_vs_tunnel_xmit_prepare()
544 struct ip_vs_conn *cp, int local) ip_vs_nat_send_or_cont()
549 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT))) ip_vs_nat_send_or_cont()
552 ip_vs_update_conntrack(skb, cp, 1); ip_vs_nat_send_or_cont()
557 if (!local || cp->vport != cp->dport || ip_vs_nat_send_or_cont()
558 !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr)) ip_vs_nat_send_or_cont()
575 struct ip_vs_conn *cp, int local) ip_vs_send_or_cont()
580 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT))) ip_vs_send_or_cont()
599 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_null_xmit() argument
603 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1); ip_vs_null_xmit()
613 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_bypass_xmit() argument
621 if (__ip_vs_get_out_rt(cp->af, skb, NULL, iph->daddr, ip_vs_bypass_xmit()
630 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); ip_vs_bypass_xmit()
645 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_bypass_xmit_v6() argument
651 if (__ip_vs_get_out_rt_v6(cp->af, skb, NULL, &ipvsh->daddr.in6, NULL, ip_vs_bypass_xmit_v6()
658 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); ip_vs_bypass_xmit_v6()
677 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_nat_xmit() argument
687 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) { ip_vs_nat_xmit()
693 ip_vs_conn_fill_cport(cp, *p); ip_vs_nat_xmit()
698 local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, ip_vs_nat_xmit()
710 if (cp->flags & IP_VS_CONN_F_SYNC && local) { ip_vs_nat_xmit()
724 if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) { ip_vs_nat_xmit()
738 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh)) ip_vs_nat_xmit()
740 ip_hdr(skb)->daddr = cp->daddr.ip; ip_vs_nat_xmit()
752 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); ip_vs_nat_xmit()
767 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_nat_xmit_v6() argument
777 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !ipvsh->fragoffs)) { ip_vs_nat_xmit_v6()
782 ip_vs_conn_fill_cport(cp, *p); ip_vs_nat_xmit_v6()
786 local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, ip_vs_nat_xmit_v6()
799 if (cp->flags & IP_VS_CONN_F_SYNC && local) { ip_vs_nat_xmit_v6()
829 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh)) ip_vs_nat_xmit_v6()
831 ipv6_hdr(skb)->daddr = cp->daddr.in6; ip_vs_nat_xmit_v6()
842 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); ip_vs_nat_xmit_v6()
940 * destination will be set to cp->daddr. Most code of this function
956 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_tunnel_xmit() argument
976 local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, ip_vs_tunnel_xmit()
985 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1); ip_vs_tunnel_xmit()
998 skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom, ip_vs_tunnel_xmit()
1005 skb, false, __tun_gso_type_mask(AF_INET, cp->af)); ip_vs_tunnel_xmit()
1024 iph->daddr = cp->daddr.ip; ip_vs_tunnel_xmit()
1032 ret = ip_vs_tunnel_xmit_prepare(skb, cp); ip_vs_tunnel_xmit()
1053 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_tunnel_xmit_v6() argument
1070 local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, ip_vs_tunnel_xmit_v6()
1079 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1); ip_vs_tunnel_xmit_v6()
1090 skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom, ip_vs_tunnel_xmit_v6()
1097 skb, false, __tun_gso_type_mask(AF_INET6, cp->af)); ip_vs_tunnel_xmit_v6()
1116 iph->daddr = cp->daddr.in6; ip_vs_tunnel_xmit_v6()
1123 ret = ip_vs_tunnel_xmit_prepare(skb, cp); ip_vs_tunnel_xmit_v6()
1149 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_dr_xmit() argument
1157 local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, ip_vs_dr_xmit()
1165 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1); ip_vs_dr_xmit()
1173 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); ip_vs_dr_xmit()
1188 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_dr_xmit_v6() argument
1196 local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, ip_vs_dr_xmit_v6()
1204 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1); ip_vs_dr_xmit_v6()
1210 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); ip_vs_dr_xmit_v6()
1230 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_icmp_xmit() argument
1244 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { ip_vs_icmp_xmit()
1245 if (cp->packet_xmit) ip_vs_icmp_xmit()
1246 rc = cp->packet_xmit(skb, cp, pp, iph); ip_vs_icmp_xmit()
1250 atomic_inc(&cp->in_pkts); ip_vs_icmp_xmit()
1264 local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, rt_mode, ip_vs_icmp_xmit()
1275 if (cp->flags & IP_VS_CONN_F_SYNC && local) { ip_vs_icmp_xmit()
1282 __func__, &cp->daddr.ip); ip_vs_icmp_xmit()
1289 if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) { ip_vs_icmp_xmit()
1292 __func__, &cp->daddr.ip); ip_vs_icmp_xmit()
1303 ip_vs_nat_icmp(skb, pp, cp, 0); ip_vs_icmp_xmit()
1308 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); ip_vs_icmp_xmit()
1323 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_icmp_xmit_v6() argument
1337 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { ip_vs_icmp_xmit_v6()
1338 if (cp->packet_xmit) ip_vs_icmp_xmit_v6()
1339 rc = cp->packet_xmit(skb, cp, pp, ipvsh); ip_vs_icmp_xmit_v6()
1343 atomic_inc(&cp->in_pkts); ip_vs_icmp_xmit_v6()
1356 local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, ip_vs_icmp_xmit_v6()
1366 if (cp->flags & IP_VS_CONN_F_SYNC && local) { ip_vs_icmp_xmit_v6()
1373 __func__, &cp->daddr.in6); ip_vs_icmp_xmit_v6()
1384 __func__, &cp->daddr.in6); ip_vs_icmp_xmit_v6()
1395 ip_vs_nat_icmp_v6(skb, pp, cp, 0); ip_vs_icmp_xmit_v6()
1400 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); ip_vs_icmp_xmit_v6()
510 ip_vs_tunnel_xmit_prepare(struct sk_buff *skb, struct ip_vs_conn *cp) ip_vs_tunnel_xmit_prepare() argument
543 ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, struct ip_vs_conn *cp, int local) ip_vs_nat_send_or_cont() argument
574 ip_vs_send_or_cont(int pf, struct sk_buff *skb, struct ip_vs_conn *cp, int local) ip_vs_send_or_cont() argument
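All the *_xmit() variants in this file are reached through one indirect call, cp->packet_xmit, bound once per connection by ip_vs_bind_xmit() (see the ip_vs_conn.c hits earlier). A compact sketch of that bind-once, call-often dispatch, with hypothetical names:

    #include <stdio.h>

    struct conn;
    typedef int (*xmit_fn)(struct conn *cp);

    static int xmit_nat(struct conn *cp)    { (void)cp; puts("NAT");    return 0; }
    static int xmit_tunnel(struct conn *cp) { (void)cp; puts("tunnel"); return 0; }
    static int xmit_dr(struct conn *cp)     { (void)cp; puts("DR");     return 0; }
    static int xmit_bypass(struct conn *cp) { (void)cp; puts("bypass"); return 0; }

    enum fwd_method { FWD_MASQ, FWD_TUNNEL, FWD_DROUTE, FWD_BYPASS };

    /* Decide the forwarding method once, at connection setup; the packet
     * fast path then calls cp->packet_xmit and never branches on it again. */
    static xmit_fn bind_xmit(enum fwd_method m)
    {
        switch (m) {
        case FWD_MASQ:   return xmit_nat;
        case FWD_TUNNEL: return xmit_tunnel;
        case FWD_DROUTE: return xmit_dr;
        default:         return xmit_bypass;
        }
    }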
H A Dip_vs_sync.c143 __be32 timeout; /* cp timeout */
165 __be32 timeout; /* cp timeout */
396 select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp) select_master_thread_id() argument
398 return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask; select_master_thread_id()
429 static inline bool in_persistence(struct ip_vs_conn *cp) in_persistence() argument
431 for (cp = cp->control; cp; cp = cp->control) { in_persistence()
432 if (cp->flags & IP_VS_CONN_F_TEMPLATE) in_persistence()
448 struct ip_vs_conn *cp, int pkts) ip_vs_sync_conn_needed()
450 unsigned long orig = ACCESS_ONCE(cp->sync_endtime); ip_vs_sync_conn_needed()
452 unsigned long n = (now + cp->timeout) & ~3UL; ip_vs_sync_conn_needed()
458 if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE)) ip_vs_sync_conn_needed()
460 else if (unlikely(sysctl_sync_persist_mode(ipvs) && in_persistence(cp))) ip_vs_sync_conn_needed()
462 else if (likely(cp->protocol == IPPROTO_TCP)) { ip_vs_sync_conn_needed()
463 if (!((1 << cp->state) & ip_vs_sync_conn_needed()
470 force = cp->state != cp->old_state; ip_vs_sync_conn_needed()
471 if (force && cp->state != IP_VS_TCP_S_ESTABLISHED) ip_vs_sync_conn_needed()
473 } else if (unlikely(cp->protocol == IPPROTO_SCTP)) { ip_vs_sync_conn_needed()
474 if (!((1 << cp->state) & ip_vs_sync_conn_needed()
481 force = cp->state != cp->old_state; ip_vs_sync_conn_needed()
482 if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED) ip_vs_sync_conn_needed()
492 long min_diff = max(cp->timeout >> 1, 10UL * HZ); ip_vs_sync_conn_needed()
502 if (time_before(now, orig - cp->timeout + ip_vs_sync_conn_needed()
510 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) && ip_vs_sync_conn_needed()
518 cp->old_state = cp->state; ip_vs_sync_conn_needed()
519 n = cmpxchg(&cp->sync_endtime, orig, n); ip_vs_sync_conn_needed()
527 static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, ip_vs_sync_conn_v0() argument
538 if (unlikely(cp->af != AF_INET)) ip_vs_sync_conn_v0()
541 if (cp->flags & IP_VS_CONN_F_ONE_PACKET) ip_vs_sync_conn_v0()
544 if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) ip_vs_sync_conn_v0()
553 id = select_master_thread_id(ipvs, cp); ip_vs_sync_conn_v0()
575 len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE : ip_vs_sync_conn_v0()
582 s->protocol = cp->protocol; ip_vs_sync_conn_v0()
583 s->cport = cp->cport; ip_vs_sync_conn_v0()
584 s->vport = cp->vport; ip_vs_sync_conn_v0()
585 s->dport = cp->dport; ip_vs_sync_conn_v0()
586 s->caddr = cp->caddr.ip; ip_vs_sync_conn_v0()
587 s->vaddr = cp->vaddr.ip; ip_vs_sync_conn_v0()
588 s->daddr = cp->daddr.ip; ip_vs_sync_conn_v0()
589 s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); ip_vs_sync_conn_v0()
590 s->state = htons(cp->state); ip_vs_sync_conn_v0()
591 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { ip_vs_sync_conn_v0()
594 memcpy(opt, &cp->in_seq, sizeof(*opt)); ip_vs_sync_conn_v0()
609 cp = cp->control; ip_vs_sync_conn_v0()
610 if (cp) { ip_vs_sync_conn_v0()
611 if (cp->flags & IP_VS_CONN_F_TEMPLATE) ip_vs_sync_conn_v0()
612 pkts = atomic_add_return(1, &cp->in_pkts); ip_vs_sync_conn_v0()
615 ip_vs_sync_conn(net, cp, pkts); ip_vs_sync_conn_v0()
624 void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts) ip_vs_sync_conn() argument
637 ip_vs_sync_conn_v0(net, cp, pkts); ip_vs_sync_conn()
641 if (cp->flags & IP_VS_CONN_F_ONE_PACKET) ip_vs_sync_conn()
644 if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) ip_vs_sync_conn()
649 if (cp->pe_data_len) { ip_vs_sync_conn()
650 if (!cp->pe_data || !cp->dest) { ip_vs_sync_conn()
654 pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN); ip_vs_sync_conn()
663 id = select_master_thread_id(ipvs, cp); ip_vs_sync_conn()
667 if (cp->af == AF_INET6) ip_vs_sync_conn()
673 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) ip_vs_sync_conn()
676 if (cp->pe_data_len) ip_vs_sync_conn()
677 len += cp->pe_data_len + 2; /* + Param hdr field */ ip_vs_sync_conn()
717 s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0); ip_vs_sync_conn()
719 s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED); ip_vs_sync_conn()
720 s->v4.state = htons(cp->state); ip_vs_sync_conn()
721 s->v4.protocol = cp->protocol; ip_vs_sync_conn()
722 s->v4.cport = cp->cport; ip_vs_sync_conn()
723 s->v4.vport = cp->vport; ip_vs_sync_conn()
724 s->v4.dport = cp->dport; ip_vs_sync_conn()
725 s->v4.fwmark = htonl(cp->fwmark); ip_vs_sync_conn()
726 s->v4.timeout = htonl(cp->timeout / HZ); ip_vs_sync_conn()
730 if (cp->af == AF_INET6) { ip_vs_sync_conn()
732 s->v6.caddr = cp->caddr.in6; ip_vs_sync_conn()
733 s->v6.vaddr = cp->vaddr.in6; ip_vs_sync_conn()
734 s->v6.daddr = cp->daddr.in6; ip_vs_sync_conn()
739 s->v4.caddr = cp->caddr.ip; ip_vs_sync_conn()
740 s->v4.vaddr = cp->vaddr.ip; ip_vs_sync_conn()
741 s->v4.daddr = cp->daddr.ip; ip_vs_sync_conn()
743 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { ip_vs_sync_conn()
746 hton_seq((struct ip_vs_seq *)p, &cp->in_seq); ip_vs_sync_conn()
748 hton_seq((struct ip_vs_seq *)p, &cp->out_seq); ip_vs_sync_conn()
752 if (cp->pe_data_len && cp->pe_data) { ip_vs_sync_conn()
754 *(p++) = cp->pe_data_len; ip_vs_sync_conn()
755 memcpy(p, cp->pe_data, cp->pe_data_len); ip_vs_sync_conn()
756 p += cp->pe_data_len; ip_vs_sync_conn()
761 memcpy(p, cp->pe->name, pe_name_len); ip_vs_sync_conn()
770 cp = cp->control; ip_vs_sync_conn()
771 if (!cp) ip_vs_sync_conn()
773 if (cp->flags & IP_VS_CONN_F_TEMPLATE) ip_vs_sync_conn()
774 pkts = atomic_add_return(1, &cp->in_pkts); ip_vs_sync_conn()
845 struct ip_vs_conn *cp; ip_vs_proc_conn() local
849 cp = ip_vs_conn_in_get(param); ip_vs_proc_conn()
850 if (cp && ((cp->dport != dport) || ip_vs_proc_conn()
851 !ip_vs_addr_equal(cp->daf, &cp->daddr, daddr))) { ip_vs_proc_conn()
853 ip_vs_conn_expire_now(cp); ip_vs_proc_conn()
854 __ip_vs_conn_put(cp); ip_vs_proc_conn()
855 cp = NULL; ip_vs_proc_conn()
861 __ip_vs_conn_put(cp); ip_vs_proc_conn()
867 cp = ip_vs_ct_in_get(param); ip_vs_proc_conn()
870 if (cp) { ip_vs_proc_conn()
874 dest = cp->dest; ip_vs_proc_conn()
875 spin_lock_bh(&cp->lock); ip_vs_proc_conn()
876 if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && ip_vs_proc_conn()
887 flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; ip_vs_proc_conn()
888 cp->flags = flags; ip_vs_proc_conn()
889 spin_unlock_bh(&cp->lock); ip_vs_proc_conn()
891 ip_vs_try_bind_dest(cp); ip_vs_proc_conn()
908 cp = ip_vs_conn_new(param, type, daddr, dport, flags, dest, ip_vs_proc_conn()
911 if (!cp) { ip_vs_proc_conn()
921 memcpy(&cp->in_seq, opt, sizeof(*opt)); ip_vs_proc_conn()
922 atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); ip_vs_proc_conn()
923 cp->state = state; ip_vs_proc_conn()
924 cp->old_state = cp->state; ip_vs_proc_conn()
937 cp->timeout = timeout*HZ; ip_vs_proc_conn()
943 cp->timeout = pd->timeout_table[state]; ip_vs_proc_conn()
945 cp->timeout = (3*60*HZ); ip_vs_proc_conn()
947 ip_vs_conn_put(cp); ip_vs_proc_conn()
447 ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts) ip_vs_sync_conn_needed() argument
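ip_vs_sync_conn_needed() throttles sync traffic: each connection keeps a sync_endtime, and a CPU may report the connection only if the holdoff window has passed (or the state changed) and it wins the cmpxchg that moves the window forward. A simplified sketch of that race-free throttle; the real function also weighs packet-count thresholds and per-protocol state, and packs state bits into the low bits of sync_endtime:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Returns true on the one CPU that should emit a sync message now. */
    static bool sync_needed(atomic_ulong *endtime, unsigned long now,
                            unsigned long period, bool force)
    {
        unsigned long orig = atomic_load(endtime);
        unsigned long next = now + period;

        if (!force && now < orig)   /* kernel: time_before(now, orig) */
            return false;           /* still inside the holdoff window */

        /* Several CPUs can race past the check; only the winner of the
         * compare-exchange reports, the rest see a moved window. */
        return atomic_compare_exchange_strong(endtime, &orig, next);
    }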
H A Dip_vs_app.c262 * Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
264 int ip_vs_bind_app(struct ip_vs_conn *cp, ip_vs_bind_app() argument
267 return pp->app_conn_bind(cp); ip_vs_bind_app()
272 * Unbind cp from application incarnation (called by cp destructor)
274 void ip_vs_unbind_app(struct ip_vs_conn *cp) ip_vs_unbind_app() argument
276 struct ip_vs_app *inc = cp->app; ip_vs_unbind_app()
282 inc->unbind_conn(inc, cp); ip_vs_unbind_app()
284 inc->done_conn(inc, cp); ip_vs_unbind_app()
286 cp->app = NULL; ip_vs_unbind_app()
351 static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq, vs_seq_update() argument
354 /* spinlock is to keep updating cp->flags atomic */ vs_seq_update()
355 spin_lock_bh(&cp->lock); vs_seq_update()
356 if (!(cp->flags & flag) || after(seq, vseq->init_seq)) { vs_seq_update()
360 cp->flags |= flag; vs_seq_update()
362 spin_unlock_bh(&cp->lock); vs_seq_update()
365 static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb, app_tcp_pkt_out() argument
386 if (cp->flags & IP_VS_CONN_F_OUT_SEQ) app_tcp_pkt_out()
387 vs_fix_seq(&cp->out_seq, th); app_tcp_pkt_out()
388 if (cp->flags & IP_VS_CONN_F_IN_SEQ) app_tcp_pkt_out()
389 vs_fix_ack_seq(&cp->in_seq, th); app_tcp_pkt_out()
397 if (!app->pkt_out(app, cp, skb, &diff)) app_tcp_pkt_out()
404 vs_seq_update(cp, &cp->out_seq, app_tcp_pkt_out()
412 * called by ipvs packet handler, assumes previously checked cp!=NULL
415 int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb) ip_vs_app_pkt_out() argument
423 if ((app = cp->app) == NULL) ip_vs_app_pkt_out()
427 if (cp->protocol == IPPROTO_TCP) ip_vs_app_pkt_out()
428 return app_tcp_pkt_out(cp, skb, app); ip_vs_app_pkt_out()
436 return app->pkt_out(app, cp, skb, NULL); ip_vs_app_pkt_out()
440 static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb, app_tcp_pkt_in() argument
461 if (cp->flags & IP_VS_CONN_F_IN_SEQ) app_tcp_pkt_in()
462 vs_fix_seq(&cp->in_seq, th); app_tcp_pkt_in()
463 if (cp->flags & IP_VS_CONN_F_OUT_SEQ) app_tcp_pkt_in()
464 vs_fix_ack_seq(&cp->out_seq, th); app_tcp_pkt_in()
472 if (!app->pkt_in(app, cp, skb, &diff)) app_tcp_pkt_in()
479 vs_seq_update(cp, &cp->in_seq, app_tcp_pkt_in()
487 * called by ipvs packet handler, assumes previously checked cp!=NULL.
490 int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb) ip_vs_app_pkt_in() argument
498 if ((app = cp->app) == NULL) ip_vs_app_pkt_in()
502 if (cp->protocol == IPPROTO_TCP) ip_vs_app_pkt_in()
503 return app_tcp_pkt_in(cp, skb, app); ip_vs_app_pkt_in()
511 return app->pkt_in(app, cp, skb, NULL); ip_vs_app_pkt_in()
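When an application helper grows or shrinks the TCP payload, every later sequence number in that direction must be shifted; vs_fix_seq()/vs_fix_ack_seq() apply a stored delta to packets beyond the point where the edit happened. A minimal sketch of the forward fixup (hypothetical names; the real struct ip_vs_seq also keeps a previous delta so retransmissions of older data are adjusted correctly):

    #include <stdint.h>

    struct seq_delta {
        uint32_t init_seq;  /* first sequence number the delta applies after */
        int32_t  delta;     /* bytes added (>0) or removed (<0) by the helper */
    };

    static uint32_t fix_seq(const struct seq_delta *d, uint32_t seq)
    {
        /* Signed 32-bit difference is the kernel's after(seq, init_seq),
         * so the comparison survives sequence-number wraparound. */
        if ((int32_t)(seq - d->init_seq) > 0)
            return seq + (uint32_t)d->delta;
        return seq;
    }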
H A Dip_vs_proto_sctp.c80 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) sctp_snat_handler()
87 if (cp->af == AF_INET6 && iph->fragoffs) sctp_snat_handler()
95 if (unlikely(cp->app != NULL)) { sctp_snat_handler()
99 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) sctp_snat_handler()
103 ret = ip_vs_app_pkt_out(cp, skb); sctp_snat_handler()
114 if (sctph->source != cp->vport || payload_csum || sctp_snat_handler()
116 sctph->source = cp->vport; sctp_snat_handler()
127 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) sctp_dnat_handler()
134 if (cp->af == AF_INET6 && iph->fragoffs) sctp_dnat_handler()
142 if (unlikely(cp->app != NULL)) { sctp_dnat_handler()
146 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) sctp_dnat_handler()
150 ret = ip_vs_app_pkt_in(cp, skb); sctp_dnat_handler()
161 if (sctph->dest != cp->dport || payload_csum || sctp_dnat_handler()
164 sctph->dest = cp->dport; sctp_dnat_handler()
369 set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, set_sctp_state() argument
378 ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); set_sctp_state()
418 if (cp->flags & IP_VS_CONN_F_NOOUTPUT) { set_sctp_state()
420 cp->flags &= ~IP_VS_CONN_F_NOOUTPUT; set_sctp_state()
425 next_state = sctp_states[direction][event][cp->state]; set_sctp_state()
427 if (next_state != cp->state) { set_sctp_state()
428 struct ip_vs_dest *dest = cp->dest; set_sctp_state()
435 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), set_sctp_state()
436 ntohs(cp->dport), set_sctp_state()
437 IP_VS_DBG_ADDR(cp->af, &cp->caddr), set_sctp_state()
438 ntohs(cp->cport), set_sctp_state()
439 sctp_state_name(cp->state), set_sctp_state()
441 atomic_read(&cp->refcnt)); set_sctp_state()
443 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && set_sctp_state()
447 cp->flags |= IP_VS_CONN_F_INACTIVE; set_sctp_state()
448 } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && set_sctp_state()
452 cp->flags &= ~IP_VS_CONN_F_INACTIVE; set_sctp_state()
457 cp->timeout = pd->timeout_table[cp->state = next_state]; set_sctp_state()
459 cp->timeout = sctp_timeouts[cp->state = next_state]; set_sctp_state()
463 sctp_state_transition(struct ip_vs_conn *cp, int direction, sctp_state_transition() argument
466 spin_lock_bh(&cp->lock); sctp_state_transition()
467 set_sctp_state(pd, cp, direction, skb); sctp_state_transition()
468 spin_unlock_bh(&cp->lock); sctp_state_transition()
509 static int sctp_app_conn_bind(struct ip_vs_conn *cp) sctp_app_conn_bind() argument
511 struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); sctp_app_conn_bind()
517 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) sctp_app_conn_bind()
520 hash = sctp_app_hashkey(cp->vport); sctp_app_conn_bind()
524 if (inc->port == cp->vport) { sctp_app_conn_bind()
532 IP_VS_DBG_ADDR(cp->af, &cp->caddr), sctp_app_conn_bind()
533 ntohs(cp->cport), sctp_app_conn_bind()
534 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), sctp_app_conn_bind()
535 ntohs(cp->vport), sctp_app_conn_bind()
537 cp->app = inc; sctp_app_conn_bind()
539 result = inc->init_conn(inc, cp); sctp_app_conn_bind()
79 sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) sctp_snat_handler() argument
126 sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) sctp_dnat_handler() argument
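Unlike TCP and UDP, SCTP's checksum is a CRC-32C over the whole packet, which is why sctp_snat_handler() cannot use the incremental trick shown for UDP above and recomputes instead. For reference, a bitwise (slow but dependency-free) CRC-32C:

    #include <stdint.h>
    #include <stddef.h>

    /* CRC-32C (Castagnoli), reflected polynomial 0x82f63b78, as SCTP uses. */
    static uint32_t crc32c(const uint8_t *buf, size_t len)
    {
        uint32_t crc = 0xffffffffu;

        for (size_t i = 0; i < len; i++) {
            crc ^= buf[i];
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78u : 0);
        }
        return ~crc;
    }

The kernel's sctp_compute_cksum() wraps an equivalent, table- or instruction-accelerated version and stores the result little-endian in the SCTP common header.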
H A Dip_vs_core.c112 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) ip_vs_in_stats() argument
114 struct ip_vs_dest *dest = cp->dest; ip_vs_in_stats()
146 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) ip_vs_out_stats() argument
148 struct ip_vs_dest *dest = cp->dest; ip_vs_out_stats()
180 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) ip_vs_conn_stats() argument
185 s = this_cpu_ptr(cp->dest->stats.cpustats); ip_vs_conn_stats()
203 ip_vs_set_state(struct ip_vs_conn *cp, int direction, ip_vs_set_state() argument
208 pd->pp->state_transition(cp, direction, skb, pd); ip_vs_set_state()
239 struct ip_vs_conn *cp = NULL; ip_vs_sched_persist() local
372 cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest, ip_vs_sched_persist()
374 if (cp == NULL) { ip_vs_sched_persist()
383 ip_vs_control_add(cp, ct); ip_vs_sched_persist()
386 ip_vs_conn_stats(cp, svc); ip_vs_sched_persist()
387 return cp; ip_vs_sched_persist()
418 struct ip_vs_conn *cp = NULL; ip_vs_schedule() local
448 (cp = pp->conn_in_get(svc->af, skb, iph, 1))) { ip_vs_schedule()
451 __ip_vs_conn_put(cp); ip_vs_schedule()
501 cp = ip_vs_conn_new(&p, dest->af, &dest->addr, ip_vs_schedule()
504 if (!cp) { ip_vs_schedule()
512 ip_vs_fwd_tag(cp), ip_vs_schedule()
513 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), ip_vs_schedule()
514 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), ip_vs_schedule()
515 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), ip_vs_schedule()
516 cp->flags, atomic_read(&cp->refcnt)); ip_vs_schedule()
518 ip_vs_conn_stats(cp, svc); ip_vs_schedule()
519 return cp; ip_vs_schedule()
559 struct ip_vs_conn *cp; ip_vs_leave() local
572 cp = ip_vs_conn_new(&p, svc->af, &daddr, 0, ip_vs_leave()
575 if (!cp) ip_vs_leave()
580 ip_vs_in_stats(cp, skb); ip_vs_leave()
583 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); ip_vs_leave()
586 ret = cp->packet_xmit(skb, cp, pd->pp, iph); ip_vs_leave()
589 atomic_inc(&cp->in_pkts); ip_vs_leave()
590 ip_vs_conn_put(cp); ip_vs_leave()
709 struct ip_vs_conn *cp, int inout) ip_vs_nat_icmp()
718 iph->saddr = cp->vaddr.ip; ip_vs_nat_icmp()
720 ciph->daddr = cp->vaddr.ip; ip_vs_nat_icmp()
723 iph->daddr = cp->daddr.ip; ip_vs_nat_icmp()
725 ciph->saddr = cp->daddr.ip; ip_vs_nat_icmp()
735 ports[1] = cp->vport; ip_vs_nat_icmp()
737 ports[0] = cp->dport; ip_vs_nat_icmp()
755 struct ip_vs_conn *cp, int inout) ip_vs_nat_icmp_v6()
773 iph->saddr = cp->vaddr.in6; ip_vs_nat_icmp_v6()
774 ciph->daddr = cp->vaddr.in6; ip_vs_nat_icmp_v6()
776 iph->daddr = cp->daddr.in6; ip_vs_nat_icmp_v6()
777 ciph->saddr = cp->daddr.in6; ip_vs_nat_icmp_v6()
787 ntohs(inout ? cp->vport : cp->dport)); ip_vs_nat_icmp_v6()
789 ports[1] = cp->vport; ip_vs_nat_icmp_v6()
791 ports[0] = cp->dport; ip_vs_nat_icmp_v6()
818 __u8 protocol, struct ip_vs_conn *cp, handle_response_icmp()
825 if (IP_VS_FWD_METHOD(cp) != 0) { handle_response_icmp()
846 ip_vs_nat_icmp_v6(skb, pp, cp, 1); handle_response_icmp()
849 ip_vs_nat_icmp(skb, pp, cp, 1); handle_response_icmp()
855 ip_vs_out_stats(cp, skb); handle_response_icmp()
858 if (!(cp->flags & IP_VS_CONN_F_NFCT)) handle_response_icmp()
861 ip_vs_update_conntrack(skb, cp, 0); handle_response_icmp()
865 __ip_vs_conn_put(cp); handle_response_icmp()
882 struct ip_vs_conn *cp; ip_vs_out_icmp() local
940 cp = pp->conn_out_get(AF_INET, skb, &ciph, 1); ip_vs_out_icmp()
941 if (!cp) ip_vs_out_icmp()
945 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, ip_vs_out_icmp()
956 struct ip_vs_conn *cp; ip_vs_out_icmp_v6() local
1004 cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1); ip_vs_out_icmp_v6()
1005 if (!cp) ip_vs_out_icmp_v6()
1010 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp, ip_vs_out_icmp_v6()
1067 static inline bool is_new_conn_expected(const struct ip_vs_conn *cp, is_new_conn_expected() argument
1071 if (cp->control) is_new_conn_expected()
1074 switch (cp->protocol) { is_new_conn_expected()
1076 return (cp->state == IP_VS_TCP_S_TIME_WAIT) || is_new_conn_expected()
1078 (cp->state == IP_VS_TCP_S_FIN_WAIT) && is_new_conn_expected()
1079 (cp->flags & IP_VS_CONN_F_NOOUTPUT)); is_new_conn_expected()
1081 return cp->state == IP_VS_SCTP_S_CLOSED; is_new_conn_expected()
1091 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph, handle_response()
1102 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph)) handle_response()
1107 ipv6_hdr(skb)->saddr = cp->vaddr.in6; handle_response()
1111 ip_hdr(skb)->saddr = cp->vaddr.ip; handle_response()
1135 ip_vs_out_stats(cp, skb); handle_response()
1136 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd); handle_response()
1138 if (!(cp->flags & IP_VS_CONN_F_NFCT)) handle_response()
1141 ip_vs_update_conntrack(skb, cp, 0); handle_response()
1142 ip_vs_conn_put(cp); handle_response()
1148 ip_vs_conn_put(cp); handle_response()
1164 struct ip_vs_conn *cp; ip_vs_out() local
1230 cp = pp->conn_out_get(af, skb, &iph, 0); ip_vs_out()
1232 if (likely(cp)) ip_vs_out()
1233 return handle_response(af, skb, pd, cp, &iph, hooknum); ip_vs_out()
1344 struct ip_vs_conn *cp; ip_vs_in_icmp() local
1425 cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 0 : 1); ip_vs_in_icmp()
1426 if (!cp) ip_vs_in_icmp()
1447 struct ip_vs_dest *dest = cp->dest; ip_vs_in_icmp()
1488 ip_vs_out_stats(cp, skb); ip_vs_in_icmp()
1497 ip_vs_in_stats(cp, skb); ip_vs_in_icmp()
1501 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph); ip_vs_in_icmp()
1504 __ip_vs_conn_put(cp); ip_vs_in_icmp()
1517 struct ip_vs_conn *cp; ip_vs_in_icmp_v6() local
1578 cp = pp->conn_in_get(AF_INET6, skb, &ciph, ip_vs_in_icmp_v6()
1581 if (!cp) ip_vs_in_icmp_v6()
1585 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) { ip_vs_in_icmp_v6()
1586 __ip_vs_conn_put(cp); ip_vs_in_icmp_v6()
1591 ip_vs_in_stats(cp, skb); ip_vs_in_icmp_v6()
1599 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph); ip_vs_in_icmp_v6()
1601 __ip_vs_conn_put(cp); ip_vs_in_icmp_v6()
1619 struct ip_vs_conn *cp; ip_vs_in() local
1689 cp = pp->conn_in_get(af, skb, &iph, 0); ip_vs_in()
1693 is_new_conn(skb, &iph) && cp && ip_vs_in()
1694 ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && ip_vs_in()
1695 unlikely(!atomic_read(&cp->dest->weight))) || ip_vs_in()
1696 unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) { ip_vs_in()
1697 if (!atomic_read(&cp->n_control)) ip_vs_in()
1698 ip_vs_conn_expire_now(cp); ip_vs_in()
1699 __ip_vs_conn_put(cp); ip_vs_in()
1700 cp = NULL; ip_vs_in()
1703 if (unlikely(!cp) && !iph.fragoffs) { ip_vs_in()
1705 * replayed fragment zero will already have created the cp ip_vs_in()
1709 /* Schedule and create new connection entry into &cp */ ip_vs_in()
1710 if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph)) ip_vs_in()
1714 if (unlikely(!cp)) { ip_vs_in()
1730 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { ip_vs_in()
1735 ip_vs_conn_expire_now(cp); ip_vs_in()
1739 __ip_vs_conn_put(cp); ip_vs_in()
1743 ip_vs_in_stats(cp, skb); ip_vs_in()
1744 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); ip_vs_in()
1745 if (cp->packet_xmit) ip_vs_in()
1746 ret = cp->packet_xmit(skb, cp, pp, &iph); ip_vs_in()
1762 if (cp->flags & IP_VS_CONN_F_ONE_PACKET) ip_vs_in()
1765 pkts = atomic_add_return(1, &cp->in_pkts); ip_vs_in()
1768 ip_vs_sync_conn(net, cp, pkts); ip_vs_in()
1770 ip_vs_conn_put(cp); ip_vs_in()
708 ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int inout) ip_vs_nat_icmp() argument
754 ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int inout) ip_vs_nat_icmp_v6() argument
816 handle_response_icmp(int af, struct sk_buff *skb, union nf_inet_addr *snet, __u8 protocol, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, unsigned int offset, unsigned int ihl, unsigned int hooknum) handle_response_icmp() argument
1090 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph, unsigned int hooknum) handle_response() argument
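ip_vs_in_stats()/ip_vs_out_stats() bump this_cpu_ptr(...cpustats) counters, so the hot path never takes a shared lock; totals are folded together only when someone reads them. The shape of that pattern in plain C, with a hypothetical fixed CPU count:

    #include <stdint.h>

    #define NR_CPUS 4

    struct stats { uint64_t pkts, bytes; };

    static struct stats cpustats[NR_CPUS];   /* one private slot per CPU */

    /* Update path: touches only the caller's own slot, no locking. */
    static void in_stats(int cpu, uint64_t len)
    {
        cpustats[cpu].pkts++;
        cpustats[cpu].bytes += len;
    }

    /* Read path: fold all slots. A reader can see a torn in-flight
     * update here; the kernel guards against that with the
     * u64_stats_update_begin()/fetch-retry sequence. */
    static struct stats sum_stats(void)
    {
        struct stats s = { 0, 0 };

        for (int c = 0; c < NR_CPUS; c++) {
            s.pkts  += cpustats[c].pkts;
            s.bytes += cpustats[c].bytes;
        }
        return s;
    }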
H A Dip_vs_proto_ah_esp.c63 struct ip_vs_conn *cp; ah_esp_conn_in_get() local
68 cp = ip_vs_conn_in_get(&p); ah_esp_conn_in_get()
69 if (!cp) { ah_esp_conn_in_get()
82 return cp; ah_esp_conn_in_get()
90 struct ip_vs_conn *cp; ah_esp_conn_out_get() local
95 cp = ip_vs_conn_out_get(&p); ah_esp_conn_out_get()
96 if (!cp) { ah_esp_conn_out_get()
105 return cp; ah_esp_conn_out_get()
H A Dip_vs_ftp.c66 ip_vs_ftp_init_conn(struct ip_vs_app *app, struct ip_vs_conn *cp) ip_vs_ftp_init_conn() argument
69 cp->flags |= IP_VS_CONN_F_NFCT; ip_vs_ftp_init_conn()
75 ip_vs_ftp_done_conn(struct ip_vs_app *app, struct ip_vs_conn *cp) ip_vs_ftp_done_conn() argument
169 static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, ip_vs_ftp_out() argument
192 if (cp->af == AF_INET6) ip_vs_ftp_out()
197 if (cp->state != IP_VS_TCP_S_ESTABLISHED) ip_vs_ftp_out()
204 if (cp->app_data == &ip_vs_ftp_pasv) { ip_vs_ftp_out()
219 &from.ip, ntohs(port), &cp->caddr.ip, 0); ip_vs_ftp_out()
226 ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET, ip_vs_ftp_out()
228 &cp->caddr, 0, &p); ip_vs_ftp_out()
233 ip_vs_conn_fill_param(ip_vs_conn_net(cp), ip_vs_ftp_out()
234 AF_INET, IPPROTO_TCP, &cp->caddr, ip_vs_ftp_out()
235 0, &cp->vaddr, port, &p); ip_vs_ftp_out()
240 cp->dest, skb->mark); ip_vs_ftp_out()
245 ip_vs_control_add(n_cp, cp); ip_vs_ftp_out()
293 cp->app_data = NULL; ip_vs_ftp_out()
313 static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, ip_vs_ftp_in() argument
332 if (cp->af == AF_INET6) ip_vs_ftp_in()
337 if (cp->state != IP_VS_TCP_S_ESTABLISHED) ip_vs_ftp_in()
362 cp->app_data = &ip_vs_ftp_pasv; ip_vs_ftp_in()
384 cp->app_data = NULL; ip_vs_ftp_in()
391 &to.ip, ntohs(port), &cp->vaddr.ip, 0); ip_vs_ftp_in()
395 ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET, ip_vs_ftp_in()
396 iph->protocol, &to, port, &cp->vaddr, ip_vs_ftp_in()
397 htons(ntohs(cp->vport)-1), &p); ip_vs_ftp_in()
401 n_cp = ip_vs_conn_new(&p, AF_INET, &cp->daddr, ip_vs_ftp_in()
402 htons(ntohs(cp->dport)-1), ip_vs_ftp_in()
403 IP_VS_CONN_F_NFCT, cp->dest, ip_vs_ftp_in()
409 ip_vs_control_add(n_cp, cp); ip_vs_ftp_in()
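The FTP helper earns its keep by reading addresses out of the payload: a PASV reply carries "a,b,c,d,p1,p2", which ip_vs_ftp_out() turns into the expected data connection before rewriting it to the virtual address. A sketch of just the parsing step; parse_pasv() is a made-up name, and the in-kernel parser works bounds-checked over skb data rather than a NUL-terminated string:

    #include <stdint.h>
    #include <stdio.h>

    static int parse_pasv(const char *s, uint32_t *addr, uint16_t *port)
    {
        unsigned a, b, c, d, p1, p2;

        if (sscanf(s, "%u,%u,%u,%u,%u,%u", &a, &b, &c, &d, &p1, &p2) != 6)
            return -1;
        if (a > 255 || b > 255 || c > 255 || d > 255 || p1 > 255 || p2 > 255)
            return -1;
        *addr = (a << 24) | (b << 16) | (c << 8) | d;  /* host byte order */
        *port = (uint16_t)((p1 << 8) | p2);
        return 0;
    }

The hits above also show the second half of the trick: the data connection is created with ip_vs_conn_new() and chained to the control connection via ip_vs_control_add(), tying their lifetimes together.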
H A Dip_vs_pe_sip.c140 static int ip_vs_sip_show_pe_data(const struct ip_vs_conn *cp, char *buf) ip_vs_sip_show_pe_data() argument
142 memcpy(buf, cp->pe_data, cp->pe_data_len); ip_vs_sip_show_pe_data()
143 return cp->pe_data_len; ip_vs_sip_show_pe_data()
/linux-4.1.27/kernel/sched/
H A Dcpudeadline.c39 static void cpudl_exchange(struct cpudl *cp, int a, int b) cpudl_exchange() argument
41 int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu; cpudl_exchange()
43 swap(cp->elements[a].cpu, cp->elements[b].cpu); cpudl_exchange()
44 swap(cp->elements[a].dl, cp->elements[b].dl); cpudl_exchange()
46 swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx); cpudl_exchange()
49 static void cpudl_heapify(struct cpudl *cp, int idx) cpudl_heapify() argument
59 if ((l < cp->size) && dl_time_before(cp->elements[idx].dl, cpudl_heapify()
60 cp->elements[l].dl)) cpudl_heapify()
62 if ((r < cp->size) && dl_time_before(cp->elements[largest].dl, cpudl_heapify()
63 cp->elements[r].dl)) cpudl_heapify()
69 cpudl_exchange(cp, largest, idx); cpudl_heapify()
74 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) cpudl_change_key() argument
78 if (dl_time_before(new_dl, cp->elements[idx].dl)) { cpudl_change_key()
79 cp->elements[idx].dl = new_dl; cpudl_change_key()
80 cpudl_heapify(cp, idx); cpudl_change_key()
82 cp->elements[idx].dl = new_dl; cpudl_change_key()
83 while (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl, cpudl_change_key()
84 cp->elements[idx].dl)) { cpudl_change_key()
85 cpudl_exchange(cp, idx, parent(idx)); cpudl_change_key()
91 static inline int cpudl_maximum(struct cpudl *cp) cpudl_maximum() argument
93 return cp->elements[0].cpu; cpudl_maximum()
98 * @cp: the cpudl max-heap context
104 int cpudl_find(struct cpudl *cp, struct task_struct *p, cpudl_find() argument
111 cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { cpudl_find()
114 } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) && cpudl_find()
115 dl_time_before(dl_se->deadline, cp->elements[0].dl)) { cpudl_find()
116 best_cpu = cpudl_maximum(cp); cpudl_find()
129 * @cp: the cpudl max-heap context
137 void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid) cpudl_set() argument
144 raw_spin_lock_irqsave(&cp->lock, flags); cpudl_set()
145 old_idx = cp->elements[cpu].idx; cpudl_set()
156 new_cpu = cp->elements[cp->size - 1].cpu; cpudl_set()
157 cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl; cpudl_set()
158 cp->elements[old_idx].cpu = new_cpu; cpudl_set()
159 cp->size--; cpudl_set()
160 cp->elements[new_cpu].idx = old_idx; cpudl_set()
161 cp->elements[cpu].idx = IDX_INVALID; cpudl_set()
163 cp->elements[parent(old_idx)].dl, cpudl_set()
164 cp->elements[old_idx].dl)) { cpudl_set()
165 cpudl_exchange(cp, old_idx, parent(old_idx)); cpudl_set()
168 cpumask_set_cpu(cpu, cp->free_cpus); cpudl_set()
169 cpudl_heapify(cp, old_idx); cpudl_set()
175 cp->size++; cpudl_set()
176 cp->elements[cp->size - 1].dl = 0; cpudl_set()
177 cp->elements[cp->size - 1].cpu = cpu; cpudl_set()
178 cp->elements[cpu].idx = cp->size - 1; cpudl_set()
179 cpudl_change_key(cp, cp->size - 1, dl); cpudl_set()
180 cpumask_clear_cpu(cpu, cp->free_cpus); cpudl_set()
182 cpudl_change_key(cp, old_idx, dl); cpudl_set()
186 raw_spin_unlock_irqrestore(&cp->lock, flags); cpudl_set()
191 * @cp: the cpudl max-heap context
194 void cpudl_set_freecpu(struct cpudl *cp, int cpu) cpudl_set_freecpu() argument
196 cpumask_set_cpu(cpu, cp->free_cpus); cpudl_set_freecpu()
201 * @cp: the cpudl max-heap context
204 void cpudl_clear_freecpu(struct cpudl *cp, int cpu) cpudl_clear_freecpu() argument
206 cpumask_clear_cpu(cpu, cp->free_cpus); cpudl_clear_freecpu()
211 * @cp: the cpudl max-heap context
213 int cpudl_init(struct cpudl *cp) cpudl_init() argument
217 memset(cp, 0, sizeof(*cp)); cpudl_init()
218 raw_spin_lock_init(&cp->lock); cpudl_init()
219 cp->size = 0; cpudl_init()
221 cp->elements = kcalloc(nr_cpu_ids, cpudl_init()
224 if (!cp->elements) cpudl_init()
227 if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) { cpudl_init()
228 kfree(cp->elements); cpudl_init()
233 cp->elements[i].idx = IDX_INVALID; cpudl_init()
240 * @cp: the cpudl max-heap context
242 void cpudl_cleanup(struct cpudl *cp) cpudl_cleanup() argument
244 free_cpumask_var(cp->free_cpus); cpudl_cleanup()
245 kfree(cp->elements); cpudl_cleanup()
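Note: cpudl_heapify() above is a max-heap sift-down keyed on deadlines; cpudl_exchange() additionally keeps the elements[cpu].idx back-map consistent so cpudl_set() can find a CPU's heap slot in O(1). The sift-down alone, as a userspace sketch (helper names are hypothetical; the index arithmetic matches the parent() macro used above):

#include <stdio.h>

#define parent(i) (((i) - 1) >> 1)
#define left(i)   (((i) << 1) + 1)
#define right(i)  (((i) << 1) + 2)

static void sift_down(unsigned long long *dl, int size, int idx)
{
    for (;;) {
        int l = left(idx), r = right(idx), largest = idx;

        if (l < size && dl[largest] < dl[l])
            largest = l;
        if (r < size && dl[largest] < dl[r])
            largest = r;
        if (largest == idx)
            break;

        unsigned long long tmp = dl[idx];
        dl[idx] = dl[largest];
        dl[largest] = tmp;
        idx = largest;      /* continue from the child we swapped into */
    }
}

int main(void)
{
    unsigned long long dl[] = { 10, 80, 50, 30 };

    sift_down(dl, 4, 0);                    /* restore the max-heap at the root */
    printf("max deadline = %llu\n", dl[0]); /* 80 */
    return 0;
}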
cpudeadline.h 23 int cpudl_find(struct cpudl *cp, struct task_struct *p,
25 void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
26 int cpudl_init(struct cpudl *cp);
27 void cpudl_set_freecpu(struct cpudl *cp, int cpu);
28 void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
29 void cpudl_cleanup(struct cpudl *cp);
cpupri.h 24 int cpupri_find(struct cpupri *cp,
26 void cpupri_set(struct cpupri *cp, int cpu, int pri);
27 int cpupri_init(struct cpupri *cp);
28 void cpupri_cleanup(struct cpupri *cp);
cpupri.c 55 * @cp: The cpupri context
68 int cpupri_find(struct cpupri *cp, struct task_struct *p, cpupri_find() argument
77 struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; cpupri_find()
132 * @cp: The cpupri context
140 void cpupri_set(struct cpupri *cp, int cpu, int newpri) cpupri_set() argument
142 int *currpri = &cp->cpu_to_pri[cpu]; cpupri_set()
160 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; cpupri_set()
173 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; cpupri_set()
204 * @cp: The cpupri context
208 int cpupri_init(struct cpupri *cp) cpupri_init() argument
212 memset(cp, 0, sizeof(*cp)); cpupri_init()
215 struct cpupri_vec *vec = &cp->pri_to_cpu[i]; cpupri_init()
222 cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL); cpupri_init()
223 if (!cp->cpu_to_pri) cpupri_init()
227 cp->cpu_to_pri[i] = CPUPRI_INVALID; cpupri_init()
233 free_cpumask_var(cp->pri_to_cpu[i].mask); cpupri_init()
239 * @cp: The cpupri context
241 void cpupri_cleanup(struct cpupri *cp) cpupri_cleanup() argument
245 kfree(cp->cpu_to_pri); cpupri_cleanup()
247 free_cpumask_var(cp->pri_to_cpu[i].mask); cpupri_cleanup()
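Note: cpupri_find() above walks the pri_to_cpu vectors from the lowest priority upward and returns as soon as one intersects the task's affinity, so the caller can push work to the least important CPU. The scan in a userspace sketch, with plain bitmasks standing in for cpumasks (names and sizes are illustrative):

#include <stdio.h>

#define NR_PRI 4    /* the kernel uses CPUPRI_NR_PRIORITIES */

static int find_lowest(const unsigned long pri_to_cpu[NR_PRI],
                       unsigned long allowed)
{
    int pri;

    for (pri = 0; pri < NR_PRI; pri++) {
        unsigned long match = pri_to_cpu[pri] & allowed;

        if (match)
            return pri;     /* caller picks any CPU set in 'match' */
    }
    return -1;              /* no suitable CPU */
}

int main(void)
{
    /* vec[p] = bitmask of CPUs currently running priority p */
    unsigned long vec[NR_PRI] = { 0x0, 0x2, 0xc, 0x1 };

    printf("lowest pri = %d\n", find_lowest(vec, 0x6)); /* 1 (CPU 1) */
    return 0;
}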
/linux-4.1.27/arch/mips/fw/arc/
cmdline.c 35 static char * __init move_firmware_args(char* cp) move_firmware_args() argument
48 strcat(cp, used_arc[i][1]); move_firmware_args()
49 cp += strlen(used_arc[i][1]); move_firmware_args()
54 strcpy(cp, s); move_firmware_args()
55 cp += strlen(s); move_firmware_args()
57 *cp++ = ' '; move_firmware_args()
64 return cp; move_firmware_args()
69 char *cp; prom_init_cmdline() local
74 cp = arcs_cmdline; prom_init_cmdline()
79 cp = move_firmware_args(cp); prom_init_cmdline()
89 strcpy(cp, prom_argv(actr)); prom_init_cmdline()
90 cp += strlen(prom_argv(actr)); prom_init_cmdline()
91 *cp++ = ' '; prom_init_cmdline()
97 if (cp != arcs_cmdline) /* get rid of trailing space */ prom_init_cmdline()
98 --cp; prom_init_cmdline()
99 *cp = '\0'; prom_init_cmdline()
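Note: prom_init_cmdline() above is the classic moving-cursor idiom: cp always points at the end of the string while words are appended, and a final back-step drops the trailing space. The idiom in isolation, as userspace C (buffer size and arguments invented for the demo):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *argv[] = { "console=ttyS0", "root=/dev/sda1" };
    char buf[64];
    char *cp = buf;
    int i;

    for (i = 0; i < 2; i++) {
        strcpy(cp, argv[i]);        /* append the word ... */
        cp += strlen(argv[i]);      /* ... and advance the cursor */
        *cp++ = ' ';
    }
    if (cp != buf)                  /* get rid of trailing space */
        --cp;
    *cp = '\0';

    printf("[%s]\n", buf);          /* [console=ttyS0 root=/dev/sda1] */
    return 0;
}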
/linux-4.1.27/fs/
binfmt_script.c 20 char *cp; load_script() local
47 if ((cp = strchr(bprm->buf, '\n')) == NULL) load_script()
48 cp = bprm->buf+BINPRM_BUF_SIZE-1; load_script()
49 *cp = '\0'; load_script()
50 while (cp > bprm->buf) { load_script()
51 cp--; load_script()
52 if ((*cp == ' ') || (*cp == '\t')) load_script()
53 *cp = '\0'; load_script()
57 for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); load_script()
58 if (*cp == '\0') load_script()
60 i_name = cp; load_script()
62 for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++) load_script()
64 while ((*cp == ' ') || (*cp == '\t')) load_script()
65 *cp++ = '\0'; load_script()
66 if (*cp) load_script()
67 i_arg = cp; load_script()
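Note: load_script() above parses the shebang line using nothing but the cp cursor: cut at the newline, skip blanks after "#!", mark the interpreter, then NUL-split a single optional argument. The same walk in userspace C (the demo buffer is hypothetical; the kernel version also strips trailing blanks first):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[] = "#!/usr/bin/env  python3 -u\n";
    char *cp, *i_name, *i_arg = NULL;

    if ((cp = strchr(buf, '\n')) != NULL)
        *cp = '\0';                         /* terminate at the newline */
    for (cp = buf + 2; *cp == ' ' || *cp == '\t'; cp++)
        ;                                   /* skip blanks after "#!" */
    if (*cp == '\0')
        return 1;                           /* no interpreter named */
    i_name = cp;
    for (; *cp && *cp != ' ' && *cp != '\t'; cp++)
        ;                                   /* skip over the name */
    while (*cp == ' ' || *cp == '\t')
        *cp++ = '\0';                       /* terminate name, skip blanks */
    if (*cp)
        i_arg = cp;                         /* everything left is one arg */

    printf("interpreter=%s arg=%s\n", i_name, i_arg ? i_arg : "(none)");
    return 0;
}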
char_dev.c 77 struct char_device_struct *cd, **cp; __register_chrdev_region() local
108 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) __register_chrdev_region()
109 if ((*cp)->major > major || __register_chrdev_region()
110 ((*cp)->major == major && __register_chrdev_region()
111 (((*cp)->baseminor >= baseminor) || __register_chrdev_region()
112 ((*cp)->baseminor + (*cp)->minorct > baseminor)))) __register_chrdev_region()
116 if (*cp && (*cp)->major == major) { __register_chrdev_region()
117 int old_min = (*cp)->baseminor; __register_chrdev_region()
118 int old_max = (*cp)->baseminor + (*cp)->minorct - 1; __register_chrdev_region()
135 cd->next = *cp; __register_chrdev_region()
136 *cp = cd; __register_chrdev_region()
148 struct char_device_struct *cd = NULL, **cp; __unregister_chrdev_region() local
152 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) __unregister_chrdev_region()
153 if ((*cp)->major == major && __unregister_chrdev_region()
154 (*cp)->baseminor == baseminor && __unregister_chrdev_region()
155 (*cp)->minorct == minorct) __unregister_chrdev_region()
157 if (*cp) { __unregister_chrdev_region()
158 cd = *cp; __unregister_chrdev_region()
159 *cp = cd->next; __unregister_chrdev_region()
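Note: the linked-list surgery in __register_chrdev_region() and __unregister_chrdev_region() above backs the driver-facing region API. A minimal kernel-module sketch of the usual caller (device name and minor count are examples); alloc_chrdev_region() asks for a dynamic major, which reaches __register_chrdev_region() with major 0:

#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/module.h>

static dev_t demo_devt;

static int __init demo_init(void)
{
    /* reserve 4 minors starting at 0 under a dynamically chosen major */
    int rc = alloc_chrdev_region(&demo_devt, 0, 4, "demo");

    if (rc)
        return rc;
    pr_info("demo: got major %d\n", MAJOR(demo_devt));
    return 0;
}

static void __exit demo_exit(void)
{
    unregister_chrdev_region(demo_devt, 4);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");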
/linux-4.1.27/drivers/net/ethernet/realtek/
8139cp.c 1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
51 #define DRV_NAME "8139cp"
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
353 #define cpr8(reg) readb(cp->regs + (reg))
354 #define cpr16(reg) readw(cp->regs + (reg))
355 #define cpr32(reg) readl(cp->regs + (reg))
356 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
357 #define cpw16(reg,val) writew((val), cp->regs + (reg))
358 #define cpw32(reg,val) writel((val), cp->regs + (reg))
360 writeb((val), cp->regs + (reg)); \
361 readb(cp->regs + (reg)); \
364 writew((val), cp->regs + (reg)); \
365 readw(cp->regs + (reg)); \
368 writel((val), cp->regs + (reg)); \
369 readl(cp->regs + (reg)); \
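Note: the cpw*_f() "flush" variants above immediately read back the register they just wrote, forcing the posted PCI write to complete before the caller proceeds. A kernel-context sketch of the same pattern (hypothetical helper name; regs is an ioremap()ed MMIO base):

#include <linux/io.h>

/* write, then read back so the posted write is flushed to the device */
static inline void mmio_write16_flush(void __iomem *regs, unsigned int reg,
                                      u16 val)
{
    writew(val, regs + reg);
    readw(regs + reg);
}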
374 static void cp_tx (struct cp_private *cp);
375 static void cp_clean_rings (struct cp_private *cp);
405 static inline void cp_set_rxbufsize (struct cp_private *cp) cp_set_rxbufsize() argument
407 unsigned int mtu = cp->dev->mtu; cp_set_rxbufsize()
411 cp->rx_buf_sz = mtu + ETH_HLEN + 8; cp_set_rxbufsize()
413 cp->rx_buf_sz = PKT_BUF_SZ; cp_set_rxbufsize()
416 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, cp_rx_skb() argument
421 skb->protocol = eth_type_trans (skb, cp->dev); cp_rx_skb()
423 cp->dev->stats.rx_packets++; cp_rx_skb()
424 cp->dev->stats.rx_bytes += skb->len; cp_rx_skb()
429 napi_gro_receive(&cp->napi, skb); cp_rx_skb()
432 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail, cp_rx_err_acct() argument
435 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n", cp_rx_err_acct()
437 cp->dev->stats.rx_errors++; cp_rx_err_acct()
439 cp->dev->stats.rx_frame_errors++; cp_rx_err_acct()
441 cp->dev->stats.rx_crc_errors++; cp_rx_err_acct()
443 cp->dev->stats.rx_length_errors++; cp_rx_err_acct()
445 cp->dev->stats.rx_length_errors++; cp_rx_err_acct()
447 cp->dev->stats.rx_fifo_errors++; cp_rx_err_acct()
463 struct cp_private *cp = container_of(napi, struct cp_private, napi); cp_rx_poll() local
464 struct net_device *dev = cp->dev; cp_rx_poll()
465 unsigned int rx_tail = cp->rx_tail; cp_rx_poll()
477 const unsigned buflen = cp->rx_buf_sz; cp_rx_poll()
479 skb = cp->rx_skb[rx_tail]; cp_rx_poll()
482 desc = &cp->rx_ring[rx_tail]; cp_rx_poll()
496 cp_rx_err_acct(cp, rx_tail, status, len); cp_rx_poll()
498 cp->cp_stats.rx_frags++; cp_rx_poll()
503 cp_rx_err_acct(cp, rx_tail, status, len); cp_rx_poll()
507 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n", cp_rx_poll()
516 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, cp_rx_poll()
518 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { cp_rx_poll()
524 dma_unmap_single(&cp->pdev->dev, mapping, cp_rx_poll()
535 cp->rx_skb[rx_tail] = new_skb; cp_rx_poll()
537 cp_rx_skb(cp, skb, desc); cp_rx_poll()
542 cp->rx_ring[rx_tail].opts2 = 0; cp_rx_poll()
543 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); cp_rx_poll()
546 cp->rx_buf_sz); cp_rx_poll()
548 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); cp_rx_poll()
552 cp->rx_tail = rx_tail; cp_rx_poll()
564 spin_lock_irqsave(&cp->lock, flags); cp_rx_poll()
567 spin_unlock_irqrestore(&cp->lock, flags); cp_rx_poll()
576 struct cp_private *cp; cp_interrupt() local
582 cp = netdev_priv(dev); cp_interrupt()
584 spin_lock(&cp->lock); cp_interrupt()
592 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n", cp_interrupt()
604 if (napi_schedule_prep(&cp->napi)) { cp_interrupt()
606 __napi_schedule(&cp->napi); cp_interrupt()
610 cp_tx(cp); cp_interrupt()
612 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); cp_interrupt()
618 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status); cp_interrupt()
619 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status); cp_interrupt()
627 spin_unlock(&cp->lock); cp_interrupt()
639 struct cp_private *cp = netdev_priv(dev); cp_poll_controller() local
640 const int irq = cp->pdev->irq; cp_poll_controller()
648 static void cp_tx (struct cp_private *cp) cp_tx() argument
650 unsigned tx_head = cp->tx_head; cp_tx()
651 unsigned tx_tail = cp->tx_tail; cp_tx()
655 struct cp_desc *txd = cp->tx_ring + tx_tail; cp_tx()
664 skb = cp->tx_skb[tx_tail]; cp_tx()
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), cp_tx()
673 netif_dbg(cp, tx_err, cp->dev, cp_tx()
675 cp->dev->stats.tx_errors++; cp_tx()
677 cp->dev->stats.tx_window_errors++; cp_tx()
679 cp->dev->stats.tx_aborted_errors++; cp_tx()
681 cp->dev->stats.tx_carrier_errors++; cp_tx()
683 cp->dev->stats.tx_fifo_errors++; cp_tx()
685 cp->dev->stats.collisions += cp_tx()
687 cp->dev->stats.tx_packets++; cp_tx()
688 cp->dev->stats.tx_bytes += skb->len; cp_tx()
689 netif_dbg(cp, tx_done, cp->dev, cp_tx()
697 cp->tx_skb[tx_tail] = NULL; cp_tx()
702 cp->tx_tail = tx_tail; cp_tx()
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl); cp_tx()
705 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) cp_tx()
706 netif_wake_queue(cp->dev); cp_tx()
715 static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, unwind_tx_frag_mapping() argument
723 cp->tx_skb[index] = NULL; unwind_tx_frag_mapping()
724 txd = &cp->tx_ring[index]; unwind_tx_frag_mapping()
726 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), unwind_tx_frag_mapping()
734 struct cp_private *cp = netdev_priv(dev); cp_start_xmit() local
741 spin_lock_irqsave(&cp->lock, intr_flags); cp_start_xmit()
744 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { cp_start_xmit()
746 spin_unlock_irqrestore(&cp->lock, intr_flags); cp_start_xmit()
751 entry = cp->tx_head; cp_start_xmit()
758 struct cp_desc *txd = &cp->tx_ring[entry]; cp_start_xmit()
763 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); cp_start_xmit()
764 if (dma_mapping_error(&cp->pdev->dev, mapping)) cp_start_xmit()
788 cp->tx_skb[entry] = skb; cp_start_xmit()
802 first_mapping = dma_map_single(&cp->pdev->dev, skb->data, cp_start_xmit()
804 if (dma_mapping_error(&cp->pdev->dev, first_mapping)) cp_start_xmit()
807 cp->tx_skb[entry] = skb; cp_start_xmit()
817 mapping = dma_map_single(&cp->pdev->dev, cp_start_xmit()
820 if (dma_mapping_error(&cp->pdev->dev, mapping)) { cp_start_xmit()
821 unwind_tx_frag_mapping(cp, skb, first_entry, entry); cp_start_xmit()
844 txd = &cp->tx_ring[entry]; cp_start_xmit()
852 cp->tx_skb[entry] = skb; cp_start_xmit()
856 txd = &cp->tx_ring[first_entry]; cp_start_xmit()
877 cp->tx_head = entry; cp_start_xmit()
880 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", cp_start_xmit()
882 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) cp_start_xmit()
886 spin_unlock_irqrestore(&cp->lock, intr_flags); cp_start_xmit()
893 cp->dev->stats.tx_dropped++; cp_start_xmit()
902 struct cp_private *cp = netdev_priv(dev); __cp_set_rx_mode() local
 931 cp->rx_config = cp_rx_config | rx_mode; __cp_set_rx_mode()
 932 cpw32_f(RxConfig, cp->rx_config); __cp_set_rx_mode()
941 struct cp_private *cp = netdev_priv(dev); cp_set_rx_mode() local
943 spin_lock_irqsave (&cp->lock, flags); cp_set_rx_mode()
945 spin_unlock_irqrestore (&cp->lock, flags); cp_set_rx_mode()
948 static void __cp_get_stats(struct cp_private *cp) __cp_get_stats() argument
951 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff); __cp_get_stats()
957 struct cp_private *cp = netdev_priv(dev); cp_get_stats() local
961 spin_lock_irqsave(&cp->lock, flags); cp_get_stats()
963 __cp_get_stats(cp); cp_get_stats()
964 spin_unlock_irqrestore(&cp->lock, flags); cp_get_stats()
969 static void cp_stop_hw (struct cp_private *cp) cp_stop_hw() argument
977 cp->rx_tail = 0; cp_stop_hw()
978 cp->tx_head = cp->tx_tail = 0; cp_stop_hw()
980 netdev_reset_queue(cp->dev); cp_stop_hw()
983 static void cp_reset_hw (struct cp_private *cp) cp_reset_hw() argument
996 netdev_err(cp->dev, "hardware reset timeout\n"); cp_reset_hw()
999 static inline void cp_start_hw (struct cp_private *cp) cp_start_hw() argument
1003 cpw16(CpCmd, cp->cpcmd); cp_start_hw()
1016 ring_dma = cp->ring_dma; cp_start_hw()
1032 netdev_reset_queue(cp->dev); cp_start_hw()
1035 static void cp_enable_irq(struct cp_private *cp) cp_enable_irq() argument
1040 static void cp_init_hw (struct cp_private *cp) cp_init_hw() argument
1042 struct net_device *dev = cp->dev; cp_init_hw()
1044 cp_reset_hw(cp); cp_init_hw()
1052 cp_start_hw(cp); cp_init_hw()
1061 cp->wol_enabled = 0; cp_init_hw()
1070 static int cp_refill_rx(struct cp_private *cp) cp_refill_rx() argument
1072 struct net_device *dev = cp->dev; cp_refill_rx()
1079 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz); cp_refill_rx()
1083 mapping = dma_map_single(&cp->pdev->dev, skb->data, cp_refill_rx()
1084 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); cp_refill_rx()
1085 if (dma_mapping_error(&cp->pdev->dev, mapping)) { cp_refill_rx()
1089 cp->rx_skb[i] = skb; cp_refill_rx()
1091 cp->rx_ring[i].opts2 = 0; cp_refill_rx()
1092 cp->rx_ring[i].addr = cpu_to_le64(mapping); cp_refill_rx()
1094 cp->rx_ring[i].opts1 = cp_refill_rx()
1095 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); cp_refill_rx()
1097 cp->rx_ring[i].opts1 = cp_refill_rx()
1098 cpu_to_le32(DescOwn | cp->rx_buf_sz); cp_refill_rx()
1104 cp_clean_rings(cp); cp_refill_rx()
1108 static void cp_init_rings_index (struct cp_private *cp) cp_init_rings_index() argument
1110 cp->rx_tail = 0; cp_init_rings_index()
1111 cp->tx_head = cp->tx_tail = 0; cp_init_rings_index()
1114 static int cp_init_rings (struct cp_private *cp) cp_init_rings() argument
1116 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); cp_init_rings()
1117 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); cp_init_rings()
1119 cp_init_rings_index(cp); cp_init_rings()
1121 return cp_refill_rx (cp); cp_init_rings()
1124 static int cp_alloc_rings (struct cp_private *cp) cp_alloc_rings() argument
1126 struct device *d = &cp->pdev->dev; cp_alloc_rings()
1130 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL); cp_alloc_rings()
1134 cp->rx_ring = mem; cp_alloc_rings()
1135 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; cp_alloc_rings()
1137 rc = cp_init_rings(cp); cp_alloc_rings()
1139 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); cp_alloc_rings()
1144 static void cp_clean_rings (struct cp_private *cp) cp_clean_rings() argument
1150 if (cp->rx_skb[i]) { cp_clean_rings()
1151 desc = cp->rx_ring + i; cp_clean_rings()
1152 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), cp_clean_rings()
1153 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); cp_clean_rings()
1154 dev_kfree_skb(cp->rx_skb[i]); cp_clean_rings()
1159 if (cp->tx_skb[i]) { cp_clean_rings()
1160 struct sk_buff *skb = cp->tx_skb[i]; cp_clean_rings()
1162 desc = cp->tx_ring + i; cp_clean_rings()
1163 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), cp_clean_rings()
1168 cp->dev->stats.tx_dropped++; cp_clean_rings()
1171 netdev_reset_queue(cp->dev); cp_clean_rings()
1173 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); cp_clean_rings()
1174 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); cp_clean_rings()
1176 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); cp_clean_rings()
1177 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); cp_clean_rings()
1180 static void cp_free_rings (struct cp_private *cp) cp_free_rings() argument
1182 cp_clean_rings(cp); cp_free_rings()
1183 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring, cp_free_rings()
1184 cp->ring_dma); cp_free_rings()
1185 cp->rx_ring = NULL; cp_free_rings()
1186 cp->tx_ring = NULL; cp_free_rings()
1191 struct cp_private *cp = netdev_priv(dev); cp_open() local
1192 const int irq = cp->pdev->irq; cp_open()
1195 netif_dbg(cp, ifup, dev, "enabling interface\n"); cp_open()
1197 rc = cp_alloc_rings(cp); cp_open()
1201 napi_enable(&cp->napi); cp_open()
1203 cp_init_hw(cp); cp_open()
1209 cp_enable_irq(cp); cp_open()
1212 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); cp_open()
1218 napi_disable(&cp->napi); cp_open()
1219 cp_stop_hw(cp); cp_open()
1220 cp_free_rings(cp); cp_open()
1226 struct cp_private *cp = netdev_priv(dev); cp_close() local
1229 napi_disable(&cp->napi); cp_close()
1231 netif_dbg(cp, ifdown, dev, "disabling interface\n"); cp_close()
1233 spin_lock_irqsave(&cp->lock, flags); cp_close()
1238 cp_stop_hw(cp); cp_close()
1240 spin_unlock_irqrestore(&cp->lock, flags); cp_close()
1242 free_irq(cp->pdev->irq, dev); cp_close()
1244 cp_free_rings(cp); cp_close()
1250 struct cp_private *cp = netdev_priv(dev); cp_tx_timeout() local
1258 spin_lock_irqsave(&cp->lock, flags); cp_tx_timeout()
1260 cp_stop_hw(cp); cp_tx_timeout()
1261 cp_clean_rings(cp); cp_tx_timeout()
1262 rc = cp_init_rings(cp); cp_tx_timeout()
1263 cp_start_hw(cp); cp_tx_timeout()
1264 cp_enable_irq(cp); cp_tx_timeout()
1268 spin_unlock_irqrestore(&cp->lock, flags); cp_tx_timeout()
1273 struct cp_private *cp = netdev_priv(dev); cp_change_mtu() local
1282 cp_set_rxbufsize(cp); /* set new rx buf size */ cp_change_mtu()
1289 cp_set_rxbufsize(cp); cp_change_mtu()
1306 struct cp_private *cp = netdev_priv(dev); mdio_read() local
1309 readw(cp->regs + mii_2_8139_map[location]) : 0; mdio_read()
1316 struct cp_private *cp = netdev_priv(dev); mdio_write() local
1327 static int netdev_set_wol (struct cp_private *cp, netdev_set_wol() argument
1354 cp->wol_enabled = (wol->wolopts) ? 1 : 0; netdev_set_wol()
1360 static void netdev_get_wol (struct cp_private *cp, netdev_get_wol() argument
1369 if (!cp->wol_enabled) return; netdev_get_wol()
1384 struct cp_private *cp = netdev_priv(dev); cp_get_drvinfo() local
1388 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); cp_get_drvinfo()
1417 struct cp_private *cp = netdev_priv(dev); cp_get_settings() local
1421 spin_lock_irqsave(&cp->lock, flags); cp_get_settings()
1422 rc = mii_ethtool_gset(&cp->mii_if, cmd); cp_get_settings()
1423 spin_unlock_irqrestore(&cp->lock, flags); cp_get_settings()
1430 struct cp_private *cp = netdev_priv(dev); cp_set_settings() local
1434 spin_lock_irqsave(&cp->lock, flags); cp_set_settings()
1435 rc = mii_ethtool_sset(&cp->mii_if, cmd); cp_set_settings()
1436 spin_unlock_irqrestore(&cp->lock, flags); cp_set_settings()
1443 struct cp_private *cp = netdev_priv(dev); cp_nway_reset() local
1444 return mii_nway_restart(&cp->mii_if); cp_nway_reset()
1449 struct cp_private *cp = netdev_priv(dev); cp_get_msglevel() local
1450 return cp->msg_enable; cp_get_msglevel()
1455 struct cp_private *cp = netdev_priv(dev); cp_set_msglevel() local
1456 cp->msg_enable = value; cp_set_msglevel()
1461 struct cp_private *cp = netdev_priv(dev); cp_set_features() local
1467 spin_lock_irqsave(&cp->lock, flags); cp_set_features()
1470 cp->cpcmd |= RxChkSum; cp_set_features()
1472 cp->cpcmd &= ~RxChkSum; cp_set_features()
1475 cp->cpcmd |= RxVlanOn; cp_set_features()
1477 cp->cpcmd &= ~RxVlanOn; cp_set_features()
1479 cpw16_f(CpCmd, cp->cpcmd); cp_set_features()
1480 spin_unlock_irqrestore(&cp->lock, flags); cp_set_features()
1488 struct cp_private *cp = netdev_priv(dev); cp_get_regs() local
1496 spin_lock_irqsave(&cp->lock, flags); cp_get_regs()
1497 memcpy_fromio(p, cp->regs, CP_REGS_SIZE); cp_get_regs()
1498 spin_unlock_irqrestore(&cp->lock, flags); cp_get_regs()
1503 struct cp_private *cp = netdev_priv(dev); cp_get_wol() local
1506 spin_lock_irqsave (&cp->lock, flags); cp_get_wol()
1507 netdev_get_wol (cp, wol); cp_get_wol()
1508 spin_unlock_irqrestore (&cp->lock, flags); cp_get_wol()
1513 struct cp_private *cp = netdev_priv(dev); cp_set_wol() local
1517 spin_lock_irqsave (&cp->lock, flags); cp_set_wol()
1518 rc = netdev_set_wol (cp, wol); cp_set_wol()
1519 spin_unlock_irqrestore (&cp->lock, flags); cp_set_wol()
1539 struct cp_private *cp = netdev_priv(dev); cp_get_ethtool_stats() local
1544 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats), cp_get_ethtool_stats()
1577 tmp_stats[i++] = cp->cp_stats.rx_frags; cp_get_ethtool_stats()
1580 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma); cp_get_ethtool_stats()
1606 struct cp_private *cp = netdev_priv(dev); cp_ioctl() local
1613 spin_lock_irqsave(&cp->lock, flags); cp_ioctl()
1614 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL); cp_ioctl()
1615 spin_unlock_irqrestore(&cp->lock, flags); cp_ioctl()
1621 struct cp_private *cp = netdev_priv(dev); cp_set_mac_address() local
1629 spin_lock_irq(&cp->lock); cp_set_mac_address()
1636 spin_unlock_irq(&cp->lock); cp_set_mac_address()
1760 struct cp_private *cp = netdev_priv(dev); cp_get_eeprom_len() local
1763 spin_lock_irq(&cp->lock); cp_get_eeprom_len()
1764 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128; cp_get_eeprom_len()
1765 spin_unlock_irq(&cp->lock); cp_get_eeprom_len()
1773 struct cp_private *cp = netdev_priv(dev); cp_get_eeprom() local
1782 spin_lock_irq(&cp->lock); cp_get_eeprom()
1784 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; cp_get_eeprom()
1787 val = read_eeprom(cp->regs, offset, addr_len); cp_get_eeprom()
1793 val = read_eeprom(cp->regs, offset, addr_len); cp_get_eeprom()
1800 val = read_eeprom(cp->regs, offset, addr_len); cp_get_eeprom()
1804 spin_unlock_irq(&cp->lock); cp_get_eeprom()
1811 struct cp_private *cp = netdev_priv(dev); cp_set_eeprom() local
1821 spin_lock_irq(&cp->lock); cp_set_eeprom()
1823 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; cp_set_eeprom()
1826 val = read_eeprom(cp->regs, offset, addr_len) & 0xff; cp_set_eeprom()
1828 write_eeprom(cp->regs, offset, val, addr_len); cp_set_eeprom()
1835 write_eeprom(cp->regs, offset, val, addr_len); cp_set_eeprom()
1840 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00; cp_set_eeprom()
1842 write_eeprom(cp->regs, offset, val, addr_len); cp_set_eeprom()
1845 spin_unlock_irq(&cp->lock); cp_set_eeprom()
1850 static void cp_set_d3_state (struct cp_private *cp) cp_set_d3_state() argument
1852 pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */ cp_set_d3_state()
1853 pci_set_power_state (cp->pdev, PCI_D3hot); cp_set_d3_state()
1877 struct cp_private *cp; cp_init_one() local
1898 cp = netdev_priv(dev); cp_init_one()
1899 cp->pdev = pdev; cp_init_one()
1900 cp->dev = dev; cp_init_one()
1901 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug); cp_init_one()
1902 spin_lock_init (&cp->lock); cp_init_one()
1903 cp->mii_if.dev = dev; cp_init_one()
1904 cp->mii_if.mdio_read = mdio_read; cp_init_one()
1905 cp->mii_if.mdio_write = mdio_write; cp_init_one()
1906 cp->mii_if.phy_id = CP_INTERNAL_PHY; cp_init_one()
1907 cp->mii_if.phy_id_mask = 0x1f; cp_init_one()
1908 cp->mii_if.reg_num_mask = 0x1f; cp_init_one()
1909 cp_set_rxbufsize(cp); cp_init_one()
1958 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | cp_init_one()
1972 cp->regs = regs; cp_init_one()
1974 cp_stop_hw(cp); cp_init_one()
1983 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); cp_init_one()
2010 if (cp->wol_enabled) cp_init_one()
2011 cp_set_d3_state (cp); cp_init_one()
2031 struct cp_private *cp = netdev_priv(dev); cp_remove_one() local
2034 iounmap(cp->regs); cp_remove_one()
2035 if (cp->wol_enabled) cp_remove_one()
2047 struct cp_private *cp = netdev_priv(dev); cp_suspend() local
2056 spin_lock_irqsave (&cp->lock, flags); cp_suspend()
2062 spin_unlock_irqrestore (&cp->lock, flags); cp_suspend()
2065 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled); cp_suspend()
2074 struct cp_private *cp = netdev_priv(dev); cp_resume() local
2087 cp_init_rings_index (cp); cp_resume()
2088 cp_init_hw (cp); cp_resume()
2089 cp_enable_irq(cp); cp_resume()
2092 spin_lock_irqsave (&cp->lock, flags); cp_resume()
2094 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); cp_resume()
2096 spin_unlock_irqrestore (&cp->lock, flags); cp_resume()
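Note: a constant pattern in the driver above is that every dma_map_single() result is vetted with dma_mapping_error() before it is written into a descriptor or handed to the NIC (the 4.1 driver passes the legacy PCI_DMA_* direction constants; the sketch below uses the generic DMA_TO_DEVICE). Kernel-context sketch with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_map_for_tx(struct device *dev, void *buf, size_t len,
                           dma_addr_t *out)
{
    dma_addr_t mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, mapping))
        return -ENOMEM;     /* never program a failed mapping into hardware */

    *out = mapping;
    return 0;
}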
/linux-4.1.27/net/bluetooth/
amp.c 225 struct hci_cp_read_local_amp_assoc cp; amp_read_loc_assoc_frag() local
230 cp.phy_handle = phy_handle; amp_read_loc_assoc_frag()
231 cp.max_len = cpu_to_le16(hdev->amp_assoc_size); amp_read_loc_assoc_frag()
232 cp.len_so_far = cpu_to_le16(loc_assoc->offset); amp_read_loc_assoc_frag()
234 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); amp_read_loc_assoc_frag()
239 struct hci_cp_read_local_amp_assoc cp; amp_read_loc_assoc() local
242 memset(&cp, 0, sizeof(cp)); amp_read_loc_assoc()
244 cp.max_len = cpu_to_le16(hdev->amp_assoc_size); amp_read_loc_assoc()
247 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); amp_read_loc_assoc()
253 struct hci_cp_read_local_amp_assoc cp; amp_read_loc_assoc_final_data() local
256 cp.phy_handle = hcon->handle; amp_read_loc_assoc_final_data()
257 cp.len_so_far = cpu_to_le16(0); amp_read_loc_assoc_final_data()
258 cp.max_len = cpu_to_le16(hdev->amp_assoc_size); amp_read_loc_assoc_final_data()
263 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); amp_read_loc_assoc_final_data()
270 struct hci_cp_write_remote_amp_assoc *cp; amp_write_rem_assoc_frag() local
289 len = frag_len + sizeof(*cp); amp_write_rem_assoc_frag()
291 cp = kzalloc(len, GFP_KERNEL); amp_write_rem_assoc_frag()
292 if (!cp) { amp_write_rem_assoc_frag()
300 cp->phy_handle = hcon->handle; amp_write_rem_assoc_frag()
301 cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far); amp_write_rem_assoc_frag()
302 cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len); amp_write_rem_assoc_frag()
303 memcpy(cp->frag, ctrl->assoc, frag_len); amp_write_rem_assoc_frag()
310 hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp); amp_write_rem_assoc_frag()
312 kfree(cp); amp_write_rem_assoc_frag()
350 struct hci_cp_create_phy_link cp; amp_create_phylink() local
352 cp.phy_handle = hcon->handle; amp_create_phylink()
357 if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, amp_create_phylink()
358 &cp.key_type)) { amp_create_phylink()
363 hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp); amp_create_phylink()
369 struct hci_cp_accept_phy_link cp; amp_accept_phylink() local
371 cp.phy_handle = hcon->handle; amp_accept_phylink()
376 if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, amp_accept_phylink()
377 &cp.key_type)) { amp_accept_phylink()
382 hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp); amp_accept_phylink()
416 struct hci_cp_create_accept_logical_link cp; amp_create_logical_link() local
429 cp.phy_handle = hs_hcon->handle; amp_create_logical_link()
431 cp.tx_flow_spec.id = chan->local_id; amp_create_logical_link()
432 cp.tx_flow_spec.stype = chan->local_stype; amp_create_logical_link()
433 cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu); amp_create_logical_link()
434 cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime); amp_create_logical_link()
435 cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat); amp_create_logical_link()
436 cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to); amp_create_logical_link()
438 cp.rx_flow_spec.id = chan->remote_id; amp_create_logical_link()
439 cp.rx_flow_spec.stype = chan->remote_stype; amp_create_logical_link()
440 cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu); amp_create_logical_link()
441 cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime); amp_create_logical_link()
442 cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat); amp_create_logical_link()
443 cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to); amp_create_logical_link()
446 hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp), amp_create_logical_link()
447 &cp); amp_create_logical_link()
449 hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp), amp_create_logical_link()
450 &cp); amp_create_logical_link()
458 struct hci_cp_disconn_logical_link cp; amp_disconnect_logical_link() local
465 cp.log_handle = cpu_to_le16(hchan->handle); amp_disconnect_logical_link()
466 hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp); amp_disconnect_logical_link()
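Note: amp.c above repeats the standard Bluetooth command idiom: fill a stack struct hci_cp_*, convert multi-byte fields to little endian with cpu_to_le16()/cpu_to_le32(), and send it with sizeof(cp). A kernel-context sketch using the real disconnect command (the same shape as hci_disconnect() in hci_conn.c just below):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int demo_disconnect(struct hci_conn *conn, __u8 reason)
{
    struct hci_cp_disconnect cp;

    cp.handle = cpu_to_le16(conn->handle);  /* HCI wire format is LE */
    cp.reason = reason;

    return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}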
hci_conn.c 71 struct hci_cp_create_conn cp; hci_acl_create_connection() local
83 memset(&cp, 0, sizeof(cp)); hci_acl_create_connection()
84 bacpy(&cp.bdaddr, &conn->dst); hci_acl_create_connection()
85 cp.pscan_rep_mode = 0x02; hci_acl_create_connection()
90 cp.pscan_rep_mode = ie->data.pscan_rep_mode; hci_acl_create_connection()
91 cp.pscan_mode = ie->data.pscan_mode; hci_acl_create_connection()
92 cp.clock_offset = ie->data.clock_offset | hci_acl_create_connection()
101 cp.pkt_type = cpu_to_le16(conn->pkt_type); hci_acl_create_connection()
103 cp.role_switch = 0x01; hci_acl_create_connection()
105 cp.role_switch = 0x00; hci_acl_create_connection()
107 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp); hci_acl_create_connection()
112 struct hci_cp_create_conn_cancel cp; hci_acl_create_connection_cancel() local
119 bacpy(&cp.bdaddr, &conn->dst); hci_acl_create_connection_cancel()
120 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp); hci_acl_create_connection_cancel()
125 struct hci_cp_reject_sync_conn_req cp; hci_reject_sco() local
127 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; hci_reject_sco()
128 bacpy(&cp.bdaddr, &conn->dst); hci_reject_sco()
130 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp); hci_reject_sco()
135 struct hci_cp_disconnect cp; hci_disconnect() local
155 cp.handle = cpu_to_le16(conn->handle); hci_disconnect()
156 cp.reason = reason; hci_disconnect()
157 return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); hci_disconnect()
162 struct hci_cp_disconn_phy_link cp; hci_amp_disconn() local
168 cp.phy_handle = HCI_PHY_HANDLE(conn->handle); hci_amp_disconn()
169 cp.reason = hci_proto_disconn_ind(conn); hci_amp_disconn()
171 sizeof(cp), &cp); hci_amp_disconn()
177 struct hci_cp_add_sco cp; hci_add_sco() local
186 cp.handle = cpu_to_le16(handle); hci_add_sco()
187 cp.pkt_type = cpu_to_le16(conn->pkt_type); hci_add_sco()
189 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); hci_add_sco()
195 struct hci_cp_setup_sync_conn cp; hci_setup_sync() local
205 cp.handle = cpu_to_le16(handle); hci_setup_sync()
207 cp.tx_bandwidth = cpu_to_le32(0x00001f40); hci_setup_sync()
208 cp.rx_bandwidth = cpu_to_le32(0x00001f40); hci_setup_sync()
209 cp.voice_setting = cpu_to_le16(conn->setting); hci_setup_sync()
232 cp.retrans_effort = param->retrans_effort; hci_setup_sync()
233 cp.pkt_type = __cpu_to_le16(param->pkt_type); hci_setup_sync()
234 cp.max_latency = __cpu_to_le16(param->max_latency); hci_setup_sync()
236 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) hci_setup_sync()
247 struct hci_cp_le_conn_update cp; hci_le_conn_update() local
261 memset(&cp, 0, sizeof(cp)); hci_le_conn_update()
262 cp.handle = cpu_to_le16(conn->handle); hci_le_conn_update()
263 cp.conn_interval_min = cpu_to_le16(min); hci_le_conn_update()
264 cp.conn_interval_max = cpu_to_le16(max); hci_le_conn_update()
265 cp.conn_latency = cpu_to_le16(latency); hci_le_conn_update()
266 cp.supervision_timeout = cpu_to_le16(to_multiplier); hci_le_conn_update()
267 cp.min_ce_len = cpu_to_le16(0x0000); hci_le_conn_update()
268 cp.max_ce_len = cpu_to_le16(0x0000); hci_le_conn_update()
270 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); hci_le_conn_update()
282 struct hci_cp_le_start_enc cp; hci_le_start_enc() local
286 memset(&cp, 0, sizeof(cp)); hci_le_start_enc()
288 cp.handle = cpu_to_le16(conn->handle); hci_le_start_enc()
289 cp.rand = rand; hci_le_start_enc()
290 cp.ediv = ediv; hci_le_start_enc()
291 memcpy(cp.ltk, ltk, sizeof(cp.ltk)); hci_le_start_enc()
293 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); hci_le_start_enc()
380 struct hci_cp_sniff_subrate cp; hci_conn_idle() local
381 cp.handle = cpu_to_le16(conn->handle); hci_conn_idle()
382 cp.max_latency = cpu_to_le16(0); hci_conn_idle()
383 cp.min_remote_timeout = cpu_to_le16(0); hci_conn_idle()
384 cp.min_local_timeout = cpu_to_le16(0); hci_conn_idle()
385 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); hci_conn_idle()
389 struct hci_cp_sniff_mode cp; hci_conn_idle() local
390 cp.handle = cpu_to_le16(conn->handle); hci_conn_idle()
391 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); hci_conn_idle()
392 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); hci_conn_idle()
393 cp.attempt = cpu_to_le16(4); hci_conn_idle()
394 cp.timeout = cpu_to_le16(1); hci_conn_idle()
395 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); hci_conn_idle()
661 struct hci_cp_le_create_conn cp; hci_req_add_le_create_conn() local
665 memset(&cp, 0, sizeof(cp)); hci_req_add_le_create_conn()
673 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); hci_req_add_le_create_conn()
674 cp.scan_window = cpu_to_le16(hdev->le_scan_window); hci_req_add_le_create_conn()
675 bacpy(&cp.peer_addr, &conn->dst); hci_req_add_le_create_conn()
676 cp.peer_addr_type = conn->dst_type; hci_req_add_le_create_conn()
677 cp.own_address_type = own_addr_type; hci_req_add_le_create_conn()
678 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); hci_req_add_le_create_conn()
679 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); hci_req_add_le_create_conn()
680 cp.conn_latency = cpu_to_le16(conn->le_conn_latency); hci_req_add_le_create_conn()
681 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); hci_req_add_le_create_conn()
682 cp.min_ce_len = cpu_to_le16(0x0000); hci_req_add_le_create_conn()
683 cp.max_ce_len = cpu_to_le16(0x0000); hci_req_add_le_create_conn()
685 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); hci_req_add_le_create_conn()
694 struct hci_cp_le_set_adv_param cp; hci_req_directed_advertising() local
711 memset(&cp, 0, sizeof(cp)); hci_req_directed_advertising()
712 cp.type = LE_ADV_DIRECT_IND; hci_req_directed_advertising()
713 cp.own_address_type = own_addr_type; hci_req_directed_advertising()
714 cp.direct_addr_type = conn->dst_type; hci_req_directed_advertising()
715 bacpy(&cp.direct_addr, &conn->dst); hci_req_directed_advertising()
716 cp.channel_map = hdev->le_adv_channel_map; hci_req_directed_advertising()
718 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); hci_req_directed_advertising()
978 struct hci_cp_auth_requested cp; hci_conn_auth() local
980 cp.handle = cpu_to_le16(conn->handle); hci_conn_auth()
982 sizeof(cp), &cp); hci_conn_auth()
1002 struct hci_cp_set_conn_encrypt cp; hci_conn_encrypt() local
1003 cp.handle = cpu_to_le16(conn->handle); hci_conn_encrypt()
1004 cp.encrypt = 0x01; hci_conn_encrypt()
1005 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), hci_conn_encrypt()
1006 &cp); hci_conn_encrypt()
1108 struct hci_cp_switch_role cp; hci_conn_switch_role() local
1109 bacpy(&cp.bdaddr, &conn->dst); hci_conn_switch_role()
1110 cp.role = role; hci_conn_switch_role()
1111 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp); hci_conn_switch_role()
1132 struct hci_cp_exit_sniff_mode cp; hci_conn_enter_active_mode() local
1133 cp.handle = cpu_to_le16(conn->handle); hci_conn_enter_active_mode()
1134 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp); hci_conn_enter_active_mode()
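Note: hci_conn.c above mostly fires single commands with hci_send_cmd(), while the mgmt.c listing below batches commands into a struct hci_request via hci_req_add() and runs them together. A sketch of that batching, assuming it is compiled inside net/bluetooth/ where the private hci_request.h (present since 4.0) is visible; the completion callback is omitted:

#include <linux/string.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "hci_request.h"    /* private header inside net/bluetooth/ */

static int demo_set_adv_param(struct hci_dev *hdev)
{
    struct hci_request req;
    struct hci_cp_le_set_adv_param cp;

    hci_req_init(&req, hdev);

    memset(&cp, 0, sizeof(cp));
    cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
    cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
    cp.type = LE_ADV_IND;
    cp.channel_map = hdev->le_adv_channel_map;
    hci_req_add(&req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

    return hci_req_run(&req, NULL); /* queue the batch; no callback */
}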
mgmt.c 876 struct hci_cp_le_set_scan_rsp_data cp; update_scan_rsp_data_for_instance() local
882 memset(&cp, 0, sizeof(cp)); update_scan_rsp_data_for_instance()
885 len = create_instance_scan_rsp_data(hdev, cp.data); update_scan_rsp_data_for_instance()
887 len = create_default_scan_rsp_data(hdev, cp.data); update_scan_rsp_data_for_instance()
890 !memcmp(cp.data, hdev->scan_rsp_data, len)) update_scan_rsp_data_for_instance()
893 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); update_scan_rsp_data_for_instance()
896 cp.length = len; update_scan_rsp_data_for_instance()
898 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); update_scan_rsp_data_for_instance()
929 struct mgmt_mode *cp = cmd->param; get_adv_discov_flags() local
930 if (cp->val == 0x01) get_adv_discov_flags()
932 else if (cp->val == 0x02) get_adv_discov_flags()
967 struct mgmt_mode *cp = cmd->param; get_connectable() local
969 return cp->val; get_connectable()
1071 struct hci_cp_le_set_adv_data cp; update_adv_data_for_instance() local
1077 memset(&cp, 0, sizeof(cp)); update_adv_data_for_instance()
1079 len = create_instance_adv_data(hdev, instance, cp.data); update_adv_data_for_instance()
1083 memcmp(cp.data, hdev->adv_data, len) == 0) update_adv_data_for_instance()
1086 memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); update_adv_data_for_instance()
1089 cp.length = len; update_adv_data_for_instance()
1091 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); update_adv_data_for_instance()
1163 struct hci_cp_write_eir cp; update_eir() local
1177 memset(&cp, 0, sizeof(cp)); update_eir()
1179 create_eir(hdev, cp.data); update_eir()
1181 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) update_eir()
1184 memcpy(hdev->eir, cp.data, sizeof(cp.data)); update_eir()
1186 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); update_eir()
1239 struct hci_cp_le_set_adv_param cp; enable_advertising() local
1274 memset(&cp, 0, sizeof(cp)); enable_advertising()
1275 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval); enable_advertising()
1276 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval); enable_advertising()
1279 cp.type = LE_ADV_IND; enable_advertising()
1281 cp.type = LE_ADV_SCAN_IND; enable_advertising()
1283 cp.type = LE_ADV_NONCONN_IND; enable_advertising()
1285 cp.own_address_type = own_addr_type; enable_advertising()
1286 cp.channel_map = hdev->le_adv_channel_map; enable_advertising()
1288 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); enable_advertising()
1402 struct hci_cp_remote_name_req_cancel cp; hci_stop_discovery() local
1423 bacpy(&cp.bdaddr, &e->data.bdaddr); hci_stop_discovery()
1424 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), hci_stop_discovery()
1425 &cp); hci_stop_discovery()
1550 struct mgmt_mode *cp = data; set_powered() local
1556 if (cp->val != 0x00 && cp->val != 0x01) set_powered()
1571 if (cp->val) { set_powered()
1579 if (!!cp->val == hdev_is_powered(hdev)) { set_powered()
1590 if (cp->val) { set_powered()
1706 struct mgmt_mode *cp; set_discoverable_complete() local
1725 cp = cmd->param; set_discoverable_complete()
1726 if (cp->val) { set_discoverable_complete()
1763 struct mgmt_cp_set_discoverable *cp = data; set_discoverable() local
1777 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) set_discoverable()
1781 timeout = __le16_to_cpu(cp->timeout); set_discoverable()
1786 if ((cp->val == 0x00 && timeout > 0) || set_discoverable()
1787 (cp->val == 0x02 && timeout == 0)) set_discoverable()
1819 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) { set_discoverable()
1838 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && set_discoverable()
1839 (cp->val == 0x02) == hci_dev_test_flag(hdev, set_discoverable()
1844 if (cp->val && hdev->discov_timeout > 0) { set_discoverable()
1868 if (cp->val == 0x02) set_discoverable()
1883 if (cp->val) { set_discoverable()
1886 if (cp->val == 0x02) { set_discoverable()
1964 struct mgmt_mode *cp; set_connectable_complete() local
1981 cp = cmd->param; set_connectable_complete()
1982 if (cp->val) { set_connectable_complete()
2042 struct mgmt_mode *cp = data; set_connectable() local
2055 if (cp->val != 0x00 && cp->val != 0x01) set_connectable()
2062 err = set_connectable_update_settings(hdev, sk, cp->val); set_connectable()
2086 if (!cp->val) { set_connectable()
2091 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) { set_connectable()
2092 if (cp->val) { set_connectable()
2127 cp->val); set_connectable()
2139 struct mgmt_mode *cp = data; set_bondable() local
2145 if (cp->val != 0x00 && cp->val != 0x01) set_bondable()
2151 if (cp->val) set_bondable()
2171 struct mgmt_mode *cp = data; set_link_security() local
2183 if (cp->val != 0x00 && cp->val != 0x01) set_link_security()
2192 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { set_link_security()
2213 val = !!cp->val; set_link_security()
2239 struct mgmt_mode *cp = data; set_ssp() local
2254 if (cp->val != 0x00 && cp->val != 0x01) set_ssp()
2263 if (cp->val) { set_ssp()
2292 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { set_ssp()
2303 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) set_ssp()
2305 sizeof(cp->val), &cp->val); set_ssp()
2307 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val); set_ssp()
2320 struct mgmt_mode *cp = data; set_hs() local
2339 if (cp->val != 0x00 && cp->val != 0x01) set_hs()
2351 if (cp->val) { set_hs()
2417 struct mgmt_mode *cp = data; set_le() local
2430 if (cp->val != 0x00 && cp->val != 0x01) set_le()
2444 if (cp->val == 0x01) set_le()
2453 val = !!cp->val; set_le()
2586 struct mgmt_cp_add_uuid *cp = data; add_uuid() local
2608 memcpy(uuid->uuid, cp->uuid, 16); add_uuid()
2609 uuid->svc_hint = cp->svc_hint; add_uuid()
2610 uuid->size = get_uuid_size(cp->uuid); add_uuid()
2666 struct mgmt_cp_remove_uuid *cp = data; remove_uuid() local
2683 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { remove_uuid()
2699 if (memcmp(match->uuid, cp->uuid, 16) != 0) remove_uuid()
2752 struct mgmt_cp_set_dev_class *cp = data; set_dev_class() local
2771 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) { set_dev_class()
2777 hdev->major_class = cp->major; set_dev_class()
2778 hdev->minor_class = cp->minor; set_dev_class()
2823 struct mgmt_cp_load_link_keys *cp = data; load_link_keys() local
2824 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) / load_link_keys()
2836 key_count = __le16_to_cpu(cp->key_count); load_link_keys()
2844 expected_len = sizeof(*cp) + key_count * load_link_keys()
2853 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01) load_link_keys()
2857 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, load_link_keys()
2861 struct mgmt_link_key_info *key = &cp->keys[i]; load_link_keys()
2873 if (cp->debug_keys) load_link_keys()
2883 struct mgmt_link_key_info *key = &cp->keys[i]; load_link_keys()
2917 struct mgmt_cp_unpair_device *cp = data; unpair_device() local
2925 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); unpair_device()
2926 rp.addr.type = cp->addr.type; unpair_device()
2928 if (!bdaddr_type_is_valid(cp->addr.type)) unpair_device()
2933 if (cp->disconnect != 0x00 && cp->disconnect != 0x01) unpair_device()
2947 if (cp->addr.type == BDADDR_BREDR) { unpair_device()
2955 if (cp->disconnect) unpair_device()
2957 &cp->addr.bdaddr); unpair_device()
2961 err = hci_remove_link_key(hdev, &cp->addr.bdaddr); unpair_device()
2965 if (cp->addr.type == BDADDR_LE_PUBLIC) unpair_device()
2971 &cp->addr.bdaddr); unpair_device()
2983 if (!cp->disconnect) unpair_device()
2986 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type); unpair_device()
2989 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type); unpair_device()
2991 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type); unpair_device()
3007 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk); unpair_device()
3011 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, unpair_device()
3012 sizeof(*cp)); unpair_device()
3034 struct mgmt_cp_disconnect *cp = data; disconnect() local
3043 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); disconnect()
3044 rp.addr.type = cp->addr.type; disconnect()
3046 if (!bdaddr_type_is_valid(cp->addr.type)) disconnect()
3066 if (cp->addr.type == BDADDR_BREDR) disconnect()
3068 &cp->addr.bdaddr); disconnect()
3070 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); disconnect()
3174 struct mgmt_cp_pin_code_neg_reply *cp) send_pin_code_neg_reply()
3179 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, send_pin_code_neg_reply()
3180 sizeof(*cp)); send_pin_code_neg_reply()
3185 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr); send_pin_code_neg_reply()
3196 struct mgmt_cp_pin_code_reply *cp = data; pin_code_reply() local
3211 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); pin_code_reply()
3218 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) { pin_code_reply()
3221 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr)); pin_code_reply()
3241 bacpy(&reply.bdaddr, &cp->addr.bdaddr); pin_code_reply()
3242 reply.pin_len = cp->pin_len; pin_code_reply()
3243 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code)); pin_code_reply()
3257 struct mgmt_cp_set_io_capability *cp = data; set_io_capability() local
3261 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY) set_io_capability()
3267 hdev->io_capability = cp->io_capability; set_io_capability()
3375 struct mgmt_cp_pair_device *cp = data; pair_device() local
3385 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); pair_device()
3386 rp.addr.type = cp->addr.type; pair_device()
3388 if (!bdaddr_type_is_valid(cp->addr.type)) pair_device()
3393 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY) pair_device()
3407 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) { pair_device()
3417 if (cp->addr.type == BDADDR_BREDR) { pair_device()
3418 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, pair_device()
3425 if (cp->addr.type == BDADDR_LE_PUBLIC) pair_device()
3439 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); pair_device()
3441 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type, pair_device()
3480 if (cp->addr.type == BDADDR_BREDR) { pair_device()
3490 conn->io_capability = cp->io_cap; pair_device()
3602 struct hci_cp_user_passkey_reply cp; user_pairing_resp() local
3604 bacpy(&cp.bdaddr, &addr->bdaddr); user_pairing_resp()
3605 cp.passkey = passkey; user_pairing_resp()
3606 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp); user_pairing_resp()
3622 struct mgmt_cp_pin_code_neg_reply *cp = data; pin_code_neg_reply() local
3626 return user_pairing_resp(sk, hdev, &cp->addr, pin_code_neg_reply()
3634 struct mgmt_cp_user_confirm_reply *cp = data; user_confirm_reply() local
3638 if (len != sizeof(*cp)) user_confirm_reply()
3642 return user_pairing_resp(sk, hdev, &cp->addr, user_confirm_reply()
3650 struct mgmt_cp_user_confirm_neg_reply *cp = data; user_confirm_neg_reply() local
3654 return user_pairing_resp(sk, hdev, &cp->addr, user_confirm_neg_reply()
3662 struct mgmt_cp_user_passkey_reply *cp = data; user_passkey_reply() local
3666 return user_pairing_resp(sk, hdev, &cp->addr, user_passkey_reply()
3668 HCI_OP_USER_PASSKEY_REPLY, cp->passkey); user_passkey_reply()
3674 struct mgmt_cp_user_passkey_neg_reply *cp = data; user_passkey_neg_reply() local
3678 return user_pairing_resp(sk, hdev, &cp->addr, user_passkey_neg_reply()
3686 struct hci_cp_write_local_name cp; update_name() local
3688 memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); update_name()
3690 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); update_name()
3695 struct mgmt_cp_set_local_name *cp; set_name_complete() local
3706 cp = cmd->param; set_name_complete()
3713 cp, sizeof(*cp)); set_name_complete()
3724 struct mgmt_cp_set_local_name *cp = data; set_local_name() local
3736 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) && set_local_name()
3737 !memcmp(hdev->short_name, cp->short_name, set_local_name()
3744 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name)); set_local_name()
3747 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); set_local_name()
3766 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); set_local_name()
3917 struct mgmt_cp_add_remote_oob_data *cp = data; add_remote_oob_data() local
3920 if (cp->addr.type != BDADDR_BREDR) { add_remote_oob_data()
3924 &cp->addr, sizeof(cp->addr)); add_remote_oob_data()
3928 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, add_remote_oob_data()
3929 cp->addr.type, cp->hash, add_remote_oob_data()
3930 cp->rand, NULL, NULL); add_remote_oob_data()
3938 &cp->addr, sizeof(cp->addr)); add_remote_oob_data()
3940 struct mgmt_cp_add_remote_oob_ext_data *cp = data; add_remote_oob_data() local
3944 if (bdaddr_type_is_le(cp->addr.type)) { add_remote_oob_data()
3948 if (memcmp(cp->rand192, ZERO_KEY, 16) || add_remote_oob_data()
3949 memcmp(cp->hash192, ZERO_KEY, 16)) { add_remote_oob_data()
3963 if (!memcmp(cp->rand192, ZERO_KEY, 16) || add_remote_oob_data()
3964 !memcmp(cp->hash192, ZERO_KEY, 16)) { add_remote_oob_data()
3968 rand192 = cp->rand192; add_remote_oob_data()
3969 hash192 = cp->hash192; add_remote_oob_data()
3976 if (!memcmp(cp->rand256, ZERO_KEY, 16) || add_remote_oob_data()
3977 !memcmp(cp->hash256, ZERO_KEY, 16)) { add_remote_oob_data()
3981 rand256 = cp->rand256; add_remote_oob_data()
3982 hash256 = cp->hash256; add_remote_oob_data()
3985 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, add_remote_oob_data()
3986 cp->addr.type, hash192, rand192, add_remote_oob_data()
3995 status, &cp->addr, sizeof(cp->addr)); add_remote_oob_data()
4010 struct mgmt_cp_remove_remote_oob_data *cp = data; remove_remote_oob_data() local
4016 if (cp->addr.type != BDADDR_BREDR) remove_remote_oob_data()
4020 &cp->addr, sizeof(cp->addr)); remove_remote_oob_data()
4024 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { remove_remote_oob_data()
4030 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type); remove_remote_oob_data()
4038 status, &cp->addr, sizeof(cp->addr)); remove_remote_oob_data()
4047 struct hci_cp_inquiry cp; trigger_bredr_inquiry() local
4062 memset(&cp, 0, sizeof(cp)); trigger_bredr_inquiry()
4063 memcpy(&cp.lap, lap, sizeof(cp.lap)); trigger_bredr_inquiry()
4064 cp.length = DISCOV_BREDR_INQUIRY_LEN; trigger_bredr_inquiry()
4066 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); trigger_bredr_inquiry()
4257 struct mgmt_cp_start_discovery *cp = data; start_discovery() local
4270 &cp->type, sizeof(cp->type)); start_discovery()
4277 MGMT_STATUS_BUSY, &cp->type, start_discovery()
4278 sizeof(cp->type)); start_discovery()
4295 hdev->discovery.type = cp->type; start_discovery()
4302 status, &cp->type, sizeof(cp->type)); start_discovery()
4330 struct mgmt_cp_start_service_discovery *cp = data; start_service_discovery() local
4333 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16); start_service_discovery()
4346 &cp->type, sizeof(cp->type)); start_service_discovery()
4354 MGMT_STATUS_BUSY, &cp->type, start_service_discovery()
4355 sizeof(cp->type)); start_service_discovery()
4359 uuid_count = __le16_to_cpu(cp->uuid_count); start_service_discovery()
4365 MGMT_STATUS_INVALID_PARAMS, &cp->type, start_service_discovery()
4366 sizeof(cp->type)); start_service_discovery()
4370 expected_len = sizeof(*cp) + uuid_count * 16; start_service_discovery()
4376 MGMT_STATUS_INVALID_PARAMS, &cp->type, start_service_discovery()
4377 sizeof(cp->type)); start_service_discovery()
4396 hdev->discovery.type = cp->type; start_service_discovery()
4397 hdev->discovery.rssi = cp->rssi; start_service_discovery()
4401 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16, start_service_discovery()
4407 &cp->type, sizeof(cp->type)); start_service_discovery()
4418 status, &cp->type, sizeof(cp->type)); start_service_discovery()
4517 struct mgmt_cp_confirm_name *cp = data; confirm_name() local
4527 MGMT_STATUS_FAILED, &cp->addr, confirm_name()
4528 sizeof(cp->addr)); confirm_name()
4532 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr); confirm_name()
4535 MGMT_STATUS_INVALID_PARAMS, &cp->addr, confirm_name()
4536 sizeof(cp->addr)); confirm_name()
4540 if (cp->name_known) { confirm_name()
4549 &cp->addr, sizeof(cp->addr)); confirm_name()
4559 struct mgmt_cp_block_device *cp = data; block_device() local
4565 if (!bdaddr_type_is_valid(cp->addr.type)) block_device()
4568 &cp->addr, sizeof(cp->addr)); block_device()
4572 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr, block_device()
4573 cp->addr.type); block_device()
4579 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr), block_device()
4585 &cp->addr, sizeof(cp->addr)); block_device()
4595 struct mgmt_cp_unblock_device *cp = data; unblock_device() local
4601 if (!bdaddr_type_is_valid(cp->addr.type)) unblock_device()
4604 &cp->addr, sizeof(cp->addr)); unblock_device()
4608 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr, unblock_device()
4609 cp->addr.type); unblock_device()
4615 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr), unblock_device()
4621 &cp->addr, sizeof(cp->addr)); unblock_device()
4631 struct mgmt_cp_set_device_id *cp = data; set_device_id() local
4638 source = __le16_to_cpu(cp->source); set_device_id()
4647 hdev->devid_vendor = __le16_to_cpu(cp->vendor); set_device_id()
4648 hdev->devid_product = __le16_to_cpu(cp->product); set_device_id()
4649 hdev->devid_version = __le16_to_cpu(cp->version); set_device_id()
4720 struct mgmt_mode *cp = data; set_advertising() local
4733 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) set_advertising()
4739 val = !!cp->val; set_advertising()
4748 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) || set_advertising()
4754 if (cp->val) { set_advertising()
4756 if (cp->val == 0x02) set_advertising()
4790 if (cp->val == 0x02) set_advertising()
4816 struct mgmt_cp_set_static_address *cp = data; set_static_address() local
4829 if (bacmp(&cp->bdaddr, BDADDR_ANY)) { set_static_address()
4830 if (!bacmp(&cp->bdaddr, BDADDR_NONE)) set_static_address()
4836 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0) set_static_address()
4844 bacpy(&hdev->static_addr, &cp->bdaddr); set_static_address()
4860 struct mgmt_cp_set_scan_params *cp = data; set_scan_params() local
4870 interval = __le16_to_cpu(cp->interval); set_scan_params()
4876 window = __le16_to_cpu(cp->window); set_scan_params()
4931 struct mgmt_mode *cp = cmd->param; fast_connectable_complete() local
4933 if (cp->val) fast_connectable_complete()
4951 struct mgmt_mode *cp = data; set_fast_connectable() local
4963 if (cp->val != 0x00 && cp->val != 0x01) set_fast_connectable()
4975 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) { set_fast_connectable()
4998 write_fast_connectable(&req, cp->val); set_fast_connectable()
5047 struct mgmt_mode *cp = data; set_bredr() local
5062 if (cp->val != 0x00 && cp->val != 0x01) set_bredr()
5068 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { set_bredr()
5074 if (!cp->val) { set_bredr()
5093 if (!cp->val) { set_bredr()
5160 struct mgmt_mode *cp; sc_enable_complete() local
5176 cp = cmd->param; sc_enable_complete()
5178 switch (cp->val) { sc_enable_complete()
5205 struct mgmt_mode *cp = data; set_secure_conn() local
5224 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) set_secure_conn()
5234 if (cp->val) { set_secure_conn()
5237 if (cp->val == 0x02) set_secure_conn()
5263 val = !!cp->val; set_secure_conn()
5266 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) { set_secure_conn()
5293 struct mgmt_mode *cp = data; set_debug_keys() local
5299 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) set_debug_keys()
5305 if (cp->val) set_debug_keys()
5311 if (cp->val == 0x02) set_debug_keys()
5320 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00; set_debug_keys()
5340 struct mgmt_cp_set_privacy *cp = cp_data; set_privacy() local
5350 if (cp->privacy != 0x00 && cp->privacy != 0x01) set_privacy()
5365 if (cp->privacy) { set_privacy()
5367 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); set_privacy()
5406 struct mgmt_cp_load_irks *cp = cp_data; load_irks() local
5407 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) / load_irks()
5418 irk_count = __le16_to_cpu(cp->irk_count); load_irks()
5425 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info); load_irks()
5436 struct mgmt_irk_info *key = &cp->irks[i]; load_irks()
5449 struct mgmt_irk_info *irk = &cp->irks[i]; load_irks()
5492 struct mgmt_cp_load_long_term_keys *cp = cp_data; load_long_term_keys() local
5493 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) / load_long_term_keys()
5504 key_count = __le16_to_cpu(cp->key_count); load_long_term_keys()
5511 expected_len = sizeof(*cp) + key_count * load_long_term_keys()
5523 struct mgmt_ltk_info *key = &cp->keys[i]; load_long_term_keys()
5536 struct mgmt_ltk_info *key = &cp->keys[i]; load_long_term_keys()
5611 struct hci_cp_read_rssi *cp; conn_info_refresh_complete() local
5630 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI); conn_info_refresh_complete()
5631 if (!cp) { conn_info_refresh_complete()
5632 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); conn_info_refresh_complete()
5638 if (!cp) { conn_info_refresh_complete()
5643 handle = __le16_to_cpu(cp->handle); conn_info_refresh_complete()
5664 struct mgmt_cp_get_conn_info *cp = data; get_conn_info() local
5673 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); get_conn_info()
5674 rp.addr.type = cp->addr.type; get_conn_info()
5676 if (!bdaddr_type_is_valid(cp->addr.type)) get_conn_info()
5690 if (cp->addr.type == BDADDR_BREDR) get_conn_info()
5692 &cp->addr.bdaddr); get_conn_info()
5694 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); get_conn_info()
5735 if (!bdaddr_type_is_le(cp->addr.type) || get_conn_info()
5853 struct mgmt_cp_get_clock_info *cp = data; get_clock_info() local
5864 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); get_clock_info()
5865 rp.addr.type = cp->addr.type; get_clock_info()
5867 if (cp->addr.type != BDADDR_BREDR) get_clock_info()
5881 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { get_clock_info()
5883 &cp->addr.bdaddr); get_clock_info()
6019 struct mgmt_cp_add_device *cp = data; add_device() local
6027 if (!bdaddr_type_is_valid(cp->addr.type) || add_device()
6028 !bacmp(&cp->addr.bdaddr, BDADDR_ANY)) add_device()
6031 &cp->addr, sizeof(cp->addr)); add_device()
6033 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02) add_device()
6036 &cp->addr, sizeof(cp->addr)); add_device()
6050 if (cp->addr.type == BDADDR_BREDR) { add_device()
6052 if (cp->action != 0x01) { add_device()
6059 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr, add_device()
6060 cp->addr.type); add_device()
6069 if (cp->addr.type == BDADDR_LE_PUBLIC) add_device()
6074 if (cp->action == 0x02) add_device()
6076 else if (cp->action == 0x01) add_device()
6084 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type, add_device()
6092 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); add_device()
6142 struct mgmt_cp_remove_device *cp = data; remove_device() local
6161 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { remove_device()
6165 if (!bdaddr_type_is_valid(cp->addr.type)) { remove_device()
6172 if (cp->addr.type == BDADDR_BREDR) { remove_device()
6174 &cp->addr.bdaddr, remove_device()
6175 cp->addr.type); remove_device()
6185 device_removed(sk, hdev, &cp->addr.bdaddr, remove_device()
6186 cp->addr.type); remove_device()
6190 if (cp->addr.type == BDADDR_LE_PUBLIC) remove_device()
6195 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, remove_device()
6216 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); remove_device()
6221 if (cp->addr.type) { remove_device()
6269 struct mgmt_cp_load_conn_param *cp = data; load_conn_param() local
6270 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) / load_conn_param()
6279 param_count = __le16_to_cpu(cp->param_count); load_conn_param()
6287 expected_len = sizeof(*cp) + param_count * load_conn_param()
6303 struct mgmt_conn_param *param = &cp->params[i]; load_conn_param()
6355 struct mgmt_cp_set_external_config *cp = data; set_external_config() local
6365 if (cp->config != 0x00 && cp->config != 0x01) set_external_config()
6375 if (cp->config) set_external_config()
6411 struct mgmt_cp_set_public_address *cp = data; set_public_address() local
6421 if (!bacmp(&cp->bdaddr, BDADDR_ANY)) set_public_address()
6431 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr); set_public_address()
6432 bacpy(&hdev->public_addr, &cp->bdaddr); set_public_address()
6583 struct mgmt_cp_read_local_oob_ext_data *cp) read_local_ssp_oob_req()
6590 cp, sizeof(*cp)); read_local_ssp_oob_req()
6613 struct mgmt_cp_read_local_oob_ext_data *cp = data; read_local_oob_ext_data() local
6623 switch (cp->type) { read_local_oob_ext_data()
6659 switch (cp->type) { BIT()
6662 err = read_local_ssp_oob_req(hdev, sk, cp); BIT()
6748 rp->type = cp->type;
6934 struct mgmt_cp_add_advertising *cp = data; add_advertising() local
6951 flags = __le32_to_cpu(cp->flags); add_advertising()
6952 timeout = __le16_to_cpu(cp->timeout); add_advertising()
6958 if (cp->instance != 0x01 || (flags & ~supported_flags)) add_advertising()
6978 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) || add_advertising()
6979 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len, add_advertising()
6980 cp->scan_rsp_len, false)) { add_advertising()
6989 hdev->adv_instance.adv_data_len = cp->adv_data_len; add_advertising()
6990 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len; add_advertising()
6992 if (cp->adv_data_len) add_advertising()
6993 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len); add_advertising()
6995 if (cp->scan_rsp_len) add_advertising()
6997 cp->data + cp->adv_data_len, cp->scan_rsp_len); add_advertising()
7080 struct mgmt_cp_remove_advertising *cp = data; remove_advertising() local
7091 if (cp->instance > 1) remove_advertising()
7386 struct hci_cp_write_le_host_supported cp; powered_update_hci() local
7388 cp.le = 0x01; powered_update_hci()
7389 cp.simul = 0x00; powered_update_hci()
7394 if (cp.le != lmp_host_le_capable(hdev) || powered_update_hci()
7395 cp.simul != lmp_host_le_br_capable(hdev)) powered_update_hci()
7397 sizeof(cp), &cp); powered_update_hci()
7746 struct mgmt_cp_unpair_device *cp = cmd->param; unpair_device_rsp() local
7748 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); unpair_device_rsp()
7757 struct mgmt_mode *cp; mgmt_powering_down() local
7763 cp = cmd->param; mgmt_powering_down()
7764 if (!cp->val) mgmt_powering_down()
7810 struct mgmt_cp_disconnect *cp; mgmt_disconnect_failed() local
7820 cp = cmd->param; mgmt_disconnect_failed()
7822 if (bacmp(bdaddr, &cp->addr.bdaddr)) mgmt_disconnect_failed()
7825 if (cp->addr.type != bdaddr_type) mgmt_disconnect_failed()
8033 struct hci_cp_write_eir cp; clear_eir() local
8040 memset(&cp, 0, sizeof(cp)); clear_eir()
8042 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); clear_eir()
3173 send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp) send_pin_code_neg_reply() argument
6582 read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, struct mgmt_cp_read_local_oob_ext_data *cp) read_local_ssp_oob_req() argument
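Note on the mgmt.c hits above: every handler follows one decoding pattern — cast the opaque data pointer to its struct mgmt_cp_* command type, convert little-endian wire fields with __le16_to_cpu(), and range-check each value before touching hdev state. A minimal sketch of that shape, with the kernel types stubbed out for illustration (the real definitions live in include/net/bluetooth/mgmt.h; 0x0004..0x4000 is the LE scan range from the Bluetooth spec):

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the kernel type; the real definition lives in
     * include/net/bluetooth/mgmt.h. */
    struct mgmt_cp_set_scan_params {
        uint16_t interval;   /* little-endian on the wire */
        uint16_t window;     /* little-endian on the wire */
    };

    /* Assume a little-endian host, so the wire value is used as-is;
     * the kernel uses __le16_to_cpu() to make this portable. */
    static uint16_t le16_to_cpu_sketch(uint16_t v)
    {
        return v;
    }

    /* Shape of a mgmt command handler: check the length, decode the
     * little-endian fields, validate, and only then act. Returns 0 on
     * success, -1 for invalid parameters. */
    static int set_scan_params_sketch(const void *data, size_t len)
    {
        const struct mgmt_cp_set_scan_params *cp = data;
        uint16_t interval, window;

        if (len < sizeof(*cp))
            return -1;

        interval = le16_to_cpu_sketch(cp->interval);
        window = le16_to_cpu_sketch(cp->window);

        /* Reject out-of-range values; the window may not exceed the
         * interval. */
        if (interval < 0x0004 || interval > 0x4000)
            return -1;
        if (window < 0x0004 || window > interval)
            return -1;

        return 0;
    }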
H A Dhci_event.c786 struct hci_cp_read_clock *cp; hci_cc_read_clock() local
799 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); hci_cc_read_clock()
800 if (!cp) hci_cc_read_clock()
803 if (cp->which == 0x00) { hci_cc_read_clock()
896 struct hci_cp_pin_code_reply *cp; hci_cc_pin_code_reply() local
909 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); hci_cc_pin_code_reply()
910 if (!cp) hci_cc_pin_code_reply()
913 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); hci_cc_pin_code_reply()
915 conn->pin_length = cp->pin_len; hci_cc_pin_code_reply()
1116 struct hci_cp_le_set_scan_param *cp; hci_cc_le_set_scan_param() local
1124 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); hci_cc_le_set_scan_param()
1125 if (!cp) hci_cc_le_set_scan_param()
1130 hdev->le_scan_type = cp->type; hci_cc_le_set_scan_param()
1167 struct hci_cp_le_set_scan_enable *cp; hci_cc_le_set_scan_enable() local
1175 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); hci_cc_le_set_scan_enable()
1176 if (!cp) hci_cc_le_set_scan_enable()
1181 switch (cp->enable) { hci_cc_le_set_scan_enable()
1226 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable); hci_cc_le_set_scan_enable()
1395 struct hci_cp_le_set_adv_param *cp; hci_cc_set_adv_param() local
1403 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); hci_cc_set_adv_param()
1404 if (!cp) hci_cc_set_adv_param()
1408 hdev->adv_addr_type = cp->own_address_type; hci_cc_set_adv_param()
1508 struct hci_cp_create_conn *cp; hci_cs_create_conn() local
1513 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); hci_cs_create_conn()
1514 if (!cp) hci_cs_create_conn()
1519 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); hci_cs_create_conn()
1521 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn); hci_cs_create_conn()
1534 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, hci_cs_create_conn()
1546 struct hci_cp_add_sco *cp; hci_cs_add_sco() local
1555 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); hci_cs_add_sco()
1556 if (!cp) hci_cs_add_sco()
1559 handle = __le16_to_cpu(cp->handle); hci_cs_add_sco()
1581 struct hci_cp_auth_requested *cp; hci_cs_auth_requested() local
1589 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); hci_cs_auth_requested()
1590 if (!cp) hci_cs_auth_requested()
1595 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); hci_cs_auth_requested()
1608 struct hci_cp_set_conn_encrypt *cp; hci_cs_set_conn_encrypt() local
1616 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); hci_cs_set_conn_encrypt()
1617 if (!cp) hci_cs_set_conn_encrypt()
1622 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); hci_cs_set_conn_encrypt()
1658 struct hci_cp_remote_name_req cp; hci_resolve_name() local
1660 memset(&cp, 0, sizeof(cp)); hci_resolve_name()
1662 bacpy(&cp.bdaddr, &e->data.bdaddr); hci_resolve_name()
1663 cp.pscan_rep_mode = e->data.pscan_rep_mode; hci_resolve_name()
1664 cp.pscan_mode = e->data.pscan_mode; hci_resolve_name()
1665 cp.clock_offset = e->data.clock_offset; hci_resolve_name()
1667 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); hci_resolve_name()
1741 struct hci_cp_remote_name_req *cp; hci_cs_remote_name_req() local
1751 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); hci_cs_remote_name_req()
1752 if (!cp) hci_cs_remote_name_req()
1757 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); hci_cs_remote_name_req()
1760 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); hci_cs_remote_name_req()
1784 struct hci_cp_read_remote_features *cp; hci_cs_read_remote_features() local
1792 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); hci_cs_read_remote_features()
1793 if (!cp) hci_cs_read_remote_features()
1798 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); hci_cs_read_remote_features()
1811 struct hci_cp_read_remote_ext_features *cp; hci_cs_read_remote_ext_features() local
1819 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); hci_cs_read_remote_ext_features()
1820 if (!cp) hci_cs_read_remote_ext_features()
1825 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); hci_cs_read_remote_ext_features()
1838 struct hci_cp_setup_sync_conn *cp; hci_cs_setup_sync_conn() local
1847 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); hci_cs_setup_sync_conn()
1848 if (!cp) hci_cs_setup_sync_conn()
1851 handle = __le16_to_cpu(cp->handle); hci_cs_setup_sync_conn()
1873 struct hci_cp_sniff_mode *cp; hci_cs_sniff_mode() local
1881 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); hci_cs_sniff_mode()
1882 if (!cp) hci_cs_sniff_mode()
1887 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); hci_cs_sniff_mode()
1900 struct hci_cp_exit_sniff_mode *cp; hci_cs_exit_sniff_mode() local
1908 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); hci_cs_exit_sniff_mode()
1909 if (!cp) hci_cs_exit_sniff_mode()
1914 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); hci_cs_exit_sniff_mode()
1927 struct hci_cp_disconnect *cp; hci_cs_disconnect() local
1933 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); hci_cs_disconnect()
1934 if (!cp) hci_cs_disconnect()
1939 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); hci_cs_disconnect()
1949 struct hci_cp_create_phy_link *cp; hci_cs_create_phylink() local
1953 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK); hci_cs_create_phylink()
1954 if (!cp) hci_cs_create_phylink()
1962 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle); hci_cs_create_phylink()
1966 amp_write_remote_assoc(hdev, cp->phy_handle); hci_cs_create_phylink()
1974 struct hci_cp_accept_phy_link *cp; hci_cs_accept_phylink() local
1981 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK); hci_cs_accept_phylink()
1982 if (!cp) hci_cs_accept_phylink()
1985 amp_write_remote_assoc(hdev, cp->phy_handle); hci_cs_accept_phylink()
1990 struct hci_cp_le_create_conn *cp; hci_cs_le_create_conn() local
2002 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); hci_cs_le_create_conn()
2003 if (!cp) hci_cs_le_create_conn()
2008 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); hci_cs_le_create_conn()
2016 conn->init_addr_type = cp->own_address_type; hci_cs_le_create_conn()
2017 if (cp->own_address_type == ADDR_LE_DEV_RANDOM) hci_cs_le_create_conn()
2022 conn->resp_addr_type = cp->peer_addr_type; hci_cs_le_create_conn()
2023 bacpy(&conn->resp_addr, &cp->peer_addr); hci_cs_le_create_conn()
2030 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR) hci_cs_le_create_conn()
2041 struct hci_cp_le_read_remote_features *cp; hci_cs_le_read_remote_features() local
2049 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); hci_cs_le_read_remote_features()
2050 if (!cp) hci_cs_le_read_remote_features()
2055 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); hci_cs_le_read_remote_features()
2068 struct hci_cp_le_start_enc *cp; hci_cs_le_start_enc() local
2078 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); hci_cs_le_start_enc()
2079 if (!cp) hci_cs_le_start_enc()
2082 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); hci_cs_le_start_enc()
2098 struct hci_cp_switch_role *cp; hci_cs_switch_role() local
2106 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); hci_cs_switch_role()
2107 if (!cp) hci_cs_switch_role()
2112 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); hci_cs_switch_role()
2263 struct hci_cp_read_remote_features cp; hci_conn_complete_evt() local
2264 cp.handle = ev->handle; hci_conn_complete_evt()
2266 sizeof(cp), &cp); hci_conn_complete_evt()
2273 struct hci_cp_change_conn_ptype cp; hci_conn_complete_evt() local
2274 cp.handle = ev->handle; hci_conn_complete_evt()
2275 cp.pkt_type = cpu_to_le16(conn->pkt_type); hci_conn_complete_evt()
2276 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), hci_conn_complete_evt()
2277 &cp); hci_conn_complete_evt()
2303 struct hci_cp_reject_conn_req cp; hci_reject_conn() local
2305 bacpy(&cp.bdaddr, bdaddr); hci_reject_conn()
2306 cp.reason = HCI_ERROR_REJ_BAD_ADDR; hci_reject_conn()
2307 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); hci_reject_conn()
2373 struct hci_cp_accept_conn_req cp; hci_conn_request_evt() local
2376 bacpy(&cp.bdaddr, &ev->bdaddr); hci_conn_request_evt()
2379 cp.role = 0x00; /* Become master */ hci_conn_request_evt()
2381 cp.role = 0x01; /* Remain slave */ hci_conn_request_evt()
2383 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); hci_conn_request_evt()
2385 struct hci_cp_accept_sync_conn_req cp; hci_conn_request_evt() local
2388 bacpy(&cp.bdaddr, &ev->bdaddr); hci_conn_request_evt()
2389 cp.pkt_type = cpu_to_le16(conn->pkt_type); hci_conn_request_evt()
2391 cp.tx_bandwidth = cpu_to_le32(0x00001f40); hci_conn_request_evt()
2392 cp.rx_bandwidth = cpu_to_le32(0x00001f40); hci_conn_request_evt()
2393 cp.max_latency = cpu_to_le16(0xffff); hci_conn_request_evt()
2394 cp.content_format = cpu_to_le16(hdev->voice_setting); hci_conn_request_evt()
2395 cp.retrans_effort = 0xff; hci_conn_request_evt()
2397 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), hci_conn_request_evt()
2398 &cp); hci_conn_request_evt()
2529 struct hci_cp_set_conn_encrypt cp; hci_auth_complete_evt() local
2530 cp.handle = ev->handle; hci_auth_complete_evt()
2531 cp.encrypt = 0x01; hci_auth_complete_evt()
2532 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), hci_auth_complete_evt()
2533 &cp); hci_auth_complete_evt()
2549 struct hci_cp_set_conn_encrypt cp; hci_auth_complete_evt() local
2550 cp.handle = ev->handle; hci_auth_complete_evt()
2551 cp.encrypt = 0x01; hci_auth_complete_evt()
2552 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), hci_auth_complete_evt()
2553 &cp); hci_auth_complete_evt()
2594 struct hci_cp_auth_requested cp; hci_remote_name_evt() local
2598 cp.handle = __cpu_to_le16(conn->handle); hci_remote_name_evt()
2599 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); hci_remote_name_evt()
2723 struct hci_cp_read_remote_ext_features cp; hci_remote_features_evt() local
2724 cp.handle = ev->handle; hci_remote_features_evt()
2725 cp.page = 0x01; hci_remote_features_evt()
2727 sizeof(cp), &cp); hci_remote_features_evt()
2732 struct hci_cp_remote_name_req cp; hci_remote_features_evt() local
2733 memset(&cp, 0, sizeof(cp)); hci_remote_features_evt()
2734 bacpy(&cp.bdaddr, &conn->dst); hci_remote_features_evt()
2735 cp.pscan_rep_mode = 0x02; hci_remote_features_evt()
2736 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); hci_remote_features_evt()
3437 struct hci_cp_link_key_reply cp; hci_link_key_request_evt() local
3480 bacpy(&cp.bdaddr, &ev->bdaddr); hci_link_key_request_evt()
3481 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); hci_link_key_request_evt()
3483 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); hci_link_key_request_evt()
3721 struct hci_cp_remote_name_req cp; hci_remote_ext_features_evt() local
3722 memset(&cp, 0, sizeof(cp)); hci_remote_ext_features_evt()
3723 bacpy(&cp.bdaddr, &conn->dst); hci_remote_ext_features_evt()
3724 cp.pscan_rep_mode = 0x02; hci_remote_ext_features_evt()
3725 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); hci_remote_ext_features_evt()
3999 struct hci_cp_io_capability_reply cp; hci_io_capa_request_evt() local
4001 bacpy(&cp.bdaddr, &ev->bdaddr); hci_io_capa_request_evt()
4004 cp.capability = (conn->io_capability == 0x04) ? hci_io_capa_request_evt()
4025 cp.authentication = conn->auth_type; hci_io_capa_request_evt()
4026 cp.oob_data = bredr_oob_data_present(conn); hci_io_capa_request_evt()
4029 sizeof(cp), &cp); hci_io_capa_request_evt()
4031 struct hci_cp_io_capability_neg_reply cp; hci_io_capa_request_evt() local
4033 bacpy(&cp.bdaddr, &ev->bdaddr); hci_io_capa_request_evt()
4034 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; hci_io_capa_request_evt()
4037 sizeof(cp), &cp); hci_io_capa_request_evt()
4277 struct hci_cp_remote_oob_data_neg_reply cp; hci_remote_oob_data_request_evt() local
4279 bacpy(&cp.bdaddr, &ev->bdaddr); hci_remote_oob_data_request_evt()
4281 sizeof(cp), &cp); hci_remote_oob_data_request_evt()
4286 struct hci_cp_remote_oob_ext_data_reply cp; hci_remote_oob_data_request_evt() local
4288 bacpy(&cp.bdaddr, &ev->bdaddr); hci_remote_oob_data_request_evt()
4290 memset(cp.hash192, 0, sizeof(cp.hash192)); hci_remote_oob_data_request_evt()
4291 memset(cp.rand192, 0, sizeof(cp.rand192)); hci_remote_oob_data_request_evt()
4293 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); hci_remote_oob_data_request_evt()
4294 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); hci_remote_oob_data_request_evt()
4296 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); hci_remote_oob_data_request_evt()
4297 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); hci_remote_oob_data_request_evt()
4300 sizeof(cp), &cp); hci_remote_oob_data_request_evt()
4302 struct hci_cp_remote_oob_data_reply cp; hci_remote_oob_data_request_evt() local
4304 bacpy(&cp.bdaddr, &ev->bdaddr); hci_remote_oob_data_request_evt()
4305 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); hci_remote_oob_data_request_evt()
4306 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); hci_remote_oob_data_request_evt()
4309 sizeof(cp), &cp); hci_remote_oob_data_request_evt()
4570 struct hci_cp_le_read_remote_features cp; hci_le_conn_complete_evt() local
4572 cp.handle = __cpu_to_le16(conn->handle); hci_le_conn_complete_evt()
4575 sizeof(cp), &cp); hci_le_conn_complete_evt()
4931 struct hci_cp_le_ltk_reply cp; hci_le_ltk_request_evt() local
4958 memcpy(cp.ltk, ltk->val, sizeof(ltk->val)); hci_le_ltk_request_evt()
4959 cp.handle = cpu_to_le16(conn->handle); hci_le_ltk_request_evt()
4965 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); hci_le_ltk_request_evt()
4994 struct hci_cp_le_conn_param_req_neg_reply cp; send_conn_param_neg_reply() local
4996 cp.handle = cpu_to_le16(handle); send_conn_param_neg_reply()
4997 cp.reason = reason; send_conn_param_neg_reply()
4999 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), send_conn_param_neg_reply()
5000 &cp); send_conn_param_neg_reply()
5007 struct hci_cp_le_conn_param_req_reply cp; hci_le_remote_conn_param_req_evt() local
5050 cp.handle = ev->handle; hci_le_remote_conn_param_req_evt()
5051 cp.interval_min = ev->interval_min; hci_le_remote_conn_param_req_evt()
5052 cp.interval_max = ev->interval_max; hci_le_remote_conn_param_req_evt()
5053 cp.latency = ev->latency; hci_le_remote_conn_param_req_evt()
5054 cp.timeout = ev->timeout; hci_le_remote_conn_param_req_evt()
5055 cp.min_ce_len = 0; hci_le_remote_conn_param_req_evt()
5056 cp.max_ce_len = 0; hci_le_remote_conn_param_req_evt()
5058 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); hci_le_remote_conn_param_req_evt()
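Most hci_event.c hits above follow one of two shapes: command-status handlers call hci_sent_cmd_data(hdev, opcode) to recover the parameters of the command that just completed (status events do not echo them back), while event handlers build a fresh command by zeroing a stack cp, filling it, and calling hci_send_cmd(). A sketch of the first idea, with the hdev bookkeeping reduced to a hypothetical struct:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the skb that hdev keeps for the last
     * command it sent (hdev->sent_cmd in the real driver). */
    struct sent_cmd {
        uint16_t opcode;     /* opcode of the command in flight */
        size_t   param_len;
        uint8_t  param[64];  /* copy of the parameter block */
    };

    /* Like hci_sent_cmd_data(): hand back the parameter block only when
     * the pending command's opcode matches what the caller expects, so a
     * stale or unrelated command can never be misparsed. */
    static const void *sent_cmd_data(const struct sent_cmd *sc, uint16_t opcode)
    {
        if (!sc || sc->opcode != opcode)
            return NULL;
        return sc->param;
    }

Every "if (!cp)" hit above is the NULL check on this lookup: if the controller answers for a command the host no longer has queued, the handler bails out instead of decoding garbage.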
H A Dhci_request.c146 struct hci_cp_le_set_scan_enable cp; hci_req_add_le_scan_disable() local
148 memset(&cp, 0, sizeof(cp)); hci_req_add_le_scan_disable()
149 cp.enable = LE_SCAN_DISABLE; hci_req_add_le_scan_disable()
150 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); hci_req_add_le_scan_disable()
156 struct hci_cp_le_add_to_white_list cp; add_to_white_list() local
158 cp.bdaddr_type = params->addr_type; add_to_white_list()
159 bacpy(&cp.bdaddr, &params->addr); add_to_white_list()
161 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp); add_to_white_list()
178 struct hci_cp_le_del_from_white_list cp; update_white_list() local
188 cp.bdaddr_type = b->bdaddr_type; update_white_list()
189 bacpy(&cp.bdaddr, &b->bdaddr); update_white_list()
192 sizeof(cp), &cp); update_white_list()
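The hci_request.c hits show the request-builder variant of the same idiom: a cp struct is zeroed on the stack, filled, and queued with hci_req_add(); nothing goes on the wire until the whole request is run. A self-contained sketch, with hci_req_add() reduced to a hypothetical append into a flat buffer (the real function copies the parameters into an skb chained on req->cmd_q):

    #include <stdint.h>
    #include <string.h>

    #define LE_SCAN_DISABLE           0x00
    #define HCI_OP_LE_SET_SCAN_ENABLE 0x200c  /* OGF 0x08, OCF 0x000c */

    struct le_set_scan_enable {
        uint8_t enable;
        uint8_t filter_dup;
    };

    /* Hypothetical request object standing in for struct hci_request. */
    struct req {
        uint8_t buf[256];
        size_t  len;
    };

    static void req_add(struct req *r, uint16_t opcode,
                        const void *param, uint8_t plen)
    {
        if (r->len + 3 + plen > sizeof(r->buf))
            return;                       /* sketch: drop on overflow */
        r->buf[r->len++] = opcode & 0xff; /* HCI command header, LE order */
        r->buf[r->len++] = opcode >> 8;
        r->buf[r->len++] = plen;
        memcpy(r->buf + r->len, param, plen);
        r->len += plen;
    }

    /* Mirrors hci_req_add_le_scan_disable(): memset first so no stack
     * garbage leaks into reserved fields on the wire. */
    static void add_le_scan_disable(struct req *r)
    {
        struct le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        req_add(r, HCI_OP_LE_SET_SCAN_ENABLE, &cp, sizeof(cp));
    }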
H A Dsco.c729 struct hci_cp_accept_conn_req cp; sco_conn_defer_accept() local
731 bacpy(&cp.bdaddr, &conn->dst); sco_conn_defer_accept()
732 cp.role = 0x00; /* Ignored */ sco_conn_defer_accept()
734 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); sco_conn_defer_accept()
736 struct hci_cp_accept_sync_conn_req cp; sco_conn_defer_accept() local
738 bacpy(&cp.bdaddr, &conn->dst); sco_conn_defer_accept()
739 cp.pkt_type = cpu_to_le16(conn->pkt_type); sco_conn_defer_accept()
741 cp.tx_bandwidth = cpu_to_le32(0x00001f40); sco_conn_defer_accept()
742 cp.rx_bandwidth = cpu_to_le32(0x00001f40); sco_conn_defer_accept()
743 cp.content_format = cpu_to_le16(setting); sco_conn_defer_accept()
748 cp.max_latency = cpu_to_le16(0x0008); sco_conn_defer_accept()
750 cp.max_latency = cpu_to_le16(0x000D); sco_conn_defer_accept()
751 cp.retrans_effort = 0x02; sco_conn_defer_accept()
754 cp.max_latency = cpu_to_le16(0xffff); sco_conn_defer_accept()
755 cp.retrans_effort = 0xff; sco_conn_defer_accept()
760 sizeof(cp), &cp); sco_conn_defer_accept()
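In the sco.c hits, sco_conn_defer_accept() picks between the legacy Accept_Connection_Request (plain SCO) and Accept_Synchronous_Connection_Request (eSCO), and the max_latency/retrans_effort pairs encode the negotiated quality: a tight latency budget with retransmission effort 0x02 (retransmit, optimize for link quality) for transparent air mode, or the 0xffff/0xff wildcards meaning "don't care, let the controller pick". A hedged reduction of that choice, with the air-mode and packet-type tests collapsed to booleans:

    #include <stdint.h>

    struct sync_params {
        uint16_t max_latency;    /* ms; 0xffff = don't care */
        uint8_t  retrans_effort; /* 0xff = don't care */
    };

    /* Hypothetical reduction of sco_conn_defer_accept(): transparent
     * air mode asks for 8 ms when 2-EV3 packets are usable, else 13 ms,
     * with effort 0x02; otherwise both fields are wildcards. */
    static struct sync_params choose_sync_params(int transparent, int has_2ev3)
    {
        struct sync_params p;

        if (transparent) {
            p.max_latency = has_2ev3 ? 0x0008 : 0x000D;
            p.retrans_effort = 0x02;
        } else {
            p.max_latency = 0xffff;
            p.retrans_effort = 0xff;
        }
        return p;
    }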
/linux-4.1.27/arch/sparc/prom/
H A Dbootstr_32.c19 char *cp, *arg; prom_getbootargs() local
28 cp = barg_buf; prom_getbootargs()
36 if (cp >= barg_buf + BARG_LEN - 2) prom_getbootargs()
39 *cp++ = *arg++; prom_getbootargs()
41 *cp++ = ' '; prom_getbootargs()
42 if (cp >= barg_buf + BARG_LEN - 1) prom_getbootargs()
46 *cp = 0; prom_getbootargs()
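The prom_getbootargs() hits are a bounded string join: each argument is copied byte-by-byte into barg_buf, a space is appended between arguments, and the two guard checks reserve room for the separator and the terminating NUL. The same logic as a standalone sketch (BARG_LEN is the kernel's fixed buffer size; 256 here is an assumption):

    #include <stddef.h>

    #define BARG_LEN 256  /* assumed buffer size for illustration */

    /* Join argv into buf with single-space separators, never writing
     * past the buffer: stop copying when only the separator and NUL
     * would fit (BARG_LEN - 2), and stop appending arguments when only
     * the NUL would fit (BARG_LEN - 1). Returns the joined length. */
    static size_t join_args(char *buf, char *const argv[], int argc)
    {
        char *cp = buf;

        for (int i = 0; i < argc; i++) {
            const char *arg = argv[i];

            while (*arg) {
                if (cp >= buf + BARG_LEN - 2)
                    goto done;
                *cp++ = *arg++;
            }
            *cp++ = ' ';
            if (cp >= buf + BARG_LEN - 1)
                break;
        }
    done:
        *cp = '\0';
        return (size_t)(cp - buf);
    }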
/linux-4.1.27/drivers/net/ethernet/broadcom/
H A Dcnic.c190 struct cnic_local *cp = dev->cnic_priv; cnic_ctx_wr() local
191 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_ctx_wr()
204 struct cnic_local *cp = dev->cnic_priv; cnic_ctx_tbl_wr() local
205 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_ctx_tbl_wr()
217 struct cnic_local *cp = dev->cnic_priv; cnic_ring_ctl() local
218 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_ring_ctl()
234 struct cnic_local *cp = dev->cnic_priv; cnic_reg_wr_ind() local
235 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_reg_wr_ind()
247 struct cnic_local *cp = dev->cnic_priv; cnic_reg_rd_ind() local
248 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_reg_rd_ind()
260 struct cnic_local *cp = dev->cnic_priv; cnic_ulp_ctl() local
261 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_ulp_ctl()
285 struct cnic_local *cp = dev->cnic_priv; cnic_spq_completion() local
286 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_spq_completion()
294 static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) cnic_get_l5_cid() argument
298 if (!cp->ctx_tbl) cnic_get_l5_cid()
301 for (i = 0; i < cp->max_cid_space; i++) { cnic_get_l5_cid()
302 if (cp->ctx_tbl[i].cid == cid) { cnic_get_l5_cid()
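cnic_get_l5_cid() above is a straight linear scan: given a hardware context id, walk ctx_tbl until the matching entry is found and hand back its table index as the l5_cid. A sketch with the driver structs pared down to the one field involved:

    #include <stdint.h>

    struct ctx_entry {
        uint32_t cid;   /* hardware context id */
    };

    /* Linear cid -> l5_cid lookup, as in cnic_get_l5_cid(): the table
     * index itself is the l5_cid. Returns 0 and fills *l5_cid on a hit,
     * -1 when the table is absent or the cid is unknown. */
    static int get_l5_cid(const struct ctx_entry *tbl, uint32_t n,
                          uint32_t cid, uint32_t *l5_cid)
    {
        if (!tbl)
            return -1;

        for (uint32_t i = 0; i < n; i++) {
            if (tbl[i].cid == cid) {
                *l5_cid = i;
                return 0;
            }
        }
        return -1;
    }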
310 static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, cnic_send_nlmsg() argument
318 struct cnic_uio_dev *udev = cp->udev; cnic_send_nlmsg()
347 ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); cnic_send_nlmsg()
350 cp->ulp_handle[CNIC_ULP_ISCSI], cnic_send_nlmsg()
371 struct cnic_local *cp; cnic_iscsi_nl_msg_recv() local
380 cp = dev->cnic_priv; cnic_iscsi_nl_msg_recv()
385 if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) { cnic_iscsi_nl_msg_recv()
389 csk = &cp->csk_tbl[l5_cid]; cnic_iscsi_nl_msg_recv()
409 cnic_cm_upcall(cp, csk, cnic_iscsi_nl_msg_recv()
483 struct cnic_local *cp = dev->cnic_priv; cnic_register_driver() local
485 clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]); cnic_register_driver()
496 struct cnic_local *cp = dev->cnic_priv; cnic_register_driver() local
498 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) cnic_register_driver()
525 struct cnic_local *cp = dev->cnic_priv; cnic_unregister_driver() local
527 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { cnic_unregister_driver()
561 struct cnic_local *cp = dev->cnic_priv; cnic_register_device() local
575 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { cnic_register_device()
582 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); cnic_register_device()
583 cp->ulp_handle[ulp_type] = ulp_ctx; cnic_register_device()
585 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); cnic_register_device()
589 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type])) cnic_register_device()
590 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]); cnic_register_device()
603 struct cnic_local *cp = dev->cnic_priv; cnic_unregister_device() local
612 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); cnic_unregister_device()
615 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { cnic_unregister_device()
616 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); cnic_unregister_device()
631 while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) && cnic_unregister_device()
636 if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) cnic_unregister_device()
777 struct cnic_local *cp = dev->cnic_priv; cnic_alloc_dma() local
805 cp->setup_pgtbl(dev, dma); cnic_alloc_dma()
816 struct cnic_local *cp = dev->cnic_priv; cnic_free_context() local
819 for (i = 0; i < cp->ctx_blks; i++) { cnic_free_context()
820 if (cp->ctx_arr[i].ctx) { cnic_free_context()
821 dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size, cnic_free_context()
822 cp->ctx_arr[i].ctx, cnic_free_context()
823 cp->ctx_arr[i].mapping); cnic_free_context()
824 cp->ctx_arr[i].ctx = NULL; cnic_free_context()
868 struct cnic_local *cp = dev->cnic_priv; cnic_free_resc() local
869 struct cnic_uio_dev *udev = cp->udev; cnic_free_resc()
873 cp->udev = NULL; cnic_free_resc()
879 kfree(cp->ctx_arr); cnic_free_resc()
880 cp->ctx_arr = NULL; cnic_free_resc()
881 cp->ctx_blks = 0; cnic_free_resc()
883 cnic_free_dma(dev, &cp->gbl_buf_info); cnic_free_resc()
884 cnic_free_dma(dev, &cp->kwq_info); cnic_free_resc()
885 cnic_free_dma(dev, &cp->kwq_16_data_info); cnic_free_resc()
886 cnic_free_dma(dev, &cp->kcq2.dma); cnic_free_resc()
887 cnic_free_dma(dev, &cp->kcq1.dma); cnic_free_resc()
888 kfree(cp->iscsi_tbl); cnic_free_resc()
889 cp->iscsi_tbl = NULL; cnic_free_resc()
890 kfree(cp->ctx_tbl); cnic_free_resc()
891 cp->ctx_tbl = NULL; cnic_free_resc()
893 cnic_free_id_tbl(&cp->fcoe_cid_tbl); cnic_free_resc()
894 cnic_free_id_tbl(&cp->cid_tbl); cnic_free_resc()
899 struct cnic_local *cp = dev->cnic_priv; cnic_alloc_context() local
901 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { cnic_alloc_context()
904 cp->ctx_blk_size = CNIC_PAGE_SIZE; cnic_alloc_context()
905 cp->cids_per_blk = CNIC_PAGE_SIZE / 128; cnic_alloc_context()
906 arr_size = BNX2_MAX_CID / cp->cids_per_blk * cnic_alloc_context()
908 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); cnic_alloc_context()
909 if (cp->ctx_arr == NULL) cnic_alloc_context()
924 for (j = lo; j < hi; j += cp->cids_per_blk, k++) cnic_alloc_context()
925 cp->ctx_arr[k].cid = j; cnic_alloc_context()
928 cp->ctx_blks = k; cnic_alloc_context()
929 if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) { cnic_alloc_context()
930 cp->ctx_blks = 0; cnic_alloc_context()
934 for (i = 0; i < cp->ctx_blks; i++) { cnic_alloc_context()
935 cp->ctx_arr[i].ctx = cnic_alloc_context()
938 &cp->ctx_arr[i].mapping, cnic_alloc_context()
940 if (cp->ctx_arr[i].ctx == NULL) cnic_alloc_context()
1012 struct cnic_local *cp = udev->dev->cnic_priv; __cnic_alloc_uio_rings() local
1024 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; __cnic_alloc_uio_rings()
1040 struct cnic_local *cp = dev->cnic_priv; cnic_alloc_uio_rings() local
1050 cp->udev = udev; cnic_alloc_uio_rings()
1071 cp->udev = udev; cnic_alloc_uio_rings()
1082 struct cnic_local *cp = dev->cnic_priv; cnic_init_uio() local
1083 struct cnic_uio_dev *udev = cp->udev; cnic_init_uio()
1099 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & cnic_init_uio()
1101 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) cnic_init_uio()
1110 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & cnic_init_uio()
1112 uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); cnic_init_uio()
1148 struct cnic_local *cp = dev->cnic_priv; cnic_alloc_bnx2_resc() local
1151 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1); cnic_alloc_bnx2_resc()
1154 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; cnic_alloc_bnx2_resc()
1156 ret = cnic_alloc_kcq(dev, &cp->kcq1, true); cnic_alloc_bnx2_resc()
1181 struct cnic_local *cp = dev->cnic_priv; cnic_alloc_bnx2x_context() local
1183 int ctx_blk_size = cp->ethdev->ctx_blk_size; cnic_alloc_bnx2x_context()
1186 total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space; cnic_alloc_bnx2x_context()
1191 if (blks > cp->ethdev->ctx_tbl_len) cnic_alloc_bnx2x_context()
1194 cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL); cnic_alloc_bnx2x_context()
1195 if (cp->ctx_arr == NULL) cnic_alloc_bnx2x_context()
1198 cp->ctx_blks = blks; cnic_alloc_bnx2x_context()
1199 cp->ctx_blk_size = ctx_blk_size; cnic_alloc_bnx2x_context()
1201 cp->ctx_align = 0; cnic_alloc_bnx2x_context()
1203 cp->ctx_align = ctx_blk_size; cnic_alloc_bnx2x_context()
1205 cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; cnic_alloc_bnx2x_context()
1208 cp->ctx_arr[i].ctx = cnic_alloc_bnx2x_context()
1209 dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size, cnic_alloc_bnx2x_context()
1210 &cp->ctx_arr[i].mapping, cnic_alloc_bnx2x_context()
1212 if (cp->ctx_arr[i].ctx == NULL) cnic_alloc_bnx2x_context()
1215 if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) { cnic_alloc_bnx2x_context()
1216 if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { cnic_alloc_bnx2x_context()
1218 cp->ctx_blk_size += cp->ctx_align; cnic_alloc_bnx2x_context()
1229 struct cnic_local *cp = dev->cnic_priv; cnic_alloc_bnx2x_resc() local
1231 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_alloc_bnx2x_resc()
1234 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; cnic_alloc_bnx2x_resc()
1236 cp->max_cid_space = MAX_ISCSI_TBL_SZ; cnic_alloc_bnx2x_resc()
1237 cp->iscsi_start_cid = start_cid; cnic_alloc_bnx2x_resc()
1238 cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; cnic_alloc_bnx2x_resc()
1241 cp->max_cid_space += dev->max_fcoe_conn; cnic_alloc_bnx2x_resc()
1242 cp->fcoe_init_cid = ethdev->fcoe_init_cid; cnic_alloc_bnx2x_resc()
1243 if (!cp->fcoe_init_cid) cnic_alloc_bnx2x_resc()
1244 cp->fcoe_init_cid = 0x10; cnic_alloc_bnx2x_resc()
1247 cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, cnic_alloc_bnx2x_resc()
1249 if (!cp->iscsi_tbl) cnic_alloc_bnx2x_resc()
1252 cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * cnic_alloc_bnx2x_resc()
1253 cp->max_cid_space, GFP_KERNEL); cnic_alloc_bnx2x_resc()
1254 if (!cp->ctx_tbl) cnic_alloc_bnx2x_resc()
1258 cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; cnic_alloc_bnx2x_resc()
1259 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; cnic_alloc_bnx2x_resc()
1262 for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) cnic_alloc_bnx2x_resc()
1263 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; cnic_alloc_bnx2x_resc()
1265 pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / cnic_alloc_bnx2x_resc()
1273 for (i = 0, j = 0; i < cp->max_cid_space; i++) { cnic_alloc_bnx2x_resc()
1276 cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; cnic_alloc_bnx2x_resc()
1277 cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + cnic_alloc_bnx2x_resc()
1284 ret = cnic_alloc_kcq(dev, &cp->kcq1, false); cnic_alloc_bnx2x_resc()
1289 ret = cnic_alloc_kcq(dev, &cp->kcq2, true); cnic_alloc_bnx2x_resc()
1295 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); cnic_alloc_bnx2x_resc()
1303 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) cnic_alloc_bnx2x_resc()
1306 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; cnic_alloc_bnx2x_resc()
1308 cp->l2_rx_ring_size = 15; cnic_alloc_bnx2x_resc()
1325 static inline u32 cnic_kwq_avail(struct cnic_local *cp) cnic_kwq_avail() argument
1327 return cp->max_kwq_idx - cnic_kwq_avail()
1328 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx); cnic_kwq_avail()
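cnic_kwq_avail() is the classic masked-ring free-space formula: with free-running producer/consumer indices and max_kwq_idx a power-of-two-minus-one mask (MAX_KWQ_IDX), the in-flight count is (prod - con) & mask and the usable space is the mask minus that, which also sacrifices one slot so a full ring never looks empty. A checked example of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* The free-space math from cnic_kwq_avail(), assuming max_idx is a
     * power-of-two-minus-one mask. Indices are free-running; the mask
     * folds them into ring positions, so wraparound needs no special
     * case. */
    static uint32_t kwq_avail(uint32_t prod, uint32_t con, uint32_t max_idx)
    {
        return max_idx - ((prod - con) & max_idx);
    }

    int main(void)
    {
        /* 256-entry ring => mask 0xff; 10 queued entries leave 245
         * usable slots (one slot reserved to tell full from empty). */
        assert(kwq_avail(10, 0, 0xff) == 245);
        assert(kwq_avail(0, 0, 0xff) == 0xff);  /* empty ring */
        return 0;
    }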
1334 struct cnic_local *cp = dev->cnic_priv; cnic_submit_bnx2_kwqes() local
1341 spin_lock_bh(&cp->cnic_ulp_lock); cnic_submit_bnx2_kwqes()
1342 if (num_wqes > cnic_kwq_avail(cp) && cnic_submit_bnx2_kwqes()
1343 !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) { cnic_submit_bnx2_kwqes()
1344 spin_unlock_bh(&cp->cnic_ulp_lock); cnic_submit_bnx2_kwqes()
1348 clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); cnic_submit_bnx2_kwqes()
1350 prod = cp->kwq_prod_idx; cnic_submit_bnx2_kwqes()
1353 prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)]; cnic_submit_bnx2_kwqes()
1358 cp->kwq_prod_idx = prod; cnic_submit_bnx2_kwqes()
1360 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx); cnic_submit_bnx2_kwqes()
1362 spin_unlock_bh(&cp->cnic_ulp_lock); cnic_submit_bnx2_kwqes()
1366 static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, cnic_get_kwqe_16_data() argument
1369 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; cnic_get_kwqe_16_data()
1381 struct cnic_local *cp = dev->cnic_priv; cnic_submit_kwqe_16() local
1403 spin_lock_bh(&cp->cnic_ulp_lock); cnic_submit_kwqe_16()
1404 ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); cnic_submit_kwqe_16()
1405 spin_unlock_bh(&cp->cnic_ulp_lock); cnic_submit_kwqe_16()
1416 struct cnic_local *cp = dev->cnic_priv; cnic_reply_bnx2x_kcqes() local
1420 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); cnic_reply_bnx2x_kcqes()
1422 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], cnic_reply_bnx2x_kcqes()
1451 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_iscsi_init1() local
1457 cp->num_iscsi_tasks = req1->num_tasks_per_conn; cnic_bnx2x_iscsi_init1()
1458 cp->num_ccells = req1->num_ccells_per_conn; cnic_bnx2x_iscsi_init1()
1459 cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * cnic_bnx2x_iscsi_init1()
1460 cp->num_iscsi_tasks; cnic_bnx2x_iscsi_init1()
1461 cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * cnic_bnx2x_iscsi_init1()
1463 cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; cnic_bnx2x_iscsi_init1()
1464 pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; cnic_bnx2x_iscsi_init1()
1466 cp->num_cqs = req1->num_cqs; cnic_bnx2x_iscsi_init1()
1498 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); cnic_bnx2x_iscsi_init1()
1513 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); cnic_bnx2x_iscsi_init1()
1579 struct cnic_local *cp = dev->cnic_priv; cnic_free_bnx2x_conn_resc() local
1580 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; cnic_free_bnx2x_conn_resc()
1588 cnic_free_id(&cp->cid_tbl, ctx->cid); cnic_free_bnx2x_conn_resc()
1590 cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid); cnic_free_bnx2x_conn_resc()
1600 struct cnic_local *cp = dev->cnic_priv; cnic_alloc_bnx2x_conn_resc() local
1601 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; cnic_alloc_bnx2x_conn_resc()
1605 cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl); cnic_alloc_bnx2x_conn_resc()
1614 cid = cnic_alloc_new_id(&cp->cid_tbl); cnic_alloc_bnx2x_conn_resc()
1621 pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE; cnic_alloc_bnx2x_conn_resc()
1627 pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE; cnic_alloc_bnx2x_conn_resc()
1632 pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; cnic_alloc_bnx2x_conn_resc()
1647 struct cnic_local *cp = dev->cnic_priv; cnic_get_bnx2x_ctx() local
1648 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_get_bnx2x_ctx()
1649 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; cnic_get_bnx2x_ctx()
1650 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; cnic_get_bnx2x_ctx()
1655 if (cp->ctx_align) { cnic_get_bnx2x_ctx()
1656 unsigned long mask = cp->ctx_align - 1; cnic_get_bnx2x_ctx()
1658 if (cp->ctx_arr[blk].mapping & mask) cnic_get_bnx2x_ctx()
1659 align_off = cp->ctx_align - cnic_get_bnx2x_ctx()
1660 (cp->ctx_arr[blk].mapping & mask); cnic_get_bnx2x_ctx()
1662 ctx_map = cp->ctx_arr[blk].mapping + align_off + cnic_get_bnx2x_ctx()
1664 ctx = cp->ctx_arr[blk].ctx + align_off + cnic_get_bnx2x_ctx()
1677 struct cnic_local *cp = dev->cnic_priv; cnic_setup_bnx2x_ctx() local
1684 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; cnic_setup_bnx2x_ctx()
1796 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) { cnic_setup_bnx2x_ctx()
1819 ictx->ustorm_st_context.num_cqs = cp->num_cqs; cnic_setup_bnx2x_ctx()
1843 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; cnic_setup_bnx2x_ctx()
1844 for (i = 0; i < cp->num_cqs; i++) { cnic_setup_bnx2x_ctx()
1866 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_iscsi_ofld1() local
1896 ctx = &cp->ctx_tbl[l5_cid]; cnic_bnx2x_iscsi_ofld1()
1903 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { cnic_bnx2x_iscsi_ofld1()
1904 atomic_dec(&cp->iscsi_conn); cnic_bnx2x_iscsi_ofld1()
1909 atomic_dec(&cp->iscsi_conn); cnic_bnx2x_iscsi_ofld1()
1916 atomic_dec(&cp->iscsi_conn); cnic_bnx2x_iscsi_ofld1()
1921 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid); cnic_bnx2x_iscsi_ofld1()
1932 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_iscsi_update() local
1940 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) cnic_bnx2x_iscsi_update()
1943 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); cnic_bnx2x_iscsi_update()
1956 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_destroy_ramrod() local
1958 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; cnic_bnx2x_destroy_ramrod()
1982 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_iscsi_destroy() local
1986 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; cnic_bnx2x_iscsi_destroy()
2001 queue_delayed_work(cnic_wq, &cp->delete_task, delta); cnic_bnx2x_iscsi_destroy()
2011 atomic_dec(&cp->iscsi_conn); cnic_bnx2x_iscsi_destroy()
2111 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_connect() local
2120 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; cnic_bnx2x_connect()
2121 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; cnic_bnx2x_connect()
2143 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); cnic_bnx2x_connect()
2249 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_fcoe_stat() local
2255 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); cnic_bnx2x_fcoe_stat()
2257 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); cnic_bnx2x_fcoe_stat()
2273 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_fcoe_init1() local
2302 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); cnic_bnx2x_fcoe_init1()
2310 fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff; cnic_bnx2x_fcoe_init1()
2311 fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32; cnic_bnx2x_fcoe_init1()
2312 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; cnic_bnx2x_fcoe_init1()
2314 fcoe_init->sb_num = cp->status_blk_num; cnic_bnx2x_fcoe_init1()
2317 cp->kcq2.sw_prod_idx = 0; cnic_bnx2x_fcoe_init1()
2319 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); cnic_bnx2x_fcoe_init1()
2331 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_fcoe_ofld1() local
2362 ctx = &cp->ctx_tbl[l5_cid]; cnic_bnx2x_fcoe_ofld1()
2389 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); cnic_bnx2x_fcoe_ofld1()
2428 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_fcoe_enable() local
2438 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); cnic_bnx2x_fcoe_enable()
2456 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_fcoe_disable() local
2470 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); cnic_bnx2x_fcoe_disable()
2487 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_fcoe_destroy() local
2500 ctx = &cp->ctx_tbl[l5_cid]; cnic_bnx2x_fcoe_destroy()
2517 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000)); cnic_bnx2x_fcoe_destroy()
2530 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_delete_wait() local
2533 for (i = start_cid; i < cp->max_cid_space; i++) { cnic_bnx2x_delete_wait()
2534 struct cnic_context *ctx = &cp->ctx_tbl[i]; cnic_bnx2x_delete_wait()
2556 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_fcoe_fw_destroy() local
2564 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); cnic_bnx2x_fcoe_fw_destroy()
2574 struct cnic_local *cp = dev->cnic_priv; cnic_bnx2x_kwqe_err() local
2617 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0); cnic_bnx2x_kwqe_err()
2636 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id); cnic_bnx2x_kwqe_err()
2820 struct cnic_local *cp = dev->cnic_priv; service_kcqes() local
2828 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; service_kcqes()
2835 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; service_kcqes()
2862 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); service_kcqes()
2864 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], service_kcqes()
2865 cp->completed_kcq + i, j); service_kcqes()
2879 struct cnic_local *cp = dev->cnic_priv; cnic_get_kcqes() local
2891 cp->completed_kcq[kcqe_cnt++] = kcqe; cnic_get_kcqes()
2904 static int cnic_l2_completion(struct cnic_local *cp) cnic_l2_completion() argument
2907 struct cnic_uio_dev *udev = cp->udev; cnic_l2_completion()
2913 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) cnic_l2_completion()
2916 hw_cons = *cp->rx_cons_ptr; cnic_l2_completion()
2920 sw_cons = cp->rx_cons; cnic_l2_completion()
2938 static void cnic_chk_pkt_rings(struct cnic_local *cp) cnic_chk_pkt_rings() argument
2943 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) cnic_chk_pkt_rings()
2946 rx_cons = *cp->rx_cons_ptr; cnic_chk_pkt_rings()
2947 tx_cons = *cp->tx_cons_ptr; cnic_chk_pkt_rings()
2948 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { cnic_chk_pkt_rings()
2949 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) cnic_chk_pkt_rings()
2950 comp = cnic_l2_completion(cp); cnic_chk_pkt_rings()
2952 cp->tx_cons = tx_cons; cnic_chk_pkt_rings()
2953 cp->rx_cons = rx_cons; cnic_chk_pkt_rings()
2955 if (cp->udev) cnic_chk_pkt_rings()
2956 uio_event_notify(&cp->udev->cnic_uinfo); cnic_chk_pkt_rings()
2959 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); cnic_chk_pkt_rings()
2964 struct cnic_local *cp = dev->cnic_priv; cnic_service_bnx2_queues() local
2965 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; cnic_service_bnx2_queues()
2970 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; cnic_service_bnx2_queues()
2972 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { cnic_service_bnx2_queues()
2978 status_idx = (u16) *cp->kcq1.status_idx_ptr; cnic_service_bnx2_queues()
2981 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; cnic_service_bnx2_queues()
2984 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); cnic_service_bnx2_queues()
2986 cnic_chk_pkt_rings(cp); cnic_service_bnx2_queues()
3007 struct cnic_local *cp = dev->cnic_priv; cnic_service_bnx2_msix() local
3009 cp->last_status_idx = cnic_service_bnx2_queues(dev); cnic_service_bnx2_msix()
3011 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | cnic_service_bnx2_msix()
3012 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); cnic_service_bnx2_msix()
3017 struct cnic_local *cp = dev->cnic_priv; cnic_doirq() local
3020 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; cnic_doirq()
3022 prefetch(cp->status_blk.gen); cnic_doirq()
3023 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); cnic_doirq()
3025 tasklet_schedule(&cp->cnic_irq_task); cnic_doirq()
3032 struct cnic_local *cp = dev->cnic_priv; cnic_irq() local
3034 if (cp->ack_int) cnic_irq()
3035 cp->ack_int(dev); cnic_irq()
3078 struct cnic_local *cp = dev->cnic_priv; cnic_ack_bnx2x_msix() local
3080 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, cnic_ack_bnx2x_msix()
3086 struct cnic_local *cp = dev->cnic_priv; cnic_ack_bnx2x_e2_msix() local
3088 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, cnic_ack_bnx2x_e2_msix()
3094 struct cnic_local *cp = dev->cnic_priv; cnic_arm_bnx2x_msix() local
3096 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx, cnic_arm_bnx2x_msix()
3102 struct cnic_local *cp = dev->cnic_priv; cnic_arm_bnx2x_e2_msix() local
3104 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx, cnic_arm_bnx2x_e2_msix()
3132 struct cnic_local *cp = dev->cnic_priv; cnic_service_bnx2x_bh() local
3140 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); cnic_service_bnx2x_bh()
3142 CNIC_WR16(dev, cp->kcq1.io_addr, cnic_service_bnx2x_bh()
3143 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); cnic_service_bnx2x_bh()
3146 cp->arm_int(dev, status_idx); cnic_service_bnx2x_bh()
3150 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); cnic_service_bnx2x_bh()
3155 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + cnic_service_bnx2x_bh()
3158 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, cnic_service_bnx2x_bh()
3168 struct cnic_local *cp = dev->cnic_priv; cnic_service_bnx2x() local
3170 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) cnic_service_bnx2x()
3173 cnic_chk_pkt_rings(cp); cnic_service_bnx2x()
3178 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type) cnic_ulp_stop_one() argument
3183 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); cnic_ulp_stop_one()
3186 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], cnic_ulp_stop_one()
3192 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); cnic_ulp_stop_one()
3195 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) cnic_ulp_stop_one()
3196 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); cnic_ulp_stop_one()
3198 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); cnic_ulp_stop_one()
3203 struct cnic_local *cp = dev->cnic_priv; cnic_ulp_stop() local
3207 cnic_ulp_stop_one(cp, if_type); cnic_ulp_stop()
3212 struct cnic_local *cp = dev->cnic_priv; cnic_ulp_start() local
3219 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], cnic_ulp_start()
3225 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); cnic_ulp_start()
3228 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) cnic_ulp_start()
3229 ulp_ops->cnic_start(cp->ulp_handle[if_type]); cnic_ulp_start()
3231 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); cnic_ulp_start()
3237 struct cnic_local *cp = dev->cnic_priv; cnic_copy_ulp_stats() local
3242 ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type], cnic_copy_ulp_stats()
3245 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); cnic_copy_ulp_stats()
3275 struct cnic_local *cp = dev->cnic_priv; cnic_ctl() local
3276 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags); cnic_ctl()
3277 queue_delayed_work(cnic_wq, &cp->delete_task, 0); cnic_ctl()
3284 struct cnic_local *cp = dev->cnic_priv; cnic_ctl() local
3289 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { cnic_ctl()
3290 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; cnic_ctl()
3322 struct cnic_local *cp = dev->cnic_priv; cnic_ulp_init() local
3336 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) cnic_ulp_init()
3346 struct cnic_local *cp = dev->cnic_priv; cnic_ulp_exit() local
3360 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) cnic_ulp_exit()
3576 struct cnic_local *cp = dev->cnic_priv; cnic_cm_create() local
3582 if (cp->ctx_tbl) { cnic_cm_create()
3583 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; cnic_cm_create()
3589 csk1 = &cp->csk_tbl[l5_cid]; cnic_cm_create()
3621 struct cnic_local *cp = dev->cnic_priv; cnic_cm_cleanup() local
3623 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port)); cnic_cm_cleanup()
3743 struct cnic_local *cp = dev->cnic_priv; cnic_resolve_addr() local
3745 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); cnic_resolve_addr()
3751 struct cnic_local *cp = dev->cnic_priv; cnic_get_route() local
3799 if (cnic_alloc_id(&cp->csk_port_tbl, port_id)) cnic_get_route()
3805 port_id = cnic_alloc_new_id(&cp->csk_port_tbl); cnic_get_route()
3828 struct cnic_local *cp = csk->dev->cnic_priv; cnic_cm_connect() local
3831 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) cnic_cm_connect()
3857 struct cnic_local *cp = csk->dev->cnic_priv; cnic_cm_abort() local
3870 cp->close_conn(csk, opcode); cnic_cm_abort()
3900 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, cnic_cm_upcall() argument
3907 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); cnic_cm_upcall()
3936 struct cnic_local *cp = dev->cnic_priv; cnic_cm_process_offld_pg() local
3939 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; cnic_cm_process_offld_pg()
3952 cnic_cm_upcall(cp, csk, cnic_cm_process_offld_pg()
3967 struct cnic_local *cp = dev->cnic_priv; cnic_process_fcoe_term_conn() local
3970 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; cnic_process_fcoe_term_conn()
3979 struct cnic_local *cp = dev->cnic_priv; cnic_cm_process_kcqe() local
4001 csk = &cp->csk_tbl[l5_cid]; cnic_cm_process_kcqe()
4013 cnic_cm_upcall(cp, csk, cnic_cm_process_kcqe()
4026 cnic_cm_upcall(cp, csk, opcode); cnic_cm_process_kcqe()
4049 cp->close_conn(csk, opcode); cnic_cm_process_kcqe()
4057 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP); cnic_cm_process_kcqe()
4059 cnic_cm_upcall(cp, csk, opcode); cnic_cm_process_kcqe()
4080 struct cnic_local *cp = dev->cnic_priv; cnic_cm_free_mem() local
4082 kfree(cp->csk_tbl); cnic_cm_free_mem()
4083 cp->csk_tbl = NULL; cnic_cm_free_mem()
4084 cnic_free_id_tbl(&cp->csk_port_tbl); cnic_cm_free_mem()
4089 struct cnic_local *cp = dev->cnic_priv; cnic_cm_alloc_mem() local
4092 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, cnic_cm_alloc_mem()
4094 if (!cp->csk_tbl) cnic_cm_alloc_mem()
4099 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, cnic_cm_alloc_mem()
4136 struct cnic_local *cp = dev->cnic_priv; cnic_close_bnx2_conn() local
4139 cnic_cm_upcall(cp, csk, opcode); cnic_close_bnx2_conn()
4146 cnic_cm_upcall(cp, csk, opcode); cnic_close_bnx2_conn()
4165 struct cnic_local *cp = dev->cnic_priv; cnic_close_bnx2x_conn() local
4166 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; cnic_close_bnx2x_conn()
4199 cnic_cm_upcall(cp, csk, csk->state); cnic_close_bnx2x_conn()
4205 struct cnic_local *cp = dev->cnic_priv; cnic_cm_stop_bnx2x_hw() local
4207 if (!cp->ctx_tbl) cnic_cm_stop_bnx2x_hw()
4215 cancel_delayed_work(&cp->delete_task); cnic_cm_stop_bnx2x_hw()
4218 if (atomic_read(&cp->iscsi_conn) != 0) cnic_cm_stop_bnx2x_hw()
4220 atomic_read(&cp->iscsi_conn)); cnic_cm_stop_bnx2x_hw()
4257 struct cnic_local *cp; cnic_delete_task() local
4262 cp = container_of(work, struct cnic_local, delete_task.work); cnic_delete_task()
4263 dev = cp->dev; cnic_delete_task()
4265 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) { cnic_delete_task()
4268 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI); cnic_delete_task()
4271 cp->ethdev->drv_ctl(dev->netdev, &info); cnic_delete_task()
4274 for (i = 0; i < cp->max_cid_space; i++) { cnic_delete_task()
4275 struct cnic_context *ctx = &cp->ctx_tbl[i]; cnic_delete_task()
4295 atomic_dec(&cp->iscsi_conn); cnic_delete_task()
4302 queue_delayed_work(cnic_wq, &cp->delete_task, cnic_delete_task()
4309 struct cnic_local *cp = dev->cnic_priv; cnic_cm_open() local
4316 err = cp->start_cm(dev); cnic_cm_open()
4321 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); cnic_cm_open()
4330 cp->ulp_handle[CNIC_ULP_L4] = dev; cnic_cm_open()
4331 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); cnic_cm_open()
4341 struct cnic_local *cp = dev->cnic_priv; cnic_cm_shutdown() local
4344 if (!cp->csk_tbl) cnic_cm_shutdown()
4348 struct cnic_sock *csk = &cp->csk_tbl[i]; cnic_cm_shutdown()
4371 struct cnic_local *cp = dev->cnic_priv; cnic_setup_5709_context() local
4375 if (BNX2_CHIP(cp) != BNX2_CHIP_5709) cnic_setup_5709_context()
4378 for (i = 0; i < cp->ctx_blks; i++) { cnic_setup_5709_context()
4380 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; cnic_setup_5709_context()
4383 memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE); cnic_setup_5709_context()
4386 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); cnic_setup_5709_context()
4388 (u64) cp->ctx_arr[i].mapping >> 32); cnic_setup_5709_context()
4408 struct cnic_local *cp = dev->cnic_priv; cnic_free_irq() local
4409 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_free_irq()
4412 cp->disable_int_sync(dev); cnic_free_irq()
4413 tasklet_kill(&cp->cnic_irq_task); cnic_free_irq()
4420 struct cnic_local *cp = dev->cnic_priv; cnic_request_irq() local
4421 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_request_irq()
4426 tasklet_disable(&cp->cnic_irq_task); cnic_request_irq()
4433 struct cnic_local *cp = dev->cnic_priv; cnic_init_bnx2_irq() local
4434 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_init_bnx2_irq()
4438 int sblk_num = cp->status_blk_num; cnic_init_bnx2_irq()
4448 cp->last_status_idx = cp->status_blk.bnx2->status_idx; cnic_init_bnx2_irq()
4449 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, cnic_init_bnx2_irq()
4455 while (cp->status_blk.bnx2->status_completion_producer_index && cnic_init_bnx2_irq()
4463 if (cp->status_blk.bnx2->status_completion_producer_index) { cnic_init_bnx2_irq()
4469 struct status_block *sblk = cp->status_blk.gen; cnic_init_bnx2_irq()
4493 struct cnic_local *cp = dev->cnic_priv; cnic_enable_bnx2_int() local
4494 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_enable_bnx2_int()
4499 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | cnic_enable_bnx2_int()
4500 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); cnic_enable_bnx2_int()
4505 struct cnic_local *cp = dev->cnic_priv; cnic_disable_bnx2_int_sync() local
4506 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_disable_bnx2_int_sync()
4511 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | cnic_disable_bnx2_int_sync()
4519 struct cnic_local *cp = dev->cnic_priv; cnic_init_bnx2_tx_ring() local
4520 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_init_bnx2_tx_ring()
4521 struct cnic_uio_dev *udev = cp->udev; cnic_init_bnx2_tx_ring()
4527 struct status_block *s_blk = cp->status_blk.gen; cnic_init_bnx2_tx_ring()
4529 sb_id = cp->status_blk_num; cnic_init_bnx2_tx_ring()
4531 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; cnic_init_bnx2_tx_ring()
4533 struct status_block_msix *sblk = cp->status_blk.bnx2; cnic_init_bnx2_tx_ring()
4538 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; cnic_init_bnx2_tx_ring()
4540 cp->tx_cons = *cp->tx_cons_ptr; cnic_init_bnx2_tx_ring()
4543 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { cnic_init_bnx2_tx_ring()
4586 struct cnic_local *cp = dev->cnic_priv; cnic_init_bnx2_rx_ring() local
4587 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_init_bnx2_rx_ring()
4588 struct cnic_uio_dev *udev = cp->udev; cnic_init_bnx2_rx_ring()
4592 struct status_block *s_blk = cp->status_blk.gen; cnic_init_bnx2_rx_ring()
4595 sb_id = cp->status_blk_num; cnic_init_bnx2_rx_ring()
4597 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; cnic_init_bnx2_rx_ring()
4601 struct status_block_msix *sblk = cp->status_blk.bnx2; cnic_init_bnx2_rx_ring()
4603 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; cnic_init_bnx2_rx_ring()
4608 while (!(*cp->rx_cons_ptr != 0) && i < 10) { cnic_init_bnx2_rx_ring()
4614 cp->rx_cons = *cp->rx_cons_ptr; cnic_init_bnx2_rx_ring()
4630 int n = (i % cp->l2_rx_ring_size) + 1; cnic_init_bnx2_rx_ring()
4632 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); cnic_init_bnx2_rx_ring()
4633 rxbd->rx_bd_len = cp->l2_single_buf_size; cnic_init_bnx2_rx_ring()
4664 struct cnic_local *cp = dev->cnic_priv; cnic_set_bnx2_mac() local
4667 val = cp->func << 2; cnic_set_bnx2_mac()
4669 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); cnic_set_bnx2_mac()
4671 val = cnic_reg_rd_ind(dev, cp->shmem_base + cnic_set_bnx2_mac()
4678 val = cnic_reg_rd_ind(dev, cp->shmem_base + cnic_set_bnx2_mac()
4688 if (BNX2_CHIP(cp) != BNX2_CHIP_5709) cnic_set_bnx2_mac()
4698 struct cnic_local *cp = dev->cnic_priv; cnic_start_bnx2_hw() local
4699 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_start_bnx2_hw()
4700 struct status_block *sblk = cp->status_blk.gen; cnic_start_bnx2_hw()
4727 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; cnic_start_bnx2_hw()
4729 cp->max_kwq_idx = MAX_KWQ_IDX; cnic_start_bnx2_hw()
4730 cp->kwq_prod_idx = 0; cnic_start_bnx2_hw()
4731 cp->kwq_con_idx = 0; cnic_start_bnx2_hw()
4732 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); cnic_start_bnx2_hw()
4734 if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708) cnic_start_bnx2_hw()
4735 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; cnic_start_bnx2_hw()
4737 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; cnic_start_bnx2_hw()
4750 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); cnic_start_bnx2_hw()
4753 val = (u32) cp->kwq_info.pgtbl_map; cnic_start_bnx2_hw()
4757 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; cnic_start_bnx2_hw()
4759 cp->kcq1.sw_prod_idx = 0; cnic_start_bnx2_hw()
4760 cp->kcq1.hw_prod_idx_ptr = cnic_start_bnx2_hw()
4763 cp->kcq1.status_idx_ptr = &sblk->status_idx; cnic_start_bnx2_hw()
4776 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); cnic_start_bnx2_hw()
4779 val = (u32) cp->kcq1.dma.pgtbl_map; cnic_start_bnx2_hw()
4782 cp->int_num = 0; cnic_start_bnx2_hw()
4784 struct status_block_msix *msblk = cp->status_blk.bnx2; cnic_start_bnx2_hw()
4785 u32 sb_id = cp->status_blk_num; cnic_start_bnx2_hw()
4788 cp->kcq1.hw_prod_idx_ptr = cnic_start_bnx2_hw()
4790 cp->kcq1.status_idx_ptr = &msblk->status_idx; cnic_start_bnx2_hw()
4791 cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index; cnic_start_bnx2_hw()
4792 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; cnic_start_bnx2_hw()
4834 struct cnic_local *cp = dev->cnic_priv; cnic_setup_bnx2x_context() local
4835 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_setup_bnx2x_context()
4839 for (i = 0; i < cp->ctx_blks; i++) { cnic_setup_bnx2x_context()
4840 struct cnic_ctx *ctx = &cp->ctx_arr[i]; cnic_setup_bnx2x_context()
4843 if (cp->ctx_align) { cnic_setup_bnx2x_context()
4844 unsigned long mask = cp->ctx_align - 1; cnic_setup_bnx2x_context()
4855 struct cnic_local *cp = dev->cnic_priv; cnic_init_bnx2x_irq() local
4856 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_init_bnx2x_irq()
4859 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, cnic_init_bnx2x_irq()
4888 struct cnic_local *cp = dev->cnic_priv; cnic_enable_bnx2x_int() local
4890 u8 sb_id = cp->status_blk_num; cnic_enable_bnx2x_int()
4907 struct cnic_local *cp = dev->cnic_priv; cnic_init_bnx2x_tx_ring() local
4909 struct cnic_uio_dev *udev = cp->udev; cnic_init_bnx2x_tx_ring()
4912 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; cnic_init_bnx2x_tx_ring()
4914 u32 cli = cp->ethdev->iscsi_l2_client_id; cnic_init_bnx2x_tx_ring()
4966 cp->tx_cons_ptr = cnic_init_bnx2x_tx_ring()
4973 struct cnic_local *cp = dev->cnic_priv; cnic_init_bnx2x_rx_ring() local
4975 struct cnic_uio_dev *udev = cp->udev; cnic_init_bnx2x_rx_ring()
4980 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; cnic_init_bnx2x_rx_ring()
4982 u32 cli = cp->ethdev->iscsi_l2_client_id; cnic_init_bnx2x_rx_ring()
4991 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); cnic_init_bnx2x_rx_ring()
4996 int n = (i % cp->l2_rx_ring_size) + 1; cnic_init_bnx2x_rx_ring()
4998 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); cnic_init_bnx2x_rx_ring()
5027 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size); cnic_init_bnx2x_rx_ring()
5033 cp->rx_cons_ptr = cnic_init_bnx2x_rx_ring()
5035 cp->rx_cons = *cp->rx_cons_ptr; cnic_init_bnx2x_rx_ring()
5040 struct cnic_local *cp = dev->cnic_priv; cnic_init_bnx2x_kcq() local
5044 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + cnic_init_bnx2x_kcq()
5046 cp->kcq1.sw_prod_idx = 0; cnic_init_bnx2x_kcq()
5049 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; cnic_init_bnx2x_kcq()
5051 cp->kcq1.hw_prod_idx_ptr = cnic_init_bnx2x_kcq()
5053 cp->kcq1.status_idx_ptr = cnic_init_bnx2x_kcq()
5056 struct host_hc_status_block_e1x *sb = cp->status_blk.gen; cnic_init_bnx2x_kcq()
5058 cp->kcq1.hw_prod_idx_ptr = cnic_init_bnx2x_kcq()
5060 cp->kcq1.status_idx_ptr = cnic_init_bnx2x_kcq()
5065 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; cnic_init_bnx2x_kcq()
5067 cp->kcq2.io_addr = BAR_USTRORM_INTMEM + cnic_init_bnx2x_kcq()
5069 cp->kcq2.sw_prod_idx = 0; cnic_init_bnx2x_kcq()
5070 cp->kcq2.hw_prod_idx_ptr = cnic_init_bnx2x_kcq()
5072 cp->kcq2.status_idx_ptr = cnic_init_bnx2x_kcq()
5079 struct cnic_local *cp = dev->cnic_priv; cnic_start_bnx2x_hw() local
5081 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_start_bnx2x_hw()
5086 cp->func = bp->pf_num; cnic_start_bnx2x_hw()
5088 func = CNIC_FUNC(cp); cnic_start_bnx2x_hw()
5091 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, cnic_start_bnx2x_hw()
5092 cp->iscsi_start_cid, 0); cnic_start_bnx2x_hw()
5098 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn, cnic_start_bnx2x_hw()
5099 cp->fcoe_start_cid, 0); cnic_start_bnx2x_hw()
5105 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2; cnic_start_bnx2x_hw()
5110 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); cnic_start_bnx2x_hw()
5115 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); cnic_start_bnx2x_hw()
5118 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); cnic_start_bnx2x_hw()
5121 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); cnic_start_bnx2x_hw()
5124 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); cnic_start_bnx2x_hw()
5128 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); cnic_start_bnx2x_hw()
5135 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); cnic_start_bnx2x_hw()
5138 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); cnic_start_bnx2x_hw()
5155 struct cnic_local *cp = dev->cnic_priv; cnic_init_rings() local
5157 struct cnic_uio_dev *udev = cp->udev; cnic_init_rings()
5159 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) cnic_init_rings()
5165 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); cnic_init_rings()
5167 u32 cli = cp->ethdev->iscsi_l2_client_id; cnic_init_rings()
5168 u32 cid = cp->ethdev->iscsi_l2_cid; cnic_init_rings()
5189 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); cnic_init_rings()
5202 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); cnic_init_rings()
5208 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && cnic_init_rings()
5212 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) cnic_init_rings()
5225 struct cnic_local *cp = dev->cnic_priv; cnic_shutdown_rings() local
5226 struct cnic_uio_dev *udev = cp->udev; cnic_shutdown_rings()
5229 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) cnic_shutdown_rings()
5235 u32 cli = cp->ethdev->iscsi_l2_client_id; cnic_shutdown_rings()
5236 u32 cid = cp->ethdev->iscsi_l2_cid; cnic_shutdown_rings()
5242 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); cnic_shutdown_rings()
5249 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && cnic_shutdown_rings()
5253 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) cnic_shutdown_rings()
5263 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); cnic_shutdown_rings()
5270 struct cnic_local *cp = dev->cnic_priv; cnic_register_netdev() local
5271 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_register_netdev()
5280 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); cnic_register_netdev()
5296 struct cnic_local *cp = dev->cnic_priv; cnic_unregister_netdev() local
5297 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_unregister_netdev()
5307 struct cnic_local *cp = dev->cnic_priv; cnic_start_hw() local
5308 struct cnic_eth_dev *ethdev = cp->ethdev; cnic_start_hw()
5316 cp->func = PCI_FUNC(dev->pcidev->devfn); cnic_start_hw()
5317 cp->status_blk.gen = ethdev->irq_arr[0].status_blk; cnic_start_hw()
5318 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; cnic_start_hw()
5320 err = cp->alloc_resc(dev); cnic_start_hw()
5326 err = cp->start_hw(dev); cnic_start_hw()
5336 cp->enable_int(dev); cnic_start_hw()
5341 cp->free_resc(dev); cnic_start_hw()
5365 struct cnic_local *cp = dev->cnic_priv; cnic_stop_bnx2x_hw() local
5368 u32 sb_id = cp->status_blk_num; cnic_stop_bnx2x_hw()
5388 *cp->kcq1.hw_prod_idx_ptr = 0; cnic_stop_bnx2x_hw()
5391 CNIC_WR16(dev, cp->kcq1.io_addr, 0); cnic_stop_bnx2x_hw()
5398 struct cnic_local *cp = dev->cnic_priv; cnic_stop_hw() local
5404 while (cp->udev && cp->udev->uio_dev != -1 && i < 15) { cnic_stop_hw()
5409 cp->stop_cm(dev); cnic_stop_hw()
5410 cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ; cnic_stop_hw()
5412 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); cnic_stop_hw()
5415 cp->stop_hw(dev); cnic_stop_hw()
5440 struct cnic_local *cp; cnic_alloc_dev() local
5455 cp = cdev->cnic_priv; cnic_alloc_dev()
5456 cp->dev = cdev; cnic_alloc_dev()
5457 cp->l2_single_buf_size = 0x400; cnic_alloc_dev()
5458 cp->l2_rx_ring_size = 3; cnic_alloc_dev()
5460 spin_lock_init(&cp->cnic_ulp_lock); cnic_alloc_dev()
5471 struct cnic_local *cp; init_bnx2_cnic() local
5502 cp = cdev->cnic_priv; init_bnx2_cnic()
5503 cp->ethdev = ethdev; init_bnx2_cnic()
5505 cp->chip_id = ethdev->chip_id; init_bnx2_cnic()
5509 cp->cnic_ops = &cnic_bnx2_ops; init_bnx2_cnic()
5510 cp->start_hw = cnic_start_bnx2_hw; init_bnx2_cnic()
5511 cp->stop_hw = cnic_stop_bnx2_hw; init_bnx2_cnic()
5512 cp->setup_pgtbl = cnic_setup_page_tbl; init_bnx2_cnic()
5513 cp->alloc_resc = cnic_alloc_bnx2_resc; init_bnx2_cnic()
5514 cp->free_resc = cnic_free_resc; init_bnx2_cnic()
5515 cp->start_cm = cnic_cm_init_bnx2_hw; init_bnx2_cnic()
5516 cp->stop_cm = cnic_cm_stop_bnx2_hw; init_bnx2_cnic()
5517 cp->enable_int = cnic_enable_bnx2_int; init_bnx2_cnic()
5518 cp->disable_int_sync = cnic_disable_bnx2_int_sync; init_bnx2_cnic()
5519 cp->close_conn = cnic_close_bnx2_conn; init_bnx2_cnic()
5531 struct cnic_local *cp; init_bnx2x_cnic() local
5555 cp = cdev->cnic_priv; init_bnx2x_cnic()
5556 cp->ethdev = ethdev; init_bnx2x_cnic()
5558 cp->chip_id = ethdev->chip_id; init_bnx2x_cnic()
5574 cp->cnic_ops = &cnic_bnx2x_ops; init_bnx2x_cnic()
5575 cp->start_hw = cnic_start_bnx2x_hw; init_bnx2x_cnic()
5576 cp->stop_hw = cnic_stop_bnx2x_hw; init_bnx2x_cnic()
5577 cp->setup_pgtbl = cnic_setup_page_tbl_le; init_bnx2x_cnic()
5578 cp->alloc_resc = cnic_alloc_bnx2x_resc; init_bnx2x_cnic()
5579 cp->free_resc = cnic_free_resc; init_bnx2x_cnic()
5580 cp->start_cm = cnic_cm_init_bnx2x_hw; init_bnx2x_cnic()
5581 cp->stop_cm = cnic_cm_stop_bnx2x_hw; init_bnx2x_cnic()
5582 cp->enable_int = cnic_enable_bnx2x_int; init_bnx2x_cnic()
5583 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; init_bnx2x_cnic()
5585 cp->ack_int = cnic_ack_bnx2x_e2_msix; init_bnx2x_cnic()
5586 cp->arm_int = cnic_arm_bnx2x_e2_msix; init_bnx2x_cnic()
5588 cp->ack_int = cnic_ack_bnx2x_msix; init_bnx2x_cnic()
5589 cp->arm_int = cnic_arm_bnx2x_msix; init_bnx2x_cnic()
5591 cp->close_conn = cnic_close_bnx2x_conn; init_bnx2x_cnic()
5617 static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, cnic_rcv_netevent() argument
5627 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], cnic_rcv_netevent()
5634 ctx = cp->ulp_handle[if_type]; cnic_rcv_netevent()
5636 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); cnic_rcv_netevent()
5641 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); cnic_rcv_netevent()
5664 struct cnic_local *cp = dev->cnic_priv; cnic_netdev_event() local
5680 cnic_rcv_netevent(cp, event, 0); cnic_netdev_event()
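Aside, not part of the search listing: the init_bnx2_cnic()/init_bnx2x_cnic() hits above bind one generic core to chip-specific handlers through function pointers held in cnic_local (cp->start_hw, cp->stop_hw, and so on). A minimal sketch of that dispatch shape, with illustrative names that are not the cnic driver's:

#include <stdio.h>

/* One ops table per supported chip; the core only ever calls through
 * the pointers, so adding a chip means adding a table, not branches. */
struct chip_ops {
	int  (*start_hw)(void);
	void (*stop_hw)(void);
};

static int  a_start(void) { printf("chip A start\n"); return 0; }
static void a_stop(void)  { printf("chip A stop\n"); }

static const struct chip_ops chip_a_ops = {
	.start_hw = a_start,
	.stop_hw  = a_stop,
};

int main(void)
{
	const struct chip_ops *ops = &chip_a_ops;	/* chosen at probe time */

	ops->start_hw();
	ops->stop_hw();
	return 0;
}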
H A Dcnic.h102 #define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
103 #define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
104 #define BNX2X_KWQ_DATA(cp, x) \
105 &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
403 #define CNIC_FUNC(cp) ((cp)->func)
421 #define CNIC_SUPPORTS_FCOE(cp) \
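Aside: BNX2X_KWQ_DATA_PG()/BNX2X_KWQ_DATA_IDX() above split a linear slot number into a page number and an offset with one divide and one modulo, and BNX2X_KWQ_DATA() combines the two into a 2-D lookup. A standalone sketch, assuming 256 slots per page purely for illustration (the real count comes from cp->kwq_16_data_pp):

#include <stdio.h>

#define SLOTS_PER_PAGE	256			/* stand-in for kwq_16_data_pp */
#define KWQ_PG(x)	((x) / SLOTS_PER_PAGE)	/* which page holds slot x */
#define KWQ_IDX(x)	((x) % SLOTS_PER_PAGE)	/* offset within that page */

int main(void)
{
	unsigned int x = 1000;

	/* 1000 = 3 * 256 + 232, so slot 1000 is page 3, index 232 */
	printf("slot %u -> page %u, index %u\n", x, KWQ_PG(x), KWQ_IDX(x));
	return 0;
}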
/linux-4.1.27/arch/x86/boot/
H A Dstring.c80 static unsigned int simple_guess_base(const char *cp) simple_guess_base() argument
82 if (cp[0] == '0') { simple_guess_base()
83 if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2])) simple_guess_base()
94 * @cp: The start of the string
99 unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) simple_strtoull() argument
104 base = simple_guess_base(cp); simple_strtoull()
106 if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') simple_strtoull()
107 cp += 2; simple_strtoull()
109 while (isxdigit(*cp)) { simple_strtoull()
112 value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10; simple_strtoull()
116 cp++; simple_strtoull()
119 *endp = (char *)cp; simple_strtoull()
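Aside: simple_guess_base()/simple_strtoull() above implement the usual auto-detection: a "0x" prefix means hex, a bare leading zero means octal, anything else decimal. The C library's strtoull() applies the same rules when handed base 0, so the behaviour is easy to check from user space:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *end;

	printf("%llu\n", strtoull("0x1f", &end, 0));	/* 31, hex     */
	printf("%llu\n", strtoull("017",  &end, 0));	/* 15, octal   */
	printf("%llu\n", strtoull("17",   &end, 0));	/* 17, decimal */
	return 0;
}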
H A DMakefile164 cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
166 cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
172 cp $(obj)/bzImage $(obj)/isoimage/linux
175 cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \
187 cp System.map $(INSTALL_PATH)/
H A Dinstall.sh50 cp $3 $4/System.map
/linux-4.1.27/tools/usb/usbip/libsrc/
H A Dnames.c311 char buf[512], *cp; parse() local
323 cp = strchr(buf, '\r'); parse()
324 if (cp) parse()
325 *cp = 0; parse()
326 cp = strchr(buf, '\n'); parse()
327 if (cp) parse()
328 *cp = 0; parse()
331 cp = buf; parse()
357 cp = buf+2; parse()
358 while (isspace(*cp)) parse()
359 cp++; parse()
360 if (!isxdigit(*cp)) { parse()
364 u = strtoul(cp, &cp, 16); parse()
365 while (isspace(*cp)) parse()
366 cp++; parse()
367 if (!*cp) { parse()
371 if (new_class(cp, u)) parse()
373 linectr, u, cp); parse()
374 dbg("line %5u class %02x %s", linectr, u, cp); parse()
388 if (isxdigit(*cp)) { parse()
390 u = strtoul(cp, &cp, 16); parse()
391 while (isspace(*cp)) parse()
392 cp++; parse()
393 if (!*cp) { parse()
397 if (new_vendor(cp, u)) parse()
399 linectr, u, cp); parse()
400 dbg("line %5u vendor %04x %s", linectr, u, cp); parse()
407 u = strtoul(buf+1, &cp, 16); parse()
408 while (isspace(*cp)) parse()
409 cp++; parse()
410 if (!*cp) { parse()
416 if (new_product(cp, lastvendor, u)) parse()
418 linectr, lastvendor, u, cp); parse()
420 lastvendor, u, cp); parse()
424 if (new_subclass(cp, lastclass, u)) parse()
426 linectr, lastclass, u, cp); parse()
428 lastclass, u, cp); parse()
446 u = strtoul(buf+2, &cp, 16); parse()
447 while (isspace(*cp)) parse()
448 cp++; parse()
449 if (!*cp) { parse()
455 if (new_protocol(cp, lastclass, lastsubclass, parse()
459 u, cp); parse()
461 linectr, lastclass, lastsubclass, u, cp); parse()
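Aside: the parse() hits above repeat one idiom per table line: strtoul(cp, &cp, 16) to consume a hex id, skip whitespace, then take the rest of the line as the name, rejecting the line if the name is empty. The same idiom as a self-contained helper (function and variable names are illustrative):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_id_line(const char *line, unsigned long *id, const char **name)
{
	char *cp;

	if (!isxdigit((unsigned char)*line))
		return -1;			/* no hex id at start */
	*id = strtoul(line, &cp, 16);
	while (isspace((unsigned char)*cp))
		cp++;
	if (!*cp)
		return -1;			/* id with no name */
	*name = cp;
	return 0;
}

int main(void)
{
	unsigned long id;
	const char *name;

	if (!parse_id_line("1d6b Linux Foundation", &id, &name))
		printf("%04lx = %s\n", id, name);
	return 0;
}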
/linux-4.1.27/net/sched/
H A Dcls_tcindex.c224 struct tcindex_data *cp, *oldp; tcindex_set_parms() local
238 cp = kzalloc(sizeof(*cp), GFP_KERNEL); tcindex_set_parms()
239 if (!cp) tcindex_set_parms()
242 cp->mask = p->mask; tcindex_set_parms()
243 cp->shift = p->shift; tcindex_set_parms()
244 cp->hash = p->hash; tcindex_set_parms()
245 cp->alloc_hash = p->alloc_hash; tcindex_set_parms()
246 cp->fall_through = p->fall_through; tcindex_set_parms()
247 cp->tp = tp; tcindex_set_parms()
252 cp->perfect = kmemdup(p->perfect, tcindex_set_parms()
253 sizeof(*r) * cp->hash, GFP_KERNEL); tcindex_set_parms()
254 if (!cp->perfect) tcindex_set_parms()
256 for (i = 0; i < cp->hash; i++) tcindex_set_parms()
257 tcf_exts_init(&cp->perfect[i].exts, tcindex_set_parms()
261 cp->h = p->h; tcindex_set_parms()
269 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); tcindex_set_parms()
272 cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); tcindex_set_parms()
275 cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); tcindex_set_parms()
282 if (cp->perfect) { tcindex_set_parms()
283 if (!valid_perfect_hash(cp) || tcindex_set_parms()
284 cp->hash > cp->alloc_hash) tcindex_set_parms()
286 } else if (cp->h && cp->hash != cp->alloc_hash) { tcindex_set_parms()
292 cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); tcindex_set_parms()
294 if (!cp->hash) { tcindex_set_parms()
298 if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) tcindex_set_parms()
299 cp->hash = (cp->mask >> cp->shift) + 1; tcindex_set_parms()
301 cp->hash = DEFAULT_HASH_SIZE; tcindex_set_parms()
304 if (!cp->perfect && !cp->h) tcindex_set_parms()
305 cp->alloc_hash = cp->hash; tcindex_set_parms()
312 if (cp->perfect || valid_perfect_hash(cp)) tcindex_set_parms()
313 if (handle >= cp->alloc_hash) tcindex_set_parms()
318 if (!cp->perfect && !cp->h) { tcindex_set_parms()
319 if (valid_perfect_hash(cp)) { tcindex_set_parms()
322 cp->perfect = kcalloc(cp->hash, sizeof(*r), GFP_KERNEL); tcindex_set_parms()
323 if (!cp->perfect) tcindex_set_parms()
325 for (i = 0; i < cp->hash; i++) tcindex_set_parms()
326 tcf_exts_init(&cp->perfect[i].exts, tcindex_set_parms()
333 hash = kcalloc(cp->hash, tcindex_set_parms()
340 cp->h = hash; tcindex_set_parms()
345 if (cp->perfect) tcindex_set_parms()
346 r = cp->perfect + handle; tcindex_set_parms()
348 r = tcindex_lookup(cp, handle) ? : &new_filter_result; tcindex_set_parms()
374 rcu_assign_pointer(tp->root, cp); tcindex_set_parms()
382 fp = cp->h + (handle % cp->hash); tcindex_set_parms()
397 kfree(cp->perfect); tcindex_set_parms()
399 kfree(cp->h); tcindex_set_parms()
401 kfree(cp); tcindex_set_parms()
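Aside: tcindex_set_parms() above is a textbook read-copy-update write side: allocate cp, duplicate every field of the live p, mutate only the copy, then make it visible with a single rcu_assign_pointer(tp->root, cp). A user-space sketch of that publish step, using a C11 release store as a stand-in for rcu_assign_pointer() and omitting grace-period reclamation:

#include <stdatomic.h>
#include <stdlib.h>

struct params { int mask, shift, hash; };

static _Atomic(struct params *) active;

static int update_params(const struct params *old, int new_mask)
{
	struct params *cp = malloc(sizeof(*cp));

	if (!cp)
		return -1;
	*cp = *old;		/* copy the live state  */
	cp->mask = new_mask;	/* mutate only the copy */
	/* release store: readers see either the old or the new
	 * struct, never a half-written one */
	atomic_store_explicit(&active, cp, memory_order_release);
	return 0;	/* old copy freed after a grace period (omitted) */
}

int main(void)
{
	static struct params initial = { 0x7f, 2, 64 };

	atomic_store(&active, &initial);
	return update_params(&initial, 0xff);
}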
/linux-4.1.27/kernel/debug/kdb/
H A Dkdb_io.c205 char *cp = buffer; kdb_read() local
226 cp += len; kdb_read()
228 cp--; kdb_read()
231 lastchar = cp; kdb_read()
232 *cp = '\0'; kdb_read()
242 if (cp > buffer) { kdb_read()
243 if (cp < lastchar) { kdb_read()
244 memcpy(tmpbuffer, cp, lastchar - cp); kdb_read()
245 memcpy(cp-1, tmpbuffer, lastchar - cp); kdb_read()
248 --cp; kdb_read()
249 kdb_printf("\b%s \r", cp); kdb_read()
250 tmp = *cp; kdb_read()
251 *cp = '\0'; kdb_read()
254 *cp = tmp; kdb_read()
267 if (cp < lastchar) { kdb_read()
268 memcpy(tmpbuffer, cp+1, lastchar - cp - 1); kdb_read()
269 memcpy(cp, tmpbuffer, lastchar - cp - 1); kdb_read()
271 kdb_printf("%s \r", cp); kdb_read()
272 tmp = *cp; kdb_read()
273 *cp = '\0'; kdb_read()
276 *cp = tmp; kdb_read()
280 if (cp > buffer) { kdb_read()
283 cp = buffer; kdb_read()
287 if (cp < lastchar) { kdb_read()
288 kdb_printf("%s", cp); kdb_read()
289 cp = lastchar; kdb_read()
293 if (cp > buffer) { kdb_read()
295 --cp; kdb_read()
308 if (cp < lastchar) { kdb_read()
309 kdb_printf("%c", *cp); kdb_read()
310 ++cp; kdb_read()
328 if (p_tmp > cp) kdb_read()
330 memcpy(tmpbuffer, p_tmp, cp-p_tmp); kdb_read()
331 *(tmpbuffer + (cp-p_tmp)) = '\0'; kdb_read()
364 strncpy(p_tmp+len_tmp, cp, lastchar-cp+1); kdb_read()
366 strncpy(cp, p_tmp+len, len_tmp-len + 1); kdb_read()
368 kdb_printf("%s", cp); kdb_read()
369 cp += len; kdb_read()
376 if (cp < lastchar) { kdb_read()
377 memcpy(tmpbuffer, cp, lastchar - cp); kdb_read()
378 memcpy(cp+1, tmpbuffer, lastchar - cp); kdb_read()
380 *cp = key; kdb_read()
381 kdb_printf("%s\r", cp); kdb_read()
382 ++cp; kdb_read()
383 tmp = *cp; kdb_read()
384 *cp = '\0'; kdb_read()
387 *cp = tmp; kdb_read()
390 *cp++ = key; kdb_read()
523 char firstchar, *cp; kdb_search_string() local
541 cp = searched; kdb_search_string()
542 while ((cp = strchr(cp, firstchar))) { kdb_search_string()
543 if (!strncmp(cp, searchfor, len2)) kdb_search_string()
545 cp++; kdb_search_string()
561 char *cp, *cp2, *cphold = NULL, replaced_byte = ' '; vkdb_printf() local
613 cp = strchr(kdb_buffer, '\n'); vkdb_printf()
614 if (!cp) { vkdb_printf()
658 cp++; /* to byte after the newline */ vkdb_printf()
659 replaced_byte = *cp; /* remember what/where it was */ vkdb_printf()
660 cphold = cp; vkdb_printf()
661 *cp = '\0'; /* end the string for our search */ vkdb_printf()
700 cp = (char *) printk_skip_level(kdb_buffer); vkdb_printf()
702 gdbstub_msg_write(cp, retlen - (cp - kdb_buffer)); vkdb_printf()
705 len = retlen - (cp - kdb_buffer); vkdb_printf()
706 cp2 = cp; vkdb_printf()
713 c->write(c, cp, retlen - (cp - kdb_buffer)); vkdb_printf()
770 cp = moreprompt; vkdb_printf()
772 dbg_io_ops->write_char(*cp); vkdb_printf()
773 cp++; vkdb_printf()
H A Dkdb_main.c242 char *cp = strchr(e, '='); kdbgetenv() local
243 return cp ? ++cp : ""; kdbgetenv()
405 char *cp; kdb_set() local
407 debugflags = simple_strtoul(argv[2], &cp, 0); kdb_set()
408 if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) { kdb_set()
503 char *cp; kdbgetaddrarg() local
534 cp = strpbrk(symname, "+-"); kdbgetaddrarg()
535 if (cp != NULL) { kdbgetaddrarg()
536 symbol = *cp; kdbgetaddrarg()
537 *cp++ = '\0'; kdbgetaddrarg()
606 cp = (char *)argv[*nextarg]; kdbgetaddrarg()
610 diag = kdbgetularg(cp, &off); kdbgetaddrarg()
823 char *cp = (char *)str, *cp2; parse_grep() local
826 if (*cp != '|') parse_grep()
828 cp++; parse_grep()
829 while (isspace(*cp)) parse_grep()
830 cp++; parse_grep()
831 if (strncmp(cp, "grep ", 5)) { parse_grep()
835 cp += 5; parse_grep()
836 while (isspace(*cp)) parse_grep()
837 cp++; parse_grep()
838 cp2 = strchr(cp, '\n'); parse_grep()
841 len = strlen(cp); parse_grep()
846 /* now cp points to a nonzero length search string */ parse_grep()
847 if (*cp == '"') { parse_grep()
850 cp++; parse_grep()
851 cp2 = strchr(cp, '"'); parse_grep()
859 if (*cp == '^') { parse_grep()
861 cp++; parse_grep()
863 len = strlen(cp); parse_grep()
865 if (*(cp+len-1) == '$') { parse_grep()
867 *(cp+len-1) = '\0'; parse_grep()
869 len = strlen(cp); parse_grep()
876 strcpy(kdb_grep_string, cp); parse_grep()
914 char *cp; local
922 cp = (char *)cmdstr;
932 if (*cp != '\n' && *cp != '\0') {
935 while (*cp) {
937 while (isspace(*cp))
938 cp++;
939 if ((*cp == '\0') || (*cp == '\n') ||
940 (*cp == '#' && !defcmd_in_progress))
943 if (*cp == '|') {
963 while (*cp && *cp != '\n' &&
964 (escaped || quoted || !isspace(*cp))) {
969 *cpp++ = *cp++;
972 if (*cp == '\\') {
974 ++cp;
977 if (*cp == quoted)
979 else if (*cp == '\'' || *cp == '"')
980 quoted = *cp;
981 *cpp = *cp++;
992 parse_grep(cp);
1519 unsigned char *cp; kdb_md_line() local
1521 cp = wc.c + 8 - bytesperword; kdb_md_line()
1523 cp = wc.c; kdb_md_line()
1530 *c++ = printable_char(*cp++); kdb_md_line()
1531 *c++ = printable_char(*cp++); kdb_md_line()
1532 *c++ = printable_char(*cp++); kdb_md_line()
1533 *c++ = printable_char(*cp++); kdb_md_line()
1536 *c++ = printable_char(*cp++); kdb_md_line()
1537 *c++ = printable_char(*cp++); kdb_md_line()
1540 *c++ = printable_char(*cp++); kdb_md_line()
1543 *c++ = printable_char(*cp++); kdb_md_line()
2094 char *cp; kdb_dmesg() local
2095 lines = simple_strtol(argv[1], &cp, 0); kdb_dmesg()
2096 if (*cp) kdb_dmesg()
2099 adjust = simple_strtoul(argv[2], &cp, 0); kdb_dmesg()
2100 if (*cp || adjust < 0) kdb_dmesg()
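Aside: kdb_set() and kdb_dmesg() above validate numeric arguments by inspecting the end pointer: cp == argv[...] means nothing was parsed, a nonzero *cp means trailing junk. The same strict check with the C library:

#include <stdio.h>
#include <stdlib.h>

static int parse_long_strict(const char *s, long *out)
{
	char *cp;

	*out = strtol(s, &cp, 0);
	return (cp == s || *cp) ? -1 : 0;	/* empty parse or leftovers */
}

int main(void)
{
	long v;

	printf("%d\n", parse_long_strict("42", &v));	/* 0: accepted  */
	printf("%d\n", parse_long_strict("42x", &v));	/* -1: rejected */
	return 0;
}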
/linux-4.1.27/include/sound/
H A Dseq_midi_emul.h148 #define SNDRV_GM_BANK_SELECT(cp) (((cp)->control[0]<<7)|((cp)->control[32]))
149 #define SNDRV_GM_MODULATION_WHEEL(cp) (((cp)->control[1]<<7)|((cp)->control[33]))
150 #define SNDRV_GM_BREATH(cp) (((cp)->control[2]<<7)|((cp)->control[34]))
151 #define SNDRV_GM_FOOT_PEDAL(cp) (((cp)->control[4]<<7)|((cp)->control[36]))
152 #define SNDRV_GM_PORTAMENTO_TIME(cp) (((cp)->control[5]<<7)|((cp)->control[37]))
153 #define SNDRV_GM_DATA_ENTRY(cp) (((cp)->control[6]<<7)|((cp)->control[38]))
154 #define SNDRV_GM_VOLUME(cp) (((cp)->control[7]<<7)|((cp)->control[39]))
155 #define SNDRV_GM_BALANCE(cp) (((cp)->control[8]<<7)|((cp)->control[40]))
156 #define SNDRV_GM_PAN(cp) (((cp)->control[10]<<7)|((cp)->control[42]))
157 #define SNDRV_GM_EXPRESSION(cp) (((cp)->control[11]<<7)|((cp)->control[43]))
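Aside: each macro above pairs a coarse MIDI controller (numbers 0..31, the MSB) with its fine counterpart 32 higher (the LSB) and rebuilds a 14-bit value as (msb << 7) | lsb. Worked through once:

#include <stdio.h>

int main(void)
{
	unsigned int msb = 0x40, lsb = 0x05;	/* two 7-bit controller bytes */
	unsigned int val = (msb << 7) | lsb;	/* 0x2005 = 8197 */

	printf("14-bit value %u, maximum %u\n", val, (127u << 7) | 127u);
	return 0;
}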
/linux-4.1.27/drivers/staging/speakup/
H A Dvarhandlers.c183 char *cp; spk_set_num_var() local
234 cp = spk_pitch_buff; spk_set_num_var()
236 cp = buf; spk_set_num_var()
238 l = sprintf(cp, var_data->u.n.synth_fmt, (int)val); spk_set_num_var()
240 l = sprintf(cp, spk_set_num_var()
242 synth_printf("%s", cp); spk_set_num_var()
275 u_char *cp; spk_set_mask_bits() local
279 for (cp = (u_char *)spk_punc_info[3].value; *cp; cp++) spk_set_mask_bits()
280 spk_chartab[*cp] &= ~mask; spk_set_mask_bits()
282 cp = (u_char *)input; spk_set_mask_bits()
283 if (!cp) spk_set_mask_bits()
284 cp = spk_punc_info[which].value; spk_set_mask_bits()
286 for (; *cp; cp++) { spk_set_mask_bits()
287 if (*cp < SPACE) spk_set_mask_bits()
290 if (!(spk_chartab[*cp]&PUNC)) spk_set_mask_bits()
292 } else if (spk_chartab[*cp]&B_NUM) spk_set_mask_bits()
295 if (*cp) spk_set_mask_bits()
297 cp = (u_char *)input; spk_set_mask_bits()
300 for (; *cp; cp++) spk_set_mask_bits()
301 if (*cp > SPACE) spk_set_mask_bits()
302 spk_chartab[*cp] |= mask; spk_set_mask_bits()
304 for (; *cp; cp++) spk_set_mask_bits()
305 if (*cp > SPACE) spk_set_mask_bits()
306 spk_chartab[*cp] &= ~mask; spk_set_mask_bits()
H A Dkobjects.c33 char *cp; chars_chartab_show() local
48 cp = "B_CTL"; chars_chartab_show()
50 cp = "WDLM"; chars_chartab_show()
52 cp = "A_PUNC"; chars_chartab_show()
54 cp = "PUNC"; chars_chartab_show()
56 cp = "NUM"; chars_chartab_show()
58 cp = "A_CAP"; chars_chartab_show()
60 cp = "ALPHA"; chars_chartab_show()
62 cp = "B_CAPSYM"; chars_chartab_show()
64 cp = "B_SYM"; chars_chartab_show()
66 cp = "0"; chars_chartab_show()
68 scnprintf(buf_pointer, bufsize, "%d\t%s\n", i, cp); chars_chartab_show()
111 char *cp = (char *) buf; chars_chartab_store() local
112 char *end = cp + count; /* the null at the end of the buffer */ chars_chartab_store()
131 while (cp < end) { chars_chartab_store()
133 while ((cp < end) && (*cp == ' ' || *cp == '\t')) chars_chartab_store()
134 cp++; chars_chartab_store()
136 if (cp == end) chars_chartab_store()
138 if ((*cp == '\n') || strchr("dDrR", *cp)) { chars_chartab_store()
144 linefeed = strchr(cp, '\n'); chars_chartab_store()
150 if (!isdigit(*cp)) { chars_chartab_store()
152 cp = linefeed + 1; chars_chartab_store()
156 index = simple_strtoul(cp, &temp, 10); chars_chartab_store()
159 cp = linefeed + 1; chars_chartab_store()
169 cp = linefeed + 1; chars_chartab_store()
197 cp = linefeed + 1; chars_chartab_store()
205 cp = linefeed + 1; chars_chartab_store()
227 char *cp = buf; keymap_show() local
240 cp += sprintf(cp, "%d, %d, %d,\n", KEY_MAP_VER, num_keys, nstates); keymap_show()
247 cp += sprintf(cp, "%d,", (int)ch); keymap_show()
248 *cp++ = (i < nstates) ? SPACE : '\n'; keymap_show()
251 cp += sprintf(cp, "0, %d\n", KEY_MAP_VER); keymap_show()
253 return (int)(cp-buf); keymap_show()
265 char *cp; keymap_store() local
284 cp = in_buff; keymap_store()
287 cp = spk_s2uchar(cp, cp1); keymap_store()
302 cp = spk_s2uchar(cp, cp1); keymap_store()
304 if (!(*cp)) keymap_store()
436 char *cp; version_show() local
438 cp = buf; version_show()
439 cp += sprintf(cp, "Speakup version %s\n", SPEAKUP_VERSION); version_show()
441 cp += sprintf(cp, "%s synthesizer driver version %s\n", version_show()
443 return cp - buf; version_show()
453 char *cp = buf; punc_show() local
480 *cp++ = (char)i; punc_show()
483 return cp-buf; punc_show()
543 char *cp; spk_var_show() local
565 for (cp = (char *)param->p_val; (ch = *cp); cp++) { spk_var_show()
618 char *cp; spk_var_store() local
629 cp = (char *)buf; spk_var_store()
630 string_unescape_any_inplace(cp); spk_var_store()
636 if (*cp == 'd' || *cp == 'r' || *cp == '\0') spk_var_store()
638 else if (*cp == '+' || *cp == '-') spk_var_store()
642 if (kstrtol(cp, 10, &value) == 0) spk_var_store()
668 len = strlen(cp); spk_var_store()
669 if ((len >= 1) && (cp[len - 1] == '\n')) spk_var_store()
671 if ((len >= 2) && (cp[0] == '"') && (cp[len - 1] == '"')) { spk_var_store()
672 ++cp; spk_var_store()
675 cp[len] = '\0'; spk_var_store()
676 ret = spk_set_string_var(cp, param, len); spk_var_store()
744 char *cp = (char *) buf; message_store_helper() local
745 char *end = cp + count; message_store_helper()
760 while (cp < end) { message_store_helper()
762 while ((cp < end) && (*cp == ' ' || *cp == '\t')) message_store_helper()
763 cp++; message_store_helper()
765 if (cp == end) message_store_helper()
767 if (strchr("dDrR", *cp)) { message_store_helper()
773 linefeed = strchr(cp, '\n'); message_store_helper()
779 if (!isdigit(*cp)) { message_store_helper()
781 cp = linefeed + 1; message_store_helper()
785 index = simple_strtoul(cp, &temp, 10); message_store_helper()
802 cp = linefeed + 1; message_store_helper()
816 cp = linefeed + 1; message_store_helper()
H A Dspeakup_soft.c160 char *cp; get_initstring() local
164 cp = buf; get_initstring()
169 cp = cp + sprintf(cp, var->u.n.synth_fmt, get_initstring()
173 cp = cp + sprintf(cp, "\n"); get_initstring()
209 char __user *cp; softsynth_read() local
235 cp = buf; softsynth_read()
249 if (copy_to_user(cp, &ch, 1)) softsynth_read()
253 cp++; softsynth_read()
H A Dmain.c432 char *cp = spk_characters[ch]; speak_char() local
445 if (cp == NULL) { speak_char()
446 pr_info("speak_char: cp == NULL!\n"); speak_char()
453 synth_printf("%s", cp); speak_char()
456 if (*cp == '^') { speak_char()
458 cp++; speak_char()
460 synth_printf("%s", cp); speak_char()
700 char *cp = buf, *str_cap = spk_str_caps_stop; spell_word() local
706 while ((ch = (u_char) *cp)) { spell_word()
707 if (cp != buf) spell_word()
733 cp++; spell_word()
760 char *cp; say_line() local
769 cp = buf; say_line()
770 while (*cp == SPACE) say_line()
771 cp++; say_line()
772 synth_printf("%d, ", (cp - buf) + 1); say_line()
1206 const u_char *cp = key_info; spk_set_key_info() local
1210 version = *cp++; spk_set_key_info()
1213 num_keys = *cp; spk_set_key_info()
1214 states = (int)cp[1]; spk_set_key_info()
1223 memcpy(cp1, cp, key_data_len + 3); spk_set_key_info()
1797 char *cp = num_buf; inc_dec_var() local
1814 *cp = SPACE; inc_dec_var()
1816 *cp++ = *pn; inc_dec_var()
1819 snprintf(cp, sizeof(num_buf) - (cp - num_buf), " %d ", inc_dec_var()
1899 char *cp; handle_goto() local
1936 goto_pos = simple_strtoul(goto_buf, &cp, 10); handle_goto()
1938 if (*cp == 'x') { handle_goto()
/linux-4.1.27/drivers/isdn/hysdn/
H A Dhysdn_procconf.c55 unsigned char *cp = cnf->conf_line; process_line() local
59 hysdn_addlog(cnf->card, "conf line: %s", cp); process_line()
61 if (*cp == '-') { /* option */ process_line()
62 cp++; /* point to option char */ process_line()
64 if (*cp++ != 'c') process_line()
67 while ((*cp <= '9') && (*cp >= '0')) process_line()
68 i = i * 10 + *cp++ - '0'; /* get decimal number */ process_line()
77 if (*cp == '*') { /* line to send */ process_line()
79 hysdn_addlog(cnf->card, "conf chan=%d %s", cnf->channel, cp); process_line()
98 unsigned char ch, *cp; hysdn_conf_write() local
164 cp = cnf->conf_line + cnf->buf_size; hysdn_conf_write()
167 if ((*cp < ' ') && (*cp != 9)) hysdn_conf_write()
169 cp++; hysdn_conf_write()
175 *cp++ = 0; /* string termination */ hysdn_conf_write()
177 while ((i) && (*cp < ' ') && (*cp != 9)) { hysdn_conf_write()
180 cp++; /* next char */ hysdn_conf_write()
214 char *cp; hysdn_conf_read() local
219 if (!(cp = file->private_data)) hysdn_conf_read()
222 return simple_read_from_buffer(buf, count, off, cp, strlen(cp)); hysdn_conf_read()
233 char *cp, *tmp; hysdn_conf_open() local
266 cp = tmp; /* start of string */ hysdn_conf_open()
267 while (*cp) hysdn_conf_open()
268 cp++; hysdn_conf_open()
269 while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN) hysdn_conf_open()
270 *cp++ = ' '; hysdn_conf_open()
271 *cp++ = '\n'; hysdn_conf_open()
274 sprintf(cp, "%d %3d %4d %4d %3d 0x%04x 0x%08lx %7d %9d %3d %s", hysdn_conf_open()
286 while (*cp) hysdn_conf_open()
287 cp++; hysdn_conf_open()
288 while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN) hysdn_conf_open()
289 *cp++ = ' '; hysdn_conf_open()
290 *cp++ = '\n'; hysdn_conf_open()
291 *cp = 0; /* end of string */ hysdn_conf_open()
H A Dhysdn_boot.c337 EvalSysrTokData(hysdn_card *card, unsigned char *cp, int len) EvalSysrTokData() argument
349 for (p = cp, crc = 0; p < (cp + len - 2); p++) EvalSysrTokData()
355 if (crc != *(cp + len - 1)) { EvalSysrTokData()
362 if (*cp == SYSR_TOK_END) EvalSysrTokData()
365 if (len < (*(cp + 1) + 2)) { EvalSysrTokData()
366 hysdn_addlog(card, "token 0x%x invalid length %d", *cp, *(cp + 1)); EvalSysrTokData()
369 switch (*cp) { EvalSysrTokData()
371 if (*(cp + 1) != 1) EvalSysrTokData()
373 card->bchans = *(cp + 2); EvalSysrTokData()
377 if (*(cp + 1) != 1) EvalSysrTokData()
379 card->faxchans = *(cp + 2); EvalSysrTokData()
383 if (*(cp + 1) != 6) EvalSysrTokData()
385 memcpy(card->mac_addr, cp + 2, 6); EvalSysrTokData()
389 hysdn_addlog(card, "unknown token 0x%02x length %d", *cp, *(cp + 1)); EvalSysrTokData()
392 len -= (*(cp + 1) + 2); /* adjust len */ EvalSysrTokData()
393 cp += (*(cp + 1) + 2); /* and pointer */ EvalSysrTokData()
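Aside: EvalSysrTokData() above walks a type-length-value stream: cp[0] is the token type, cp[1] the payload length, and the cursor advances by *(cp + 1) + 2 per token until SYSR_TOK_END. A minimal walker over the same layout, with made-up token values:

#include <stdio.h>

static void walk_tlv(const unsigned char *cp, int len)
{
	while (len >= 2) {
		unsigned char type = cp[0], plen = cp[1];

		if (len < plen + 2)
			break;			/* truncated token */
		printf("token 0x%02x, %u payload byte(s)\n", type, plen);
		len -= plen + 2;		/* adjust len ...  */
		cp += plen + 2;			/* ... and pointer */
	}
}

int main(void)
{
	const unsigned char blob[] = { 0x01, 1, 2,		/* type 1, 1 byte  */
				       0x04, 2, 0xde, 0xad };	/* type 4, 2 bytes */

	walk_tlv(blob, sizeof(blob));
	return 0;
}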
H A Dhysdn_proclog.c27 static void put_log_buffer(hysdn_card *card, char *cp);
72 char *cp; hysdn_addlog() local
78 cp = pd->logtmp; hysdn_addlog()
79 cp += sprintf(cp, "HYSDN: card %d ", card->myid); hysdn_addlog()
82 cp += vsprintf(cp, fmt, args); hysdn_addlog()
84 *cp++ = '\n'; hysdn_addlog()
85 *cp = 0; hysdn_addlog()
101 put_log_buffer(hysdn_card *card, char *cp) put_log_buffer() argument
110 if (!cp) put_log_buffer()
112 if (!*cp) put_log_buffer()
117 if (!(ib = kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC))) put_log_buffer()
119 strcpy(ib->log_start, cp); /* set output string */ put_log_buffer()
/linux-4.1.27/scripts/
H A Dunifdef.c642 const char *cp; parseline() local
659 cp = skipcomment(tline); parseline()
661 if (*cp == '#') { parseline()
664 cp = skipcomment(cp + 1); parseline()
665 } else if (*cp != '\0') parseline()
669 keyword = tline + (cp - tline); parseline()
670 cp = skipsym(cp); parseline()
671 kwlen = cp - keyword; parseline()
673 if (strncmp(cp, "\\\r\n", 3) == 0 || parseline()
674 strncmp(cp, "\\\n", 2) == 0) parseline()
678 cp = skipcomment(cp); parseline()
679 if ((cursym = findsym(cp)) < 0) parseline()
691 cp = skipsym(cp); parseline()
693 retval = ifeval(&cp); parseline()
695 retval = ifeval(&cp) - LT_IF + LT_ELIF; parseline()
704 cp = skipcomment(cp); parseline()
705 if (*cp != '\0') { parseline()
722 size_t len = cp - tline; parseline()
726 cp += strlen(newline); parseline()
734 while (*cp != '\0') parseline()
735 cp = skipcomment(cp + 1); parseline()
827 const char *cp; eval_unary() local
833 cp = skipcomment(*cpp); eval_unary()
834 if (*cp == '!') { eval_unary()
836 cp++; eval_unary()
837 lt = eval_unary(ops, valp, &cp); eval_unary()
844 } else if (*cp == '(') { eval_unary()
845 cp++; eval_unary()
847 lt = eval_table(eval_ops, valp, &cp); eval_unary()
850 cp = skipcomment(cp); eval_unary()
851 if (*cp++ != ')') eval_unary()
853 } else if (isdigit((unsigned char)*cp)) { eval_unary()
855 *valp = strtol(cp, &ep, 0); eval_unary()
856 if (ep == cp) eval_unary()
859 cp = skipsym(cp); eval_unary()
860 } else if (strncmp(cp, "defined", 7) == 0 && endsym(cp[7])) { eval_unary()
861 cp = skipcomment(cp+7); eval_unary()
863 if (*cp == '(') { eval_unary()
864 cp = skipcomment(cp+1); eval_unary()
869 sym = findsym(cp); eval_unary()
876 cp = skipsym(cp); eval_unary()
877 cp = skipcomment(cp); eval_unary()
878 if (defparen && *cp++ != ')') eval_unary()
881 } else if (!endsym(*cp)) { eval_unary()
883 sym = findsym(cp); eval_unary()
884 cp = skipsym(cp); eval_unary()
887 cp = skipargs(cp); eval_unary()
896 cp = skipargs(cp); eval_unary()
904 *cpp = cp; eval_unary()
916 const char *cp; eval_table() local
921 cp = *cpp; eval_table()
922 lt = ops->inner(ops+1, valp, &cp); eval_table()
926 cp = skipcomment(cp); eval_table()
928 if (strncmp(cp, op->str, strlen(op->str)) == 0) eval_table()
932 cp += strlen(op->str); eval_table()
934 rt = ops->inner(ops+1, &val, &cp); eval_table()
940 *cpp = cp; eval_table()
972 skipcomment(const char *cp) argument
975 for (; isspace((unsigned char)*cp); cp++)
976 if (*cp == '\n')
978 return (cp);
980 while (*cp != '\0')
982 if (strncmp(cp, "\\\r\n", 3) == 0)
983 cp += 3;
984 else if (strncmp(cp, "\\\n", 2) == 0)
985 cp += 2;
988 if (strncmp(cp, "/\\\r\n", 4) == 0) {
990 cp += 4;
991 } else if (strncmp(cp, "/\\\n", 3) == 0) {
993 cp += 3;
994 } else if (strncmp(cp, "/*", 2) == 0) {
996 cp += 2;
997 } else if (strncmp(cp, "//", 2) == 0) {
999 cp += 2;
1000 } else if (strncmp(cp, "\'", 1) == 0) {
1003 cp += 1;
1004 } else if (strncmp(cp, "\"", 1) == 0) {
1007 cp += 1;
1008 } else if (strncmp(cp, "\n", 1) == 0) {
1010 cp += 1;
1011 } else if (strchr(" \r\t", *cp) != NULL) {
1012 cp += 1;
1014 return (cp);
1017 if (strncmp(cp, "\n", 1) == 0) {
1021 cp += 1;
1025 if ((incomment == CHAR_LITERAL && cp[0] == '\'') ||
1026 (incomment == STRING_LITERAL && cp[0] == '\"')) {
1028 cp += 1;
1029 } else if (cp[0] == '\\') {
1030 if (cp[1] == '\0')
1031 cp += 1;
1033 cp += 2;
1034 } else if (strncmp(cp, "\n", 1) == 0) {
1040 cp += 1;
1043 if (strncmp(cp, "*\\\r\n", 4) == 0) {
1045 cp += 4;
1046 } else if (strncmp(cp, "*\\\n", 3) == 0) {
1048 cp += 3;
1049 } else if (strncmp(cp, "*/", 2) == 0) {
1051 cp += 2;
1053 cp += 1;
1056 if (*cp == '*') {
1058 cp += 1;
1059 } else if (*cp == '/') {
1061 cp += 1;
1068 if (*cp == '/') {
1070 cp += 1;
1077 return (cp);
1084 skipargs(const char *cp) skipargs() argument
1086 const char *ocp = cp; skipargs()
1088 cp = skipcomment(cp); skipargs()
1089 if (*cp != '(') skipargs()
1090 return (cp); skipargs()
1092 if (*cp == '(') skipargs()
1094 if (*cp == ')') skipargs()
1096 cp = skipcomment(cp+1); skipargs()
1097 } while (level != 0 && *cp != '\0'); skipargs()
1099 return (cp); skipargs()
1109 skipsym(const char *cp) skipsym() argument
1111 while (!endsym(*cp)) skipsym()
1112 ++cp; skipsym()
1113 return (cp); skipsym()
1123 const char *cp; findsym() local
1126 cp = skipsym(str); findsym()
1127 if (cp == str) findsym()
1135 (int)(cp-str), str, findsym()
1141 if (strlcmp(symname[symind], str, cp-str) == 0) { findsym()
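Aside: unifdef's skipsym() above steps over one preprocessor symbol; endsym(), which is not among the hits, tests whether a character can no longer extend one. A portable equivalent of the pair, assuming identifier characters are letters, digits, and underscore:

#include <ctype.h>
#include <stdio.h>

static const char *skipsym(const char *cp)
{
	while (isalnum((unsigned char)*cp) || *cp == '_')
		cp++;
	return cp;
}

int main(void)
{
	const char *s = "FOO_BAR(1)";

	printf("symbol length: %d\n", (int)(skipsym(s) - s));	/* 7 */
	return 0;
}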
/linux-4.1.27/drivers/net/wireless/ath/wil6210/
H A Dethtool.c45 struct ethtool_coalesce *cp) wil_ethtoolops_get_coalesce()
67 cp->tx_coalesce_usecs = tx_itr_val; wil_ethtoolops_get_coalesce()
68 cp->rx_coalesce_usecs = rx_itr_val; wil_ethtoolops_get_coalesce()
73 struct ethtool_coalesce *cp) wil_ethtoolops_set_coalesce()
78 cp->rx_coalesce_usecs, cp->tx_coalesce_usecs); wil_ethtoolops_set_coalesce()
89 if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX || wil_ethtoolops_set_coalesce()
90 cp->tx_coalesce_usecs > WIL6210_ITR_TRSH_MAX) wil_ethtoolops_set_coalesce()
93 wil->tx_max_burst_duration = cp->tx_coalesce_usecs; wil_ethtoolops_set_coalesce()
94 wil->rx_max_burst_duration = cp->rx_coalesce_usecs; wil_ethtoolops_set_coalesce()
102 cp, sizeof(*cp), false); wil_ethtoolops_set_coalesce()
44 wil_ethtoolops_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *cp) wil_ethtoolops_get_coalesce() argument
72 wil_ethtoolops_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *cp) wil_ethtoolops_set_coalesce() argument
/linux-4.1.27/arch/c6x/boot/dts/
H A DMakefile16 $(call if_changed,cp)
/linux-4.1.27/drivers/net/slip/
H A Dslhc.c82 static unsigned char *encode(unsigned char *cp, unsigned short n);
84 static unsigned char * put16(unsigned char *cp, unsigned short x);
171 put16(unsigned char *cp, unsigned short x) put16() argument
173 *cp++ = x >> 8; put16()
174 *cp++ = x; put16()
176 return cp; put16()
182 encode(unsigned char *cp, unsigned short n) encode() argument
185 *cp++ = 0; encode()
186 cp = put16(cp,n); encode()
188 *cp++ = n; encode()
190 return cp; encode()
237 register unsigned char *cp = new_seq; slhc_compress() local
365 cp = encode(cp,deltaS); slhc_compress()
375 cp = encode(cp,deltaS); slhc_compress()
381 cp = encode(cp,deltaA); slhc_compress()
387 cp = encode(cp,deltaS); slhc_compress()
414 cp = new_seq; slhc_compress()
421 cp = new_seq; slhc_compress()
427 cp = encode(cp,deltaS); slhc_compress()
439 * (cp - new_seq) is the number of bytes we need for compressed slhc_compress()
442 * So, (cp - new_seq) + 4 bytes of header are needed. slhc_compress()
444 deltaS = cp - new_seq; slhc_compress()
446 cp = ocp; slhc_compress()
448 *cp++ = changes | NEW_C; slhc_compress()
449 *cp++ = cs->cs_this; slhc_compress()
452 cp = ocp; slhc_compress()
454 *cp++ = changes; slhc_compress()
456 *(__sum16 *)cp = csum; slhc_compress()
457 cp += 2; slhc_compress()
459 memcpy(cp,new_seq,deltaS); /* Write list of deltas */ slhc_compress()
460 memcpy(cp+deltaS,icp+hlen,isize-hlen); slhc_compress()
463 return isize - hlen + deltaS + (cp - ocp); slhc_compress()
495 unsigned char *cp = icp; slhc_uncompress() local
503 changes = *cp++; slhc_uncompress()
508 x = *cp++; /* Read conn index */ slhc_uncompress()
527 thp->check = *(__sum16 *)cp; slhc_uncompress()
528 cp += 2; slhc_uncompress()
557 if((x = decode(&cp)) == -1) { slhc_uncompress()
564 if((x = decode(&cp)) == -1) { slhc_uncompress()
570 if((x = decode(&cp)) == -1) { slhc_uncompress()
576 if((x = decode(&cp)) == -1) { slhc_uncompress()
584 if((x = decode(&cp)) == -1) { slhc_uncompress()
592 * At this point, cp points to the first byte of data in the slhc_uncompress()
597 len = isize - (cp - icp); slhc_uncompress()
604 memmove(icp + hdrlen, cp, len - hdrlen); slhc_uncompress()
606 cp = icp; slhc_uncompress()
607 memcpy(cp, ip, 20); slhc_uncompress()
608 cp += 20; slhc_uncompress()
611 memcpy(cp, cs->cs_ipopt, (ip->ihl - 5) * 4); slhc_uncompress()
612 cp += (ip->ihl - 5) * 4; slhc_uncompress()
618 memcpy(cp, thp, 20); slhc_uncompress()
619 cp += 20; slhc_uncompress()
622 memcpy(cp, cs->cs_tcpopt, ((thp->doff) - 5) * 4); slhc_uncompress()
623 cp += ((thp->doff) - 5) * 4; slhc_uncompress()
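Aside: put16()/encode() above are Van Jacobson header compression's variable-length delta encoding: a value in 1..255 takes one byte, anything else takes a zero escape byte plus the 16-bit value in big-endian order. The listing elides the threshold test (line 184); assuming the usual n >= 256 || n == 0 condition, a standalone copy:

#include <stdio.h>

static unsigned char *put16(unsigned char *cp, unsigned short x)
{
	*cp++ = x >> 8;		/* big-endian: high byte first */
	*cp++ = x;
	return cp;
}

static unsigned char *encode(unsigned char *cp, unsigned short n)
{
	if (n >= 256 || n == 0) {	/* assumed elided condition */
		*cp++ = 0;		/* escape byte */
		cp = put16(cp, n);
	} else {
		*cp++ = n;		/* small delta: one byte */
	}
	return cp;
}

int main(void)
{
	unsigned char buf[8], *end = encode(buf, 300);

	printf("300 -> %d bytes: %02x %02x %02x\n",
	       (int)(end - buf), buf[0], buf[1], buf[2]);	/* 00 01 2c */
	return 0;
}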
/linux-4.1.27/tools/perf/util/
H A Dthread-stack.c32 struct call_path cp[CALL_PATH_BLOCK_SIZE]; member in struct:call_path_block
71 * @cp: call path
79 struct call_path *cp; member in struct:thread_stack_entry
192 return ts->stack[ts->cnt - 1].cp->in_kernel; thread_stack__in_kernel()
208 cr.cp = tse->cp; thread_stack__call_return()
330 static void call_path__init(struct call_path *cp, struct call_path *parent, call_path__init() argument
333 cp->parent = parent; call_path__init()
334 cp->sym = sym; call_path__init()
335 cp->ip = sym ? 0 : ip; call_path__init()
336 cp->db_id = 0; call_path__init()
337 cp->in_kernel = in_kernel; call_path__init()
338 RB_CLEAR_NODE(&cp->rb_node); call_path__init()
339 cp->children = RB_ROOT; call_path__init()
371 struct call_path *cp; call_path__new() local
386 cp = &cpb->cp[n]; call_path__new()
388 call_path__init(cp, parent, sym, ip, in_kernel); call_path__new()
390 return cp; call_path__new()
399 struct call_path *cp; call_path__findnew() local
411 cp = rb_entry(node_parent, struct call_path, rb_node); call_path__findnew()
413 if (cp->sym == sym && cp->ip == ip) call_path__findnew()
414 return cp; call_path__findnew()
416 if (sym < cp->sym || (sym == cp->sym && ip < cp->ip)) call_path__findnew()
422 cp = call_path__new(cpr, parent, sym, ip, in_kernel); call_path__findnew()
423 if (!cp) call_path__findnew()
426 rb_link_node(&cp->rb_node, node_parent, p); call_path__findnew()
427 rb_insert_color(&cp->rb_node, &parent->children); call_path__findnew()
429 return cp; call_path__findnew()
462 u64 timestamp, u64 ref, struct call_path *cp, thread_stack__push_cp()
479 tse->cp = cp; thread_stack__push_cp()
497 if (tse->cp->sym == sym) thread_stack__pop_cp()
534 struct call_path *cp; thread_stack__bottom() local
548 cp = call_path__findnew(cpr, &cpr->call_path, sym, ip, thread_stack__bottom()
550 if (!cp) thread_stack__bottom()
553 return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp, thread_stack__bottom()
564 struct call_path *cp, *parent; thread_stack__no_call_return() local
580 cp = call_path__findnew(cpr, &cpr->call_path, thread_stack__no_call_return()
583 if (!cp) thread_stack__no_call_return()
586 cp, true); thread_stack__no_call_return()
600 parent = ts->stack[ts->cnt - 1].cp; thread_stack__no_call_return()
605 cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip, thread_stack__no_call_return()
607 if (!cp) thread_stack__no_call_return()
610 err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp, thread_stack__no_call_return()
631 if (tse->cp->sym == NULL && tse->cp->ip == 0) { thread_stack__trace_begin()
645 struct call_path *cp; thread_stack__trace_end() local
652 cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0, thread_stack__trace_end()
654 if (!cp) thread_stack__trace_end()
659 return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp, thread_stack__trace_end()
711 struct call_path *cp; thread_stack__process() local
721 cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, thread_stack__process()
724 if (!cp) thread_stack__process()
727 cp, false); thread_stack__process()
461 thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr, u64 timestamp, u64 ref, struct call_path *cp, bool no_call) thread_stack__push_cp() argument
H A Dstring.c60 static const char *skip_sep(const char *cp) skip_sep() argument
62 while (*cp && isspace(*cp)) skip_sep()
63 cp++; skip_sep()
65 return cp; skip_sep()
68 static const char *skip_arg(const char *cp) skip_arg() argument
70 while (*cp && !isspace(*cp)) skip_arg()
71 cp++; skip_arg()
73 return cp; skip_arg()
H A Ddb-export.c390 int db_export__call_path(struct db_export *dbe, struct call_path *cp) db_export__call_path() argument
394 if (cp->db_id) db_export__call_path()
397 if (cp->parent) { db_export__call_path()
398 err = db_export__call_path(dbe, cp->parent); db_export__call_path()
403 cp->db_id = ++dbe->call_path_last_db_id; db_export__call_path()
406 return dbe->export_call_path(dbe, cp); db_export__call_path()
418 err = db_export__call_path(dbe, cr->cp); db_export__call_return()
H A Ddb-export.h63 int (*export_call_path)(struct db_export *dbe, struct call_path *cp);
103 int db_export__call_path(struct db_export *dbe, struct call_path *cp);
H A Dthread-stack.h49 * @cp: call path
61 struct call_path *cp; member in struct:call_return
/linux-4.1.27/security/tomoyo/
H A Daudit.c26 char *cp; tomoyo_print_bprm() local
37 cp = buffer + len; tomoyo_print_bprm()
39 memmove(cp, "} envp[]={ ", 11); tomoyo_print_bprm()
40 cp += 11; tomoyo_print_bprm()
42 last_start = cp; tomoyo_print_bprm()
51 if (cp == last_start) tomoyo_print_bprm()
52 *cp++ = '"'; tomoyo_print_bprm()
53 if (cp >= buffer + tomoyo_buffer_len - 32) { tomoyo_print_bprm()
57 *cp++ = '\\'; tomoyo_print_bprm()
58 *cp++ = '\\'; tomoyo_print_bprm()
60 *cp++ = c; tomoyo_print_bprm()
62 *cp++ = '"'; tomoyo_print_bprm()
63 *cp++ = ' '; tomoyo_print_bprm()
64 last_start = cp; tomoyo_print_bprm()
66 *cp++ = '\\'; tomoyo_print_bprm()
67 *cp++ = (c >> 6) + '0'; tomoyo_print_bprm()
68 *cp++ = ((c >> 3) & 7) + '0'; tomoyo_print_bprm()
69 *cp++ = (c & 7) + '0'; tomoyo_print_bprm()
76 cp = last_start; tomoyo_print_bprm()
77 memmove(cp, "... ", 4); tomoyo_print_bprm()
78 cp += 4; tomoyo_print_bprm()
80 memmove(cp, "} envp[]={ ", 11); tomoyo_print_bprm()
81 cp += 11; tomoyo_print_bprm()
82 last_start = cp; tomoyo_print_bprm()
88 cp = last_start; tomoyo_print_bprm()
89 memmove(cp, "... ", 4); tomoyo_print_bprm()
90 cp += 4; tomoyo_print_bprm()
99 *cp++ = '}'; tomoyo_print_bprm()
100 *cp = '\0'; tomoyo_print_bprm()
H A Drealpath.c26 char *cp; tomoyo_encode2() local
43 cp = kzalloc(len + 10, GFP_NOFS); tomoyo_encode2()
44 if (!cp) tomoyo_encode2()
46 cp0 = cp; tomoyo_encode2()
52 *cp++ = '\\'; tomoyo_encode2()
53 *cp++ = '\\'; tomoyo_encode2()
55 *cp++ = c; tomoyo_encode2()
57 *cp++ = '\\'; tomoyo_encode2()
58 *cp++ = (c >> 6) + '0'; tomoyo_encode2()
59 *cp++ = ((c >> 3) & 7) + '0'; tomoyo_encode2()
60 *cp++ = (c & 7) + '0'; tomoyo_encode2()
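Aside: tomoyo_encode2() above passes printable bytes through, doubles backslashes, and turns everything else into a three-digit octal "\ooo" escape. The per-byte rule as a standalone helper (the exact printable bounds sit on elided lines, so c > ' ' && c < 127 here is an assumption):

#include <stdio.h>

static char *encode_byte(char *cp, unsigned char c)
{
	if (c == '\\') {
		*cp++ = '\\';			/* backslash doubles */
		*cp++ = '\\';
	} else if (c > ' ' && c < 127) {	/* assumed printable range */
		*cp++ = c;
	} else {
		*cp++ = '\\';			/* \ooo octal escape */
		*cp++ = (c >> 6) + '0';
		*cp++ = ((c >> 3) & 7) + '0';
		*cp++ = (c & 7) + '0';
	}
	return cp;
}

int main(void)
{
	char buf[16], *end = encode_byte(encode_byte(buf, 'a'), '\n');

	*end = '\0';
	printf("%s\n", buf);			/* prints: a\012 */
	return 0;
}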
H A Dcondition.c161 char *cp = strchr(arg_ptr, '='); tomoyo_scan_bprm() local
162 if (cp) { tomoyo_scan_bprm()
163 *cp = '\0'; tomoyo_scan_bprm()
164 if (!tomoyo_envp(arg_ptr, cp + 1, tomoyo_scan_bprm()
251 char *cp = start + strlen(start) - 1; tomoyo_get_dqword() local
252 if (cp == start || *start++ != '"' || *cp != '"') tomoyo_get_dqword()
254 *cp = '\0'; tomoyo_get_dqword()
311 char *cp = left + strlen(left) - 1; tomoyo_parse_envp() local
312 if (*cp-- != ']' || *cp != '"') tomoyo_parse_envp()
314 *cp = '\0'; tomoyo_parse_envp()
449 char *cp = strchr(pos, ' '); tomoyo_get_transit_preference() local
450 if (cp) tomoyo_get_transit_preference()
451 *cp = '\0'; tomoyo_get_transit_preference()
455 if (cp) tomoyo_get_transit_preference()
456 *cp = ' '; tomoyo_get_transit_preference()
497 char *cp; tomoyo_get_condition() local
512 cp = strchr(pos, ' '); tomoyo_get_condition()
513 if (cp) { tomoyo_get_condition()
514 *cp = '\0'; /* Will restore later. */ tomoyo_get_condition()
515 pos = cp + 1; tomoyo_get_condition()
H A Dutil.c133 const char *cp = strstr(string, keyword); tomoyo_permstr() local
134 if (cp) tomoyo_permstr()
135 return cp == string || *(cp - 1) == '/'; tomoyo_permstr()
199 const char *cp = *str; tomoyo_parse_ulong() local
202 if (*cp == '0') { tomoyo_parse_ulong()
203 char c = *(cp + 1); tomoyo_parse_ulong()
206 cp += 2; tomoyo_parse_ulong()
209 cp++; tomoyo_parse_ulong()
212 *result = simple_strtoul(cp, &ep, base); tomoyo_parse_ulong()
213 if (cp == ep) tomoyo_parse_ulong()
553 const unsigned char *cp = strchr(domainname, ' '); tomoyo_correct_domain() local
554 if (!cp) tomoyo_correct_domain()
557 !tomoyo_correct_word2(domainname, cp - domainname)) tomoyo_correct_domain()
559 domainname = cp + 1; tomoyo_correct_domain()
573 const unsigned char *cp; tomoyo_domain_def() local
577 cp = strchr(buffer, ' '); tomoyo_domain_def()
578 if (!cp) tomoyo_domain_def()
581 len = cp - buffer; tomoyo_domain_def()
952 const char *cp; tomoyo_get_exe() local
961 cp = tomoyo_realpath_from_path(&exe_file->f_path); tomoyo_get_exe()
963 return cp; tomoyo_get_exe()
H A Dcommon.c547 const char *cp = strstr(string, find); tomoyo_find_yesno() local
548 if (cp) { tomoyo_find_yesno()
549 cp += strlen(find); tomoyo_find_yesno()
550 if (!strncmp(cp, "=yes", 4)) tomoyo_find_yesno()
552 else if (!strncmp(cp, "=no", 3)) tomoyo_find_yesno()
570 const char *cp = strstr(string, find); tomoyo_set_uint() local
571 if (cp) tomoyo_set_uint()
572 sscanf(cp + strlen(find), "=%u", i); tomoyo_set_uint()
664 char *cp; tomoyo_write_profile() local
669 i = simple_strtoul(data, &cp, 10); tomoyo_write_profile()
670 if (*cp != '-') tomoyo_write_profile()
672 data = cp + 1; tomoyo_write_profile()
676 cp = strchr(data, '='); tomoyo_write_profile()
677 if (!cp) tomoyo_write_profile()
679 *cp++ = '\0'; tomoyo_write_profile()
683 = tomoyo_get_name(cp); tomoyo_write_profile()
696 tomoyo_set_uint(&profile->pref[i], cp, tomoyo_write_profile()
700 return tomoyo_set_mode(data, cp, profile); tomoyo_write_profile()
1182 const char *cp = tomoyo_dif[profile]; tomoyo_write_domain() local
1183 if (strncmp(data, cp, strlen(cp) - 1)) tomoyo_write_domain()
1942 char *cp = strchr(header, '\n'); tomoyo_add_entry() local
1944 if (!cp) tomoyo_add_entry()
1946 cp = strchr(cp + 1, '\n'); tomoyo_add_entry()
1947 if (!cp) tomoyo_add_entry()
1949 *cp++ = '\0'; tomoyo_add_entry()
1950 len = strlen(cp) + 1; tomoyo_add_entry()
1952 if (*cp == 'f') { tomoyo_add_entry()
1970 snprintf(buffer, len - 1, "%s", cp); tomoyo_add_entry()
2557 char *cp = strchr(line, ' '); tomoyo_parse_policy() local
2558 if (cp) { tomoyo_parse_policy()
2559 *cp++ = '\0'; tomoyo_parse_policy()
2561 memmove(line, cp, strlen(cp) + 1); tomoyo_parse_policy()
2603 char *cp = kzalloc(len, GFP_NOFS); tomoyo_write_control() local
2604 if (!cp) { tomoyo_write_control()
2608 memmove(cp, cp0, head->w.avail); tomoyo_write_control()
2610 head->write_buf = cp; tomoyo_write_control()
2611 cp0 = cp; tomoyo_write_control()
H A Ddomain.c197 const char *cp = strrchr(name, ' '); tomoyo_last_word() local
198 if (cp) tomoyo_last_word()
199 return cp + 1; tomoyo_last_word()
449 const char *cp = domainname; tomoyo_assign_namespace() local
451 while (*cp && *cp++ != ' ') tomoyo_assign_namespace()
757 char *cp; local
760 cp = strrchr(ee->tmp, ' ');
761 if (cp)
762 *cp = '\0';
/linux-4.1.27/drivers/scsi/sym53c8xx_2/
H A Dsym_hipd.c59 static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp);
60 static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp);
61 static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp);
73 static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) sym_print_msg() argument
75 sym_print_addr(cp->cmd, "%s: ", label); sym_print_msg()
1419 static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr) sym_prepare_nego() argument
1421 struct sym_tcb *tp = &np->target[cp->target]; sym_prepare_nego()
1462 cp->nego_status = nego; sym_prepare_nego()
1465 tp->nego_cp = cp; /* Keep track a nego will be performed */ sym_prepare_nego()
1467 sym_print_nego_msg(np, cp->target, sym_prepare_nego()
1480 void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp) sym_put_start_queue() argument
1499 np->last_cp = cp; sym_put_start_queue()
1508 cp->host_xflags |= HX_DMAP_DIRTY; sym_put_start_queue()
1520 np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); sym_put_start_queue()
1525 scmd_printk(KERN_DEBUG, cp->cmd, "queuepos=%d\n", sym_put_start_queue()
1543 struct sym_ccb *cp; sym_start_next_ccbs() local
1559 cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq); sym_start_next_ccbs()
1560 if (cp->tag != NO_TAG) { sym_start_next_ccbs()
1566 lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba); sym_start_next_ccbs()
1575 lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); sym_start_next_ccbs()
1580 cp->started = 1; sym_start_next_ccbs()
1582 sym_put_start_queue(np, cp); sym_start_next_ccbs()
1596 struct sym_ccb *cp; sym_wakeup_done() local
1612 cp = sym_ccb_from_dsa(np, dsa); sym_wakeup_done()
1613 if (cp) { sym_wakeup_done()
1615 sym_complete_ok (np, cp); sym_wakeup_done()
1644 struct sym_ccb *cp; sym_flush_comp_queue() local
1648 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_flush_comp_queue()
1649 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); sym_flush_comp_queue()
1651 if (cp->host_status == HS_WAIT) sym_flush_comp_queue()
1653 cmd = cp->cmd; sym_flush_comp_queue()
1658 struct sym_tcb *tp = &np->target[cp->target]; sym_flush_comp_queue()
1659 struct sym_lcb *lp = sym_lp(tp, cp->lun); sym_flush_comp_queue()
1661 sym_remque(&cp->link2_ccbq); sym_flush_comp_queue()
1662 sym_insque_tail(&cp->link2_ccbq, sym_flush_comp_queue()
1664 if (cp->started) { sym_flush_comp_queue()
1665 if (cp->tag != NO_TAG) sym_flush_comp_queue()
1671 cp->started = 0; sym_flush_comp_queue()
1675 sym_free_ccb(np, cp); sym_flush_comp_queue()
2036 struct sym_ccb *cp; sym_settrans() local
2037 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_settrans()
2038 if (cp->target != target) sym_settrans()
2040 cp->phys.select.sel_scntl3 = tp->head.wval; sym_settrans()
2041 cp->phys.select.sel_sxfer = tp->head.sval; sym_settrans()
2043 cp->phys.select.sel_scntl4 = tp->head.uval; sym_settrans()
2190 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); sym_recover_scsi_int() local
2213 if (cp) { sym_recover_scsi_int()
2214 cp->host_status = hsts; sym_recover_scsi_int()
2319 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); sym_int_par() local
2337 if (!cp) sym_int_par()
2351 cp->xerr_status |= XE_PARITY_ERR; sym_int_par()
2374 sym_set_script_dp (np, cp, dsp); sym_int_par()
2414 struct sym_ccb *cp; sym_int_ma() local
2425 * locate matching cp if any. sym_int_ma()
2427 cp = sym_ccb_from_dsa(np, dsa); sym_int_ma()
2470 if (cp && (cp->phys.select.sel_scntl3 & EWS)) { sym_int_ma()
2513 cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); sym_int_ma()
2522 if (!cp) { sym_int_ma()
2534 tblp = (u32 *) ((char*) &cp->phys + oadr); sym_int_ma()
2556 sym_print_addr(cp->cmd, sym_int_ma()
2567 sym_print_addr(cp->cmd, sym_int_ma()
2587 nxtdsp = scr_to_cpu(cp->phys.pm0.ret); sym_int_ma()
2589 nxtdsp = scr_to_cpu(cp->phys.pm1.ret); sym_int_ma()
2596 pm = &cp->phys.pm0; sym_int_ma()
2600 pm = &cp->phys.pm1; sym_int_ma()
2622 if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) && sym_int_ma()
2632 cp->phys.wresid.addr = cpu_to_scr(tmp); sym_int_ma()
2635 cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1); sym_int_ma()
2653 sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n", sym_int_ma()
2663 sym_set_script_dp (np, cp, newcmd); sym_int_ma()
2715 if (cp->tag != NO_TAG && olen - rest <= 3) { sym_int_ma()
2716 cp->host_status = HS_BUSY; sym_int_ma()
2717 np->msgout[0] = IDENTIFY(0, cp->lun); sym_int_ma()
2728 struct scsi_device *dev = cp->cmd->device; sym_int_ma()
2981 struct sym_ccb *cp; sym_dequeue_from_squeue() local
2994 cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i])); sym_dequeue_from_squeue()
2995 assert(cp); sym_dequeue_from_squeue()
2998 cp->host_flags &= ~HF_HINT_IARB; sym_dequeue_from_squeue()
3000 if ((target == -1 || cp->target == target) && sym_dequeue_from_squeue()
3001 (lun == -1 || cp->lun == lun) && sym_dequeue_from_squeue()
3002 (task == -1 || cp->tag == task)) { sym_dequeue_from_squeue()
3004 sym_set_cam_status(cp->cmd, DID_SOFT_ERROR); sym_dequeue_from_squeue()
3006 sym_set_cam_status(cp->cmd, DID_REQUEUE); sym_dequeue_from_squeue()
3008 sym_remque(&cp->link_ccbq); sym_dequeue_from_squeue()
3009 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); sym_dequeue_from_squeue()
3043 static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp) sym_sir_bad_scsi_status() argument
3046 u_char s_status = cp->ssss_status; sym_sir_bad_scsi_status()
3047 u_char h_flags = cp->host_flags; sym_sir_bad_scsi_status()
3072 sym_print_addr(cp->cmd, "%s\n", sym_sir_bad_scsi_status()
3076 sym_complete_error (np, cp); sym_sir_bad_scsi_status()
3084 sym_complete_error (np, cp); sym_sir_bad_scsi_status()
3092 sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); sym_sir_bad_scsi_status()
3099 cp->sv_scsi_status = cp->ssss_status; sym_sir_bad_scsi_status()
3100 cp->sv_xerr_status = cp->xerr_status; sym_sir_bad_scsi_status()
3101 cp->sv_resid = sym_compute_residual(np, cp); sym_sir_bad_scsi_status()
3108 cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun); sym_sir_bad_scsi_status()
3119 * cp->nego_status is filled by sym_prepare_nego(). sym_sir_bad_scsi_status()
3121 cp->nego_status = 0; sym_sir_bad_scsi_status()
3122 msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]); sym_sir_bad_scsi_status()
3126 cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg2); sym_sir_bad_scsi_status()
3127 cp->phys.smsg.size = cpu_to_scr(msglen); sym_sir_bad_scsi_status()
3132 cp->phys.cmd.addr = CCB_BA(cp, sensecmd); sym_sir_bad_scsi_status()
3133 cp->phys.cmd.size = cpu_to_scr(6); sym_sir_bad_scsi_status()
3138 cp->sensecmd[0] = REQUEST_SENSE; sym_sir_bad_scsi_status()
3139 cp->sensecmd[1] = 0; sym_sir_bad_scsi_status()
3140 if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7) sym_sir_bad_scsi_status()
3141 cp->sensecmd[1] = cp->lun << 5; sym_sir_bad_scsi_status()
3142 cp->sensecmd[4] = SYM_SNS_BBUF_LEN; sym_sir_bad_scsi_status()
3143 cp->data_len = SYM_SNS_BBUF_LEN; sym_sir_bad_scsi_status()
3148 memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN); sym_sir_bad_scsi_status()
3149 cp->phys.sense.addr = CCB_BA(cp, sns_bbuf); sym_sir_bad_scsi_status()
3150 cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); sym_sir_bad_scsi_status()
3157 cp->phys.head.savep = cpu_to_scr(startp); sym_sir_bad_scsi_status()
3158 cp->phys.head.lastp = cpu_to_scr(startp); sym_sir_bad_scsi_status()
3159 cp->startp = cpu_to_scr(startp); sym_sir_bad_scsi_status()
3160 cp->goalp = cpu_to_scr(startp + 16); sym_sir_bad_scsi_status()
3162 cp->host_xflags = 0; sym_sir_bad_scsi_status()
3163 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; sym_sir_bad_scsi_status()
3164 cp->ssss_status = S_ILLEGAL; sym_sir_bad_scsi_status()
3165 cp->host_flags = (HF_SENSE|HF_DATA_IN); sym_sir_bad_scsi_status()
3166 cp->xerr_status = 0; sym_sir_bad_scsi_status()
3167 cp->extra_bytes = 0; sym_sir_bad_scsi_status()
3169 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); sym_sir_bad_scsi_status()
3174 sym_put_start_queue(np, cp); sym_sir_bad_scsi_status()
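
The hits at lines 3138-3143 show sym_sir_bad_scsi_status() building the automatic REQUEST SENSE command byte by byte. A minimal user-space sketch of that CDB construction, assuming the standard 6-byte REQUEST SENSE opcode 0x03 and an illustrative sense-buffer length standing in for SYM_SNS_BBUF_LEN:

#include <stdint.h>

#define REQUEST_SENSE 0x03
#define SNS_BUF_LEN   32           /* illustrative; stand-in for SYM_SNS_BBUF_LEN */

/* Build a 6-byte REQUEST SENSE CDB; the LUN is folded into byte 1
 * only for SCSI-2 and older devices, as in the hits above. */
static void build_request_sense(uint8_t cdb[6], unsigned lun, int scsi_level)
{
        cdb[0] = REQUEST_SENSE;
        cdb[1] = (scsi_level <= 2 && lun <= 7) ? (uint8_t)(lun << 5) : 0;
        cdb[2] = 0;
        cdb[3] = 0;
        cdb[4] = SNS_BUF_LEN;      /* allocation length */
        cdb[5] = 0;
}
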
3202 struct sym_ccb *cp; sym_clear_tasks() local
3218 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_clear_tasks()
3219 cmd = cp->cmd; sym_clear_tasks()
3220 if (cp->host_status != HS_DISCONNECT || sym_clear_tasks()
3221 cp->target != target || sym_clear_tasks()
3222 (lun != -1 && cp->lun != lun) || sym_clear_tasks()
3224 (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { sym_clear_tasks()
3225 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); sym_clear_tasks()
3228 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); sym_clear_tasks()
3235 printf("XXXX TASK @%p CLEARED\n", cp); sym_clear_tasks()
3284 struct sym_ccb *cp; sym_sir_task_recovery() local
3325 cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); sym_sir_task_recovery()
3326 if (cp->host_status != HS_DISCONNECT) sym_sir_task_recovery()
3328 if (cp->to_abort) { sym_sir_task_recovery()
3329 target = cp->target; sym_sir_task_recovery()
3355 cp = NULL; sym_sir_task_recovery()
3357 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_sir_task_recovery()
3358 if (cp->host_status != HS_BUSY && sym_sir_task_recovery()
3359 cp->host_status != HS_NEGOTIATE) sym_sir_task_recovery()
3361 if (!cp->to_abort) sym_sir_task_recovery()
3369 if (cp == np->last_cp) { sym_sir_task_recovery()
3370 cp->to_abort = 0; sym_sir_task_recovery()
3393 i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); sym_sir_task_recovery()
3399 assert(i && sym_get_cam_status(cp->cmd) == DID_SOFT_ERROR); sym_sir_task_recovery()
3401 sym_remque(&cp->link_ccbq); sym_sir_task_recovery()
3402 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); sym_sir_task_recovery()
3407 if (cp->to_abort == 2) sym_sir_task_recovery()
3408 sym_set_cam_status(cp->cmd, DID_TIME_OUT); sym_sir_task_recovery()
3410 sym_set_cam_status(cp->cmd, DID_ABORT); sym_sir_task_recovery()
3471 cp = NULL; sym_sir_task_recovery()
3473 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_sir_task_recovery()
3474 if (cp->host_status != HS_DISCONNECT) sym_sir_task_recovery()
3476 if (cp->target != target) sym_sir_task_recovery()
3478 if (!cp->to_abort) sym_sir_task_recovery()
3501 np->abrt_msg[0] = IDENTIFY(0, cp->lun); sym_sir_task_recovery()
3509 if (cp->tag == NO_TAG) { sym_sir_task_recovery()
3513 np->abrt_msg[1] = cp->scsi_smsg[1]; sym_sir_task_recovery()
3514 np->abrt_msg[2] = cp->scsi_smsg[2]; sym_sir_task_recovery()
3523 if (cp->to_abort == 2) sym_sir_task_recovery()
3524 sym_set_cam_status(cp->cmd, DID_TIME_OUT); sym_sir_task_recovery()
3525 cp->to_abort = 0; /* We do not expect to fail here */ sym_sir_task_recovery()
3639 static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs) sym_evaluate_dp() argument
3653 pm = &cp->phys.pm0; sym_evaluate_dp()
3655 pm = &cp->phys.pm1; sym_evaluate_dp()
3667 if (cp->host_flags & HF_SENSE) { sym_evaluate_dp()
3678 tmp = scr_to_cpu(cp->goalp); sym_evaluate_dp()
3682 dp_sgmin = SYM_CONF_MAX_SG - cp->segments; sym_evaluate_dp()
3700 tmp = scr_to_cpu(cp->phys.data[dp_sg].size); sym_evaluate_dp()
3711 tmp = scr_to_cpu(cp->phys.data[dp_sg].size); sym_evaluate_dp()
3732 if (dp_sg > cp->ext_sg || sym_evaluate_dp()
3733 (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { sym_evaluate_dp()
3734 cp->ext_sg = dp_sg; sym_evaluate_dp()
3735 cp->ext_ofs = dp_ofs; sym_evaluate_dp()
3757 static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs) sym_modify_dp() argument
3760 u32 dp_scr = sym_get_script_dp (np, cp); sym_modify_dp()
3770 if (cp->host_flags & HF_SENSE) sym_modify_dp()
3777 dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs); sym_modify_dp()
3785 dp_ret = cpu_to_scr(cp->goalp); sym_modify_dp()
3806 pm = &cp->phys.pm0; sym_modify_dp()
3810 pm = &cp->phys.pm1; sym_modify_dp()
3826 tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr); sym_modify_dp()
3827 tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs; sym_modify_dp()
3832 sym_set_script_dp (np, cp, dp_scr); sym_modify_dp()
3856 int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp) sym_compute_residual() argument
3868 if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { sym_compute_residual()
3869 if (cp->xerr_status & XE_EXTRA_DATA) sym_compute_residual()
3870 resid -= cp->extra_bytes; sym_compute_residual()
3871 if (cp->xerr_status & XE_SODL_UNRUN) sym_compute_residual()
3873 if (cp->xerr_status & XE_SWIDE_OVRUN) sym_compute_residual()
3881 if (cp->phys.head.lastp == cp->goalp) sym_compute_residual()
3888 if (cp->startp == cp->phys.head.lastp || sym_compute_residual()
3889 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), sym_compute_residual()
3891 return cp->data_len - cp->odd_byte_adjustment; sym_compute_residual()
3897 if (cp->host_flags & HF_SENSE) { sym_compute_residual()
3905 dp_sgmin = SYM_CONF_MAX_SG - cp->segments; sym_compute_residual()
3906 resid = -cp->ext_ofs; sym_compute_residual()
3907 for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { sym_compute_residual()
3908 u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); sym_compute_residual()
3912 resid -= cp->odd_byte_adjustment; sym_compute_residual()
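
A hedged sketch of the residual computation visible at lines 3905-3912: start from the negative offset into the current scatter segment and add the sizes of every segment that was never reached. MAX_SG and the sg layout here are illustrative, not the driver's exact structures:

#include <stdint.h>

#define MAX_SG 96                  /* illustrative bound */

struct sg_entry { uint32_t size; };

static int compute_residual(const struct sg_entry *data,
                            int ext_sg, int ext_ofs)
{
        int resid = -ext_ofs;      /* bytes already moved in current seg */
        int dp_sg;

        for (dp_sg = ext_sg; dp_sg < MAX_SG; dp_sg++)
                resid += (int)data[dp_sg].size;
        return resid;
}
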
3958 sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) sym_sync_nego_check() argument
3960 int target = cp->target; sym_sync_nego_check()
3995 sym_print_addr(cp->cmd, sym_sync_nego_check()
4036 static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) sym_sync_nego() argument
4046 if (cp->nego_status && cp->nego_status != NS_SYNC) sym_sync_nego()
4054 result = sym_sync_nego_check(np, req, cp); sym_sync_nego()
4058 cp->nego_status = NS_SYNC; sym_sync_nego()
4173 static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) sym_ppr_nego() argument
4183 if (cp->nego_status && cp->nego_status != NS_PPR) sym_ppr_nego()
4191 result = sym_ppr_nego_check(np, req, cp->target); sym_ppr_nego()
4195 cp->nego_status = NS_PPR; sym_ppr_nego()
4210 sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) sym_wide_nego_check() argument
4212 int target = cp->target; sym_wide_nego_check()
4234 sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n", sym_wide_nego_check()
4273 static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) sym_wide_nego() argument
4283 if (cp->nego_status && cp->nego_status != NS_WIDE) sym_wide_nego()
4291 result = sym_wide_nego_check(np, req, cp); sym_wide_nego()
4295 cp->nego_status = NS_WIDE; sym_wide_nego()
4308 sym_print_nego_msg(np, cp->target, sym_wide_nego()
4312 cp->nego_status = NS_SYNC; sym_wide_nego()
4337 static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) sym_nego_default() argument
4339 switch (cp->nego_status) { sym_nego_default()
4342 sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0); sym_nego_default()
4353 sym_setsync (np, cp->target, 0, 0, 0, 0); sym_nego_default()
4356 sym_setwide (np, cp->target, 0); sym_nego_default()
4361 cp->nego_status = 0; sym_nego_default()
4368 static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) sym_nego_rejected() argument
4370 sym_nego_default(np, tp, cp); sym_nego_rejected()
4381 struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); sym_int_sir() local
4403 sym_complete_error(np, cp); sym_int_sir()
4419 scmd_printk(KERN_WARNING, cp->cmd, sym_int_sir()
4427 scmd_printk(KERN_WARNING, cp->cmd, sym_int_sir()
4435 scmd_printk(KERN_WARNING, cp->cmd, sym_int_sir()
4464 scmd_printk(KERN_WARNING, cp->cmd, sym_int_sir()
4476 if (cp) { sym_int_sir()
4477 cp->xerr_status &= ~XE_PARITY_ERR; sym_int_sir()
4478 if (!cp->xerr_status) sym_int_sir()
4489 if (!cp) sym_int_sir()
4491 sym_sir_bad_scsi_status(np, num, cp); sym_int_sir()
4498 sym_print_msg(cp, "M_REJECT to send for ", np->msgin); sym_int_sir()
4508 if (cp) { sym_int_sir()
4510 cp->xerr_status |= XE_SWIDE_OVRUN; sym_int_sir()
4519 if (cp) { sym_int_sir()
4521 cp->xerr_status |= XE_SODL_UNRUN; sym_int_sir()
4531 if (cp) { sym_int_sir()
4533 cp->xerr_status |= XE_EXTRA_DATA; sym_int_sir()
4534 cp->extra_bytes += INL(np, nc_scratcha); sym_int_sir()
4541 if (cp) { sym_int_sir()
4543 cp->xerr_status |= XE_BAD_PHASE; sym_int_sir()
4550 if (!cp) sym_int_sir()
4562 sym_print_msg(cp, "extended msg ", sym_int_sir()
4566 sym_modify_dp(np, tp, cp, tmp); sym_int_sir()
4569 sym_sync_nego(np, tp, cp); sym_int_sir()
4572 sym_ppr_nego(np, tp, cp); sym_int_sir()
4575 sym_wide_nego(np, tp, cp); sym_int_sir()
4590 sym_print_msg(cp, "1 or 2 byte ", np->msgin); sym_int_sir()
4591 if (cp->host_flags & HF_SENSE) sym_int_sir()
4594 sym_modify_dp(np, tp, cp, -1); sym_int_sir()
4598 sym_nego_rejected(np, tp, cp); sym_int_sir()
4600 sym_print_addr(cp->cmd, sym_int_sir()
4615 sym_print_msg(cp, "WEIRD message received", np->msgin); sym_int_sir()
4630 sym_nego_default(np, tp, cp); sym_int_sir()
4658 struct sym_ccb *cp = NULL; sym_get_ccb() local
4668 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_get_ccb()
4702 lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba); sym_get_ccb()
4707 cp->tags_si = lp->tags_si; sym_get_ccb()
4708 ++lp->tags_sum[cp->tags_si]; sym_get_ccb()
4736 lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); sym_get_ccb()
4748 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); sym_get_ccb()
4751 sym_remque(&cp->link2_ccbq); sym_get_ccb()
4752 sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq); sym_get_ccb()
4756 cp->to_abort = 0; sym_get_ccb()
4757 cp->odd_byte_adjustment = 0; sym_get_ccb()
4758 cp->tag = tag; sym_get_ccb()
4759 cp->order = tag_order; sym_get_ccb()
4760 cp->target = tn; sym_get_ccb()
4761 cp->lun = ln; sym_get_ccb()
4764 sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag); sym_get_ccb()
4768 return cp; sym_get_ccb()
4770 sym_insque_head(&cp->link_ccbq, &np->free_ccbq); sym_get_ccb()
4777 void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp) sym_free_ccb() argument
4779 struct sym_tcb *tp = &np->target[cp->target]; sym_free_ccb()
4780 struct sym_lcb *lp = sym_lp(tp, cp->lun); sym_free_ccb()
4783 sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n", sym_free_ccb()
4784 cp, cp->tag); sym_free_ccb()
4794 if (cp->tag != NO_TAG) { sym_free_ccb()
4796 --lp->tags_sum[cp->tags_si]; sym_free_ccb()
4801 lp->cb_tags[lp->if_tag] = cp->tag; sym_free_ccb()
4808 lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba); sym_free_ccb()
4831 if (cp == tp->nego_cp) sym_free_ccb()
4839 if (cp == np->last_cp) sym_free_ccb()
4846 cp->cmd = NULL; sym_free_ccb()
4847 cp->host_status = HS_IDLE; sym_free_ccb()
4848 sym_remque(&cp->link_ccbq); sym_free_ccb()
4849 sym_insque_head(&cp->link_ccbq, &np->free_ccbq); sym_free_ccb()
4853 sym_remque(&cp->link2_ccbq); sym_free_ccb()
4854 sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq); sym_free_ccb()
4855 if (cp->started) { sym_free_ccb()
4856 if (cp->tag != NO_TAG) sym_free_ccb()
4862 cp->started = 0; sym_free_ccb()
4871 struct sym_ccb *cp = NULL; sym_alloc_ccb() local
4884 cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB"); sym_alloc_ccb()
4885 if (!cp) sym_alloc_ccb()
4896 cp->ccb_ba = vtobus(cp); sym_alloc_ccb()
4901 hcode = CCB_HASH_CODE(cp->ccb_ba); sym_alloc_ccb()
4902 cp->link_ccbh = np->ccbh[hcode]; sym_alloc_ccb()
4903 np->ccbh[hcode] = cp; sym_alloc_ccb()
4908 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, idle)); sym_alloc_ccb()
4909 cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); sym_alloc_ccb()
4914 cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2])); sym_alloc_ccb()
4919 sym_insque_head(&cp->link_ccbq, &np->free_ccbq); sym_alloc_ccb()
4925 sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq); sym_alloc_ccb()
4927 return cp; sym_alloc_ccb()
4929 if (cp) sym_alloc_ccb()
4930 sym_mfree_dma(cp, sizeof(*cp), "CCB"); sym_alloc_ccb()
4940 struct sym_ccb *cp; sym_ccb_from_dsa() local
4943 cp = np->ccbh[hcode]; sym_ccb_from_dsa()
4944 while (cp) { sym_ccb_from_dsa()
4945 if (cp->ccb_ba == dsa) sym_ccb_from_dsa()
4947 cp = cp->link_ccbh; sym_ccb_from_dsa()
4950 return cp; sym_ccb_from_dsa()
5140 int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) sym_queue_scsiio() argument
5152 cp->cmd = cmd; sym_queue_scsiio()
5157 tp = &np->target[cp->target]; sym_queue_scsiio()
5164 can_disconnect = (cp->tag != NO_TAG) || sym_queue_scsiio()
5167 msgptr = cp->scsi_smsg; sym_queue_scsiio()
5174 if (cp->tag != NO_TAG) { sym_queue_scsiio()
5175 u_char order = cp->order; sym_queue_scsiio()
5214 msgptr[msglen++] = cp->tag; sym_queue_scsiio()
5216 msgptr[msglen++] = (cp->tag << 1) + 1; sym_queue_scsiio()
5227 cp->nego_status = 0; sym_queue_scsiio()
5231 msglen += sym_prepare_nego(np, cp, msgptr + msglen); sym_queue_scsiio()
5237 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); sym_queue_scsiio()
5238 cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa)); sym_queue_scsiio()
5243 cp->phys.select.sel_id = cp->target; sym_queue_scsiio()
5244 cp->phys.select.sel_scntl3 = tp->head.wval; sym_queue_scsiio()
5245 cp->phys.select.sel_sxfer = tp->head.sval; sym_queue_scsiio()
5246 cp->phys.select.sel_scntl4 = tp->head.uval; sym_queue_scsiio()
5251 cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg); sym_queue_scsiio()
5252 cp->phys.smsg.size = cpu_to_scr(msglen); sym_queue_scsiio()
5257 cp->host_xflags = 0; sym_queue_scsiio()
5258 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; sym_queue_scsiio()
5259 cp->ssss_status = S_ILLEGAL; sym_queue_scsiio()
5260 cp->xerr_status = 0; sym_queue_scsiio()
5261 cp->host_flags = 0; sym_queue_scsiio()
5262 cp->extra_bytes = 0; sym_queue_scsiio()
5268 cp->ext_sg = -1; sym_queue_scsiio()
5269 cp->ext_ofs = 0; sym_queue_scsiio()
5275 return sym_setup_data_and_start(np, cmd, cp); sym_queue_scsiio()
5300 static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out) sym_abort_ccb() argument
5305 if (!cp || !cp->host_status || cp->host_status == HS_WAIT) sym_abort_ccb()
5312 if (cp->to_abort) { sym_abort_ccb()
5320 cp->to_abort = timed_out ? 2 : 1; sym_abort_ccb()
5332 struct sym_ccb *cp; sym_abort_scsiio() local
5338 cp = NULL; sym_abort_scsiio()
5342 cp = cp2; sym_abort_scsiio()
5347 return sym_abort_ccb(np, cp, timed_out); sym_abort_scsiio()
5360 void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp) sym_complete_error() argument
5372 if (!cp || !cp->cmd) sym_complete_error()
5375 cmd = cp->cmd; sym_complete_error()
5378 dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp, sym_complete_error()
5379 cp->host_status, cp->ssss_status, cp->host_flags); sym_complete_error()
5385 tp = &np->target[cp->target]; sym_complete_error()
5391 if (cp->xerr_status) { sym_complete_error()
5393 sym_print_xerr(cmd, cp->xerr_status); sym_complete_error()
5394 if (cp->host_status == HS_COMPLETE) sym_complete_error()
5395 cp->host_status = HS_COMP_ERR; sym_complete_error()
5401 resid = sym_compute_residual(np, cp); sym_complete_error()
5405 cp->sv_resid = 0; sym_complete_error()
5417 i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1); sym_complete_error()
5425 if (cp->host_status == HS_COMPLETE && sym_complete_error()
5426 cp->ssss_status == S_QUEUE_FULL) { sym_complete_error()
5443 cp->host_status = HS_BUSY; sym_complete_error()
5444 cp->ssss_status = S_ILLEGAL; sym_complete_error()
5457 sym_set_cam_result_error(np, cp, resid); sym_complete_error()
5465 sym_remque(&cp->link_ccbq); sym_complete_error()
5466 sym_insque_head(&cp->link_ccbq, &np->comp_ccbq); sym_complete_error()
5491 void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp) sym_complete_ok() argument
5501 if (!cp || !cp->cmd) sym_complete_ok()
5503 assert (cp->host_status == HS_COMPLETE); sym_complete_ok()
5508 cmd = cp->cmd; sym_complete_ok()
5513 tp = &np->target[cp->target]; sym_complete_ok()
5514 lp = sym_lp(tp, cp->lun); sym_complete_ok()
5521 if (cp->phys.head.lastp != cp->goalp) sym_complete_ok()
5522 resid = sym_compute_residual(np, cp); sym_complete_ok()
5539 sym_set_cam_result_ok(cp, cmd, resid); sym_complete_ok()
5562 sym_free_ccb (np, cp); sym_complete_ok()
5806 struct sym_ccb *cp; sym_hcb_free() local
5823 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_hcb_free()
5824 sym_mfree_dma(cp, sizeof(*cp), "CCB"); sym_hcb_free()
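
The sym_alloc_ccb()/sym_ccb_from_dsa() hits above show CCBs being hashed by their DMA bus address so the interrupt path can map the chip's DSA register back to a CCB. A self-contained sketch of that lookup, with an illustrative hash function (the driver's CCB_HASH_CODE may differ):

#include <stdint.h>

#define CCB_HASH_SIZE 16
#define CCB_HASH_CODE(ba) (((ba) >> 8) & (CCB_HASH_SIZE - 1))  /* illustrative */

struct ccb {
        uint32_t    ccb_ba;        /* bus address the chip loads into DSA */
        struct ccb *link_ccbh;     /* hash-chain link */
};

static struct ccb *ccbh[CCB_HASH_SIZE];

static void ccb_hash_insert(struct ccb *cp)
{
        unsigned h = CCB_HASH_CODE(cp->ccb_ba);

        cp->link_ccbh = ccbh[h];   /* push onto the chain head */
        ccbh[h] = cp;
}

static struct ccb *ccb_from_dsa(uint32_t dsa)
{
        struct ccb *cp = ccbh[CCB_HASH_CODE(dsa)];

        while (cp && cp->ccb_ba != dsa)
                cp = cp->link_ccbh;
        return cp;                 /* NULL: DSA matches no known CCB */
}
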
H A Dsym_glue.c187 void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid) sym_set_cam_result_error() argument
189 struct scsi_cmnd *cmd = cp->cmd; sym_set_cam_result_error()
194 scsi_status = cp->ssss_status; sym_set_cam_result_error()
196 if (cp->host_flags & HF_SENSE) { sym_set_cam_result_error()
197 scsi_status = cp->sv_scsi_status; sym_set_cam_result_error()
198 resid = cp->sv_resid; sym_set_cam_result_error()
199 if (sym_verbose && cp->sv_xerr_status) sym_set_cam_result_error()
200 sym_print_xerr(cmd, cp->sv_xerr_status); sym_set_cam_result_error()
201 if (cp->host_status == HS_COMPLETE && sym_set_cam_result_error()
202 cp->ssss_status == S_GOOD && sym_set_cam_result_error()
203 cp->xerr_status == 0) { sym_set_cam_result_error()
205 cp->sv_xerr_status); sym_set_cam_result_error()
211 memcpy(cmd->sense_buffer, cp->sns_bbuf, sym_set_cam_result_error()
224 cp->target,cp->lun, -1); sym_set_cam_result_error()
237 } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */ sym_set_cam_result_error()
239 else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ sym_set_cam_result_error()
241 else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ sym_set_cam_result_error()
246 cp->host_status, cp->ssss_status, sym_set_cam_result_error()
247 cp->xerr_status); sym_set_cam_result_error()
252 cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); sym_set_cam_result_error()
258 static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) sym_scatter() argument
263 cp->data_len = 0; sym_scatter()
268 struct sym_tcb *tp = &np->target[cp->target]; sym_scatter()
276 data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg]; sym_scatter()
284 cp->odd_byte_adjustment++; scsi_for_each_sg()
288 cp->data_len += len; scsi_for_each_sg()
305 struct sym_ccb *cp; sym_queue_command() local
322 cp = sym_get_ccb(np, cmd, order); sym_queue_command()
323 if (!cp) sym_queue_command()
325 sym_queue_scsiio(np, cmd, cp); sym_queue_command()
332 static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) sym_setup_cdb() argument
334 memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len); sym_setup_cdb()
336 cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]); sym_setup_cdb()
337 cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); sym_setup_cdb()
345 int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) sym_setup_data_and_start() argument
353 if (sym_setup_cdb(np, cmd, cp)) sym_setup_data_and_start()
361 cp->segments = sym_scatter(np, cp, cmd); sym_setup_data_and_start()
362 if (cp->segments < 0) { sym_setup_data_and_start()
370 if (!cp->segments) sym_setup_data_and_start()
373 cp->data_len = 0; sym_setup_data_and_start()
374 cp->segments = 0; sym_setup_data_and_start()
387 lastp = goalp - 8 - (cp->segments * (2*4)); sym_setup_data_and_start()
390 cp->host_flags |= HF_DATA_IN; sym_setup_data_and_start()
392 lastp = goalp - 8 - (cp->segments * (2*4)); sym_setup_data_and_start()
403 cp->phys.head.lastp = cpu_to_scr(lastp); sym_setup_data_and_start()
404 cp->phys.head.savep = cpu_to_scr(lastp); sym_setup_data_and_start()
405 cp->startp = cp->phys.head.savep; sym_setup_data_and_start()
406 cp->goalp = cpu_to_scr(goalp); sym_setup_data_and_start()
415 switch (cp->cdb_buf[0]) { sym_setup_data_and_start()
427 sym_put_start_queue(np, cp); sym_setup_data_and_start()
431 sym_free_ccb(np, cp); sym_setup_data_and_start()
638 struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_eh_handler() local
639 if (cp->cmd == cmd) { sym_eh_handler()
H A Dsym_glue.h256 static inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid) sym_set_cam_result_ok() argument
259 cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f)); sym_set_cam_result_ok()
261 void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
266 int sym_setup_data_and_start (struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
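
sym_set_cam_result_ok() at line 259 packs the host byte and the SCSI status byte into one result word. A small stand-alone illustration of that packing, using DID_* values as defined in the kernel's scsi headers:

#include <stdio.h>

#define DID_OK    0x00
#define DID_ERROR 0x07

static unsigned int make_result(unsigned char host_byte,
                                unsigned char scsi_status)
{
        /* host byte in bits 16-23, SCSI status in bits 0-6 */
        return ((unsigned int)host_byte << 16) | (scsi_status & 0x7f);
}

int main(void)
{
        printf("0x%08x\n", make_result(DID_OK, 0x02));    /* 0x00000002 */
        printf("0x%08x\n", make_result(DID_ERROR, 0x00)); /* 0x00070000 */
        return 0;
}
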
/linux-4.1.27/arch/mips/rb532/
H A Dprom.c73 char *cp, *board; prom_setup_cmdline() local
81 cp = cmd_line; prom_setup_cmdline()
96 *(cp++) = ' '; prom_setup_cmdline()
106 strcpy(cp, prom_argv[i]); prom_setup_cmdline()
107 cp += strlen(prom_argv[i]); prom_setup_cmdline()
109 *(cp++) = ' '; prom_setup_cmdline()
113 *(cp++) = ' '; prom_setup_cmdline()
114 strcpy(cp, arcs_cmdline); prom_setup_cmdline()
115 cp += strlen(arcs_cmdline); prom_setup_cmdline()
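
prom_setup_cmdline() above concatenates boot arguments into one flat buffer, inserting single spaces between them. A bounds-checked user-space sketch of the same cursor-based assembly:

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
        char cmd_line[256];
        char *cp = cmd_line;
        int i;

        for (i = 1; i < argc; i++) {
                size_t len = strlen(argv[i]);

                /* +2: one separator plus the trailing NUL */
                if ((size_t)(cp - cmd_line) + len + 2 > sizeof(cmd_line))
                        break;
                if (cp != cmd_line)
                        *(cp++) = ' ';
                memcpy(cp, argv[i], len);
                cp += len;
        }
        *cp = 0;
        puts(cmd_line);
        return 0;
}
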
/linux-4.1.27/arch/powerpc/boot/
H A Dserial.c39 char ch, *cp; serial_edit_cmdline() local
42 cp = buf; serial_edit_cmdline()
44 cp = &buf[count]; serial_edit_cmdline()
52 if (cp != buf) { serial_edit_cmdline()
53 cp--; serial_edit_cmdline()
59 while (cp != buf) { serial_edit_cmdline()
60 cp--; serial_edit_cmdline()
65 *cp++ = ch; serial_edit_cmdline()
74 *cp = 0; serial_edit_cmdline()
H A Dmktree.c45 unsigned int cksum, *cp; main() local
92 cp = (void *)&bt; main()
94 cksum += *cp++; main()
128 cp = tmpbuf; main()
130 cksum += *cp++; main()
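
mktree.c above checksums the boot header and image data by summing 32-bit words. A minimal sketch of that word-wise sum, assuming the buffer length is a multiple of four:

#include <stdint.h>
#include <stddef.h>

static uint32_t word_sum(const void *buf, size_t len)
{
        const uint32_t *cp = buf;
        uint32_t cksum = 0;

        for (; len >= sizeof(*cp); len -= sizeof(*cp))
                cksum += *cp++;    /* unsigned wrap-around is intended */
        return cksum;
}
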
/linux-4.1.27/net/netfilter/
H A Dxt_ipvs.c55 struct ip_vs_conn *cp; ipvs_mt() local
88 cp = pp->conn_out_get(family, skb, &iph, 1 /* inverse */); ipvs_mt()
89 if (unlikely(cp == NULL)) { ipvs_mt()
100 if ((cp->vport == data->vport) ^ ipvs_mt()
107 if ((cp->control != NULL && ipvs_mt()
108 cp->control->vport == data->vportctl) ^ ipvs_mt()
131 if (((cp->flags & IP_VS_CONN_F_FWD_MASK) == data->fwd_method) ^ ipvs_mt()
138 if (ipvs_mt_addrcmp(&cp->vaddr, &data->vaddr, ipvs_mt()
147 __ip_vs_conn_put(cp); ipvs_mt()
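
The ipvs_mt() hits (e.g. line 100) use the netfilter idiom of XORing each predicate with its per-flag invert bit, so one expression handles both the plain and the negated match. A tiny sketch with an illustrative flag name:

#include <stdbool.h>

#define XT_IPVS_VPORT 0x01         /* illustrative invert-flag bit */

static bool match_vport(unsigned short vport, unsigned short want,
                        unsigned int invflags)
{
        /* true when (vport == want), unless the invert bit flips it */
        return (vport == want) ^ !!(invflags & XT_IPVS_VPORT);
}
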
/linux-4.1.27/include/linux/
H A Dprefetch.h56 char *cp; prefetch_range() local
59 for (cp = addr; cp < end; cp += PREFETCH_STRIDE) prefetch_range()
60 prefetch(cp); prefetch_range()
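
prefetch_range() above walks a buffer one cache-line stride at a time, issuing a prefetch per step. A self-contained version using GCC's __builtin_prefetch in place of the kernel's prefetch(), with an assumed 64-byte stride:

#include <stddef.h>

#define PREFETCH_STRIDE 64         /* assumed cache-line size */

static inline void prefetch_range(void *addr, size_t len)
{
        char *cp;
        char *end = (char *)addr + len;

        for (cp = addr; cp < end; cp += PREFETCH_STRIDE)
                __builtin_prefetch(cp);
}
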
H A Dtty_ldisc.h79 * void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
84 * processing. <cp> is a pointer to the buffer of input
117 * int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
122 * processing. <cp> is a pointer to the buffer of input
208 void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
213 int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
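
The tty_ldisc.h comments document receive_buf()/receive_buf2(), where <cp> is the input buffer handed to the line discipline. A hedged sketch of a conforming handler; the fp/count parameters come from the full kernel prototype (not fully visible in the hits), and the byte-counting body is a stand-in for real processing:

struct tty_struct;                 /* opaque in this sketch */

static unsigned long bytes_seen;

static void demo_receive_buf(struct tty_struct *tty,
                             const unsigned char *cp,
                             char *fp, int count)
{
        int i;

        (void)tty;
        for (i = 0; i < count; i++) {
                /* fp[i], when fp is non-NULL, flags errors for cp[i];
                 * 0 (TTY_NORMAL) means the byte is good. */
                if (!fp || fp[i] == 0)
                        bytes_seen++;
        }
}
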
/linux-4.1.27/arch/avr32/lib/
H A Dstrnlen_user.S32 cp.w r8, 0
44 cp.w r12, 0 /* addr must always be < TASK_SIZE */
52 cp.w r12, r9
H A Dio-writesl.S12 cp.w r10, 0
H A Dstrncpy_from_user.S38 cp.w r10, 0
45 cp.w r8, 0
H A D__avr32_asr64.S16 cp.w r12, 0
H A D__avr32_lsl64.S16 cp.w r12, 0
H A D__avr32_lsr64.S16 cp.w r12, 0
H A Dio-readsl.S12 cp.w r10, 0
H A Dio-readsb.S23 cp.w r10, 0
H A Dio-readsw.S23 cp.w r10, 0
H A Dio-writesw.S17 cp.w r10, 0
H A Dcsum_partial_copy_generic.S78 cp.w r8, 0
93 cp.w r8, 0
H A Dclear_user.S48 5: cp.w r11, 4
H A Dcsum_partial.S34 cp r11, 2
H A Dio-writesb.S23 cp.w r10, 0
/linux-4.1.27/fs/nilfs2/
H A Dcpfile.c86 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); nilfs_cpfile_block_add_valid_checkpoints() local
89 count = le32_to_cpu(cp->cp_checkpoints_count) + n; nilfs_cpfile_block_add_valid_checkpoints()
90 cp->cp_checkpoints_count = cpu_to_le32(count); nilfs_cpfile_block_add_valid_checkpoints()
100 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); nilfs_cpfile_block_sub_valid_checkpoints() local
103 WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n); nilfs_cpfile_block_sub_valid_checkpoints()
104 count = le32_to_cpu(cp->cp_checkpoints_count) - n; nilfs_cpfile_block_sub_valid_checkpoints()
105 cp->cp_checkpoints_count = cpu_to_le32(count); nilfs_cpfile_block_sub_valid_checkpoints()
130 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); nilfs_cpfile_block_init() local
135 nilfs_checkpoint_set_invalid(cp); nilfs_cpfile_block_init()
136 cp = (void *)cp + cpsz; nilfs_cpfile_block_init()
234 struct nilfs_checkpoint *cp; nilfs_cpfile_get_checkpoint() local
251 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); nilfs_cpfile_get_checkpoint()
252 if (nilfs_checkpoint_invalid(cp)) { nilfs_cpfile_get_checkpoint()
260 nilfs_checkpoint_clear_invalid(cp); nilfs_cpfile_get_checkpoint()
276 *cpp = cp; nilfs_cpfile_get_checkpoint()
329 struct nilfs_checkpoint *cp; nilfs_cpfile_delete_checkpoints() local
363 cp = nilfs_cpfile_block_get_checkpoint( nilfs_cpfile_delete_checkpoints()
366 for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) { nilfs_cpfile_delete_checkpoints()
367 if (nilfs_checkpoint_snapshot(cp)) { nilfs_cpfile_delete_checkpoints()
369 } else if (!nilfs_checkpoint_invalid(cp)) { nilfs_cpfile_delete_checkpoints()
370 nilfs_checkpoint_set_invalid(cp); nilfs_cpfile_delete_checkpoints()
423 struct nilfs_checkpoint *cp, nilfs_cpfile_checkpoint_to_cpinfo()
426 ci->ci_flags = le32_to_cpu(cp->cp_flags); nilfs_cpfile_checkpoint_to_cpinfo()
427 ci->ci_cno = le64_to_cpu(cp->cp_cno); nilfs_cpfile_checkpoint_to_cpinfo()
428 ci->ci_create = le64_to_cpu(cp->cp_create); nilfs_cpfile_checkpoint_to_cpinfo()
429 ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc); nilfs_cpfile_checkpoint_to_cpinfo()
430 ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count); nilfs_cpfile_checkpoint_to_cpinfo()
431 ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count); nilfs_cpfile_checkpoint_to_cpinfo()
432 ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); nilfs_cpfile_checkpoint_to_cpinfo()
438 struct nilfs_checkpoint *cp; nilfs_cpfile_do_get_cpinfo() local
462 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); nilfs_cpfile_do_get_cpinfo()
463 for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { nilfs_cpfile_do_get_cpinfo()
464 if (!nilfs_checkpoint_invalid(cp)) { nilfs_cpfile_do_get_cpinfo()
465 nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, nilfs_cpfile_do_get_cpinfo()
491 struct nilfs_checkpoint *cp; nilfs_cpfile_do_get_ssinfo() local
527 cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr); nilfs_cpfile_do_get_ssinfo()
529 if (unlikely(nilfs_checkpoint_invalid(cp) || nilfs_cpfile_do_get_ssinfo()
530 !nilfs_checkpoint_snapshot(cp))) nilfs_cpfile_do_get_ssinfo()
532 nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci); nilfs_cpfile_do_get_ssinfo()
535 next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); nilfs_cpfile_do_get_ssinfo()
614 struct nilfs_checkpoint *cp; nilfs_cpfile_block_get_snapshot_list() local
618 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); nilfs_cpfile_block_get_snapshot_list()
619 list = &cp->cp_snapshot_list; nilfs_cpfile_block_get_snapshot_list()
631 struct nilfs_checkpoint *cp; nilfs_cpfile_set_snapshot() local
646 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); nilfs_cpfile_set_snapshot()
647 if (nilfs_checkpoint_invalid(cp)) { nilfs_cpfile_set_snapshot()
652 if (nilfs_checkpoint_snapshot(cp)) { nilfs_cpfile_set_snapshot()
683 cp = nilfs_cpfile_block_get_checkpoint( nilfs_cpfile_set_snapshot()
685 list = &cp->cp_snapshot_list; nilfs_cpfile_set_snapshot()
707 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); nilfs_cpfile_set_snapshot()
708 cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr); nilfs_cpfile_set_snapshot()
709 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev); nilfs_cpfile_set_snapshot()
710 nilfs_checkpoint_set_snapshot(cp); nilfs_cpfile_set_snapshot()
750 struct nilfs_checkpoint *cp; nilfs_cpfile_clear_snapshot() local
764 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); nilfs_cpfile_clear_snapshot()
765 if (nilfs_checkpoint_invalid(cp)) { nilfs_cpfile_clear_snapshot()
770 if (!nilfs_checkpoint_snapshot(cp)) { nilfs_cpfile_clear_snapshot()
776 list = &cp->cp_snapshot_list; nilfs_cpfile_clear_snapshot()
816 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); nilfs_cpfile_clear_snapshot()
817 cp->cp_snapshot_list.ssl_next = cpu_to_le64(0); nilfs_cpfile_clear_snapshot()
818 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0); nilfs_cpfile_clear_snapshot()
819 nilfs_checkpoint_clear_snapshot(cp); nilfs_cpfile_clear_snapshot()
869 struct nilfs_checkpoint *cp; nilfs_cpfile_is_snapshot() local
883 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); nilfs_cpfile_is_snapshot()
884 if (nilfs_checkpoint_invalid(cp)) nilfs_cpfile_is_snapshot()
887 ret = nilfs_checkpoint_snapshot(cp); nilfs_cpfile_is_snapshot()
422 nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile, struct nilfs_checkpoint *cp, struct nilfs_cpinfo *ci) nilfs_cpfile_checkpoint_to_cpinfo() argument
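
nilfs_cpfile_block_add/sub_valid_checkpoints() above read-modify-write an on-disk little-endian counter, converting at each step. A portable sketch of that pattern; the swap helper is involutive, so the same call converts in both directions:

#include <stdint.h>

struct cp_header { uint32_t cp_checkpoints_count; };   /* stored LE */

static uint32_t le32(uint32_t v)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return v;                  /* no-op on little-endian hosts */
#else
        return __builtin_bswap32(v);
#endif
}

static void add_valid_checkpoints(struct cp_header *cp, unsigned n)
{
        uint32_t count = le32(cp->cp_checkpoints_count) + n;

        cp->cp_checkpoints_count = le32(count);
}
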
/linux-4.1.27/arch/ia64/hp/sim/boot/
H A Dfw-emu.c243 char *cp, *cmd_line; sys_fw_init() local
261 cp = fw_mem; sys_fw_init()
262 efi_systab = (void *) cp; cp += sizeof(*efi_systab); sys_fw_init()
263 efi_runtime = (void *) cp; cp += sizeof(*efi_runtime); sys_fw_init()
264 efi_tables = (void *) cp; cp += sizeof(*efi_tables); sys_fw_init()
265 sal_systab = (void *) cp; cp += sizeof(*sal_systab); sys_fw_init()
266 sal_ed = (void *) cp; cp += sizeof(*sal_ed); sys_fw_init()
267 efi_memmap = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap); sys_fw_init()
268 bp = (void *) cp; cp += sizeof(*bp); sys_fw_init()
269 cmd_line = (void *) cp; sys_fw_init()
330 for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp) sys_fw_init()
331 checksum += *cp; sys_fw_init()
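
sys_fw_init() above carves a series of firmware tables out of one flat region by bumping a char cursor. A reduced sketch of the same layout trick; the structs are illustrative, and every size here is a multiple of the strictest alignment, which real code must also guarantee:

#include <stdio.h>

struct boot_param { long cmdline_off, initrd_off; };
struct mem_desc   { long start, len; };

static long fw_mem[128];           /* long-aligned backing store */

int main(void)
{
        char *cp = (char *)fw_mem;
        struct boot_param *bp = (void *)cp; cp += sizeof(*bp);
        struct mem_desc *md   = (void *)cp; cp += sizeof(*md);

        bp->cmdline_off = 0;
        md->start = 0;
        printf("used %td bytes of the region\n", cp - (char *)fw_mem);
        return 0;
}
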
/linux-4.1.27/arch/microblaze/boot/dts/
H A DMakefile11 $(call if_changed,cp)
/linux-4.1.27/include/net/
H A Dip_vs.h462 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
465 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
472 void (*state_transition)(struct ip_vs_conn *cp, int direction,
480 int (*app_conn_bind)(struct ip_vs_conn *cp);
561 int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
581 static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) ip_vs_conn_net() argument
584 return cp->net; ip_vs_conn_net()
590 static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net) ip_vs_conn_net_set() argument
593 cp->net = net; ip_vs_conn_net_set()
597 static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp, ip_vs_conn_net_eq() argument
601 return cp->net == net; ip_vs_conn_net_eq()
768 int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
829 int (*state_transition)(struct ip_vs_conn *cp, int direction,
1196 static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp) __ip_vs_conn_get() argument
1198 return atomic_inc_not_zero(&cp->refcnt); __ip_vs_conn_get()
1202 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) __ip_vs_conn_put() argument
1205 atomic_dec(&cp->refcnt); __ip_vs_conn_put()
1207 void ip_vs_conn_put(struct ip_vs_conn *cp);
1208 void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
1214 void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
1218 void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
1224 static inline void ip_vs_control_del(struct ip_vs_conn *cp) ip_vs_control_del() argument
1226 struct ip_vs_conn *ctl_cp = cp->control; ip_vs_control_del()
1230 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ip_vs_control_del()
1231 ntohs(cp->cport), ip_vs_control_del()
1232 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ip_vs_control_del()
1233 ntohs(cp->vport)); ip_vs_control_del()
1239 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n", ip_vs_control_del()
1240 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ip_vs_control_del()
1241 ntohs(cp->cport), ip_vs_control_del()
1242 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr), ip_vs_control_del()
1245 cp->control = NULL; ip_vs_control_del()
1249 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ip_vs_control_del()
1250 ntohs(cp->cport), ip_vs_control_del()
1251 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ip_vs_control_del()
1252 ntohs(cp->vport)); ip_vs_control_del()
1260 ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) ip_vs_control_add() argument
1262 if (cp->control) { ip_vs_control_add()
1265 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ip_vs_control_add()
1266 ntohs(cp->cport), ip_vs_control_add()
1267 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ip_vs_control_add()
1268 ntohs(cp->vport)); ip_vs_control_add()
1270 ip_vs_control_del(cp); ip_vs_control_add()
1274 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n", ip_vs_control_add()
1275 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ip_vs_control_add()
1276 ntohs(cp->cport), ip_vs_control_add()
1277 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr), ip_vs_control_add()
1280 cp->control = ctl_cp; ip_vs_control_add()
1305 int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1306 void ip_vs_unbind_app(struct ip_vs_conn *cp);
1389 void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
1413 void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
1422 int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1424 int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1426 int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1428 int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1430 int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1432 int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1438 int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1440 int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1442 int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1444 int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1446 int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1470 #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) ip_vs_todrop()
1472 static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp) ip_vs_fwd_tag() argument
1476 switch (IP_VS_FWD_METHOD(cp)) { ip_vs_fwd_tag()
1494 struct ip_vs_conn *cp, int dir);
1498 struct ip_vs_conn *cp, int dir);
1557 void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
1561 struct ip_vs_conn *cp, u_int8_t proto,
1563 void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
1573 struct ip_vs_conn *cp, int outin) ip_vs_update_conntrack()
1582 static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) ip_vs_conn_drop_conntrack() argument
1572 ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) ip_vs_update_conntrack() argument
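
__ip_vs_conn_get() at line 1198 takes a reference only while the count is still non-zero (atomic_inc_not_zero), which is what lets lookups race safely with connection teardown. A C11-atomics sketch of that primitive:

#include <stdatomic.h>
#include <stdbool.h>

struct conn { atomic_int refcnt; };

static bool conn_get(struct conn *cp)
{
        int old = atomic_load(&cp->refcnt);

        while (old != 0) {
                /* on failure, old is reloaded with the current value */
                if (atomic_compare_exchange_weak(&cp->refcnt, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;                   /* object already on its way out */
}

static void conn_put(struct conn *cp)
{
        atomic_fetch_sub(&cp->refcnt, 1);
}
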
/linux-4.1.27/kernel/trace/
H A Dtrace_kdb.c106 char *cp; kdb_ftdump() local
112 skip_lines = simple_strtol(argv[1], &cp, 0); kdb_ftdump()
113 if (*cp) kdb_ftdump()
118 cpu_file = simple_strtol(argv[2], &cp, 0); kdb_ftdump()
119 if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 || kdb_ftdump()
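
kdb_ftdump() above validates each numeric argument by checking that simple_strtol consumed the whole string (*cp must be NUL afterwards). The same idiom in portable C:

#include <stdlib.h>

static int parse_long(const char *s, long *out)
{
        char *cp;
        long v = strtol(s, &cp, 0);

        if (cp == s || *cp != '\0')
                return -1;         /* no digits, or trailing junk */
        *out = v;
        return 0;
}
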
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_qsfp.c273 int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp) qib_refresh_qsfp_cache() argument
281 memset(cp, 0, sizeof(*cp)); qib_refresh_qsfp_cache()
311 ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1); qib_refresh_qsfp_cache()
314 if ((cp->id & 0xFE) != 0x0C) qib_refresh_qsfp_cache()
316 "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id); qib_refresh_qsfp_cache()
317 cks = cp->id; qib_refresh_qsfp_cache()
319 ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1); qib_refresh_qsfp_cache()
322 cks += cp->pwr; qib_refresh_qsfp_cache()
329 ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1); qib_refresh_qsfp_cache()
332 cks += cp->len; qib_refresh_qsfp_cache()
334 ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1); qib_refresh_qsfp_cache()
337 cks += cp->tech; qib_refresh_qsfp_cache()
339 ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN); qib_refresh_qsfp_cache()
343 cks += cp->vendor[idx]; qib_refresh_qsfp_cache()
345 ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1); qib_refresh_qsfp_cache()
348 cks += cp->xt_xcv; qib_refresh_qsfp_cache()
350 ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN); qib_refresh_qsfp_cache()
354 cks += cp->oui[idx]; qib_refresh_qsfp_cache()
356 ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN); qib_refresh_qsfp_cache()
360 cks += cp->partnum[idx]; qib_refresh_qsfp_cache()
362 ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN); qib_refresh_qsfp_cache()
366 cks += cp->rev[idx]; qib_refresh_qsfp_cache()
368 ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN); qib_refresh_qsfp_cache()
372 cks += cp->atten[idx]; qib_refresh_qsfp_cache()
380 ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1); qib_refresh_qsfp_cache()
383 if (cks != cp->cks1) qib_refresh_qsfp_cache()
385 "QSFP cks1 is %02X, computed %02X\n", cp->cks1, qib_refresh_qsfp_cache()
394 ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN); qib_refresh_qsfp_cache()
398 cks += cp->serial[idx]; qib_refresh_qsfp_cache()
400 ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN); qib_refresh_qsfp_cache()
404 cks += cp->date[idx]; qib_refresh_qsfp_cache()
406 ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN); qib_refresh_qsfp_cache()
410 cks += cp->lot[idx]; qib_refresh_qsfp_cache()
417 ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1); qib_refresh_qsfp_cache()
421 if (cks != cp->cks2) qib_refresh_qsfp_cache()
423 "QSFP cks2 is %02X, computed %02X\n", cp->cks2, qib_refresh_qsfp_cache()
428 cp->id = 0; qib_refresh_qsfp_cache()
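
qib_refresh_qsfp_cache() accumulates a byte-wise sum over every EEPROM field it reads and compares the low 8 bits against the stored check bytes (cks1/cks2). The equivalent check over a flat buffer:

#include <stdint.h>
#include <stddef.h>

static int checksum_ok(const uint8_t *buf, size_t len, uint8_t stored)
{
        uint8_t cks = 0;           /* wraps mod 256, i.e. low 8 bits */
        size_t i;

        for (i = 0; i < len; i++)
                cks += buf[i];
        return cks == stored;
}
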
/linux-4.1.27/drivers/base/
H A Dclass.c30 struct subsys_private *cp = to_subsys_private(kobj); class_attr_show() local
34 ret = class_attr->show(cp->class, class_attr, buf); class_attr_show()
42 struct subsys_private *cp = to_subsys_private(kobj); class_attr_store() local
46 ret = class_attr->store(cp->class, class_attr, buf, count); class_attr_store()
52 struct subsys_private *cp = to_subsys_private(kobj); class_release() local
53 struct class *class = cp->class; class_release()
63 kfree(cp); class_release()
68 struct subsys_private *cp = to_subsys_private(kobj); class_child_ns_type() local
69 struct class *class = cp->class; class_child_ns_type()
168 struct subsys_private *cp; __class_register() local
173 cp = kzalloc(sizeof(*cp), GFP_KERNEL); __class_register()
174 if (!cp) __class_register()
176 klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put); __class_register()
177 INIT_LIST_HEAD(&cp->interfaces); __class_register()
178 kset_init(&cp->glue_dirs); __class_register()
179 __mutex_init(&cp->mutex, "subsys mutex", key); __class_register()
180 error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name); __class_register()
182 kfree(cp); __class_register()
193 cp->subsys.kobj.kset = class_kset; __class_register()
195 cp->subsys.kobj.kset = class_kset; __class_register()
197 cp->subsys.kobj.ktype = &class_ktype; __class_register()
198 cp->class = cls; __class_register()
199 cls->p = cp; __class_register()
201 error = kset_register(&cp->subsys); __class_register()
203 kfree(cp); __class_register()
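
to_subsys_private() in class.c recovers the enclosing subsys_private from an embedded kobject pointer, the kernel's container_of pattern. A stand-alone sketch (the kernel's macro adds type checking omitted here, and the struct layout is illustrative):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kobject { int refcount; };

struct subsys_private {
        int flags;
        struct kobject subsys_kobj;    /* embedded member */
};

static struct subsys_private *to_subsys_private(struct kobject *kobj)
{
        return container_of(kobj, struct subsys_private, subsys_kobj);
}
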
/linux-4.1.27/drivers/scsi/
H A Dncr53c8xx.c869 #define CCB_DONE_VALID(cp) (((u_long) cp) != CCB_DONE_EMPTY)
873 #define CCB_DONE_VALID(cp) \
874 ((((u_long) cp) & 0xffffffff00000000ul) && \
875 (((u_long) cp) & 0xfffffffful) != CCB_DONE_EMPTY)
1392 struct ccb * cp; member in struct:head
1588 #define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl))
1920 static void ncr_complete (struct ncb *np, struct ccb *cp);
1922 static void ncr_free_ccb (struct ncb *np, struct ccb *cp);
1923 static void ncr_init_ccb (struct ncb *np, struct ccb *cp);
1938 static int ncr_prepare_nego(struct ncb *np, struct ccb *cp, u_char *msgptr);
1943 static int ncr_scatter (struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd);
1945 static void ncr_setsync (struct ncb *np, struct ccb *cp, u_char scntl3, u_char sxfer);
1947 static void ncr_setwide (struct ncb *np, struct ccb *cp, u_char wide, u_char ack);
1953 static void ncr_put_start_queue(struct ncb *np, struct ccb *cp);
2804 ** || NADDR (header.cp),
3463 *p++ =NADDR (header.cp); ncr_script_fill()
3679 static void ncr_print_msg(struct ccb *cp, char *label, u_char *msg) ncr_print_msg() argument
3681 PRINT_ADDR(cp->cmd, "%s: ", label); ncr_print_msg()
4060 static int ncr_prepare_nego(struct ncb *np, struct ccb *cp, u_char *msgptr) ncr_prepare_nego() argument
4062 struct tcb *tp = &np->target[cp->target]; ncr_prepare_nego()
4095 cp->nego_status = nego; ncr_prepare_nego()
4098 tp->nego_cp = cp; ncr_prepare_nego()
4100 ncr_print_msg(cp, nego == NS_WIDE ? ncr_prepare_nego()
4124 struct ccb *cp; ncr_queue_command() local
4179 if (np->settle_time || !(cp=ncr_get_ccb (np, cmd))) { ncr_queue_command()
4183 cp->cmd = cmd; ncr_queue_command()
4194 if (cp ->tag != NO_TAG || ncr_queue_command()
4195 (cp != np->ccb && np->disc && !(tp->usrflag & UF_NODISC))) ncr_queue_command()
4198 msgptr = cp->scsi_smsg; ncr_queue_command()
4202 if (cp->tag != NO_TAG) { ncr_queue_command()
4241 msgptr[msglen++] = (cp->tag << 1) + 1; ncr_queue_command()
4253 segments = ncr_scatter(np, cp, cp->cmd); ncr_queue_command()
4255 ncr_free_ccb(np, cp); ncr_queue_command()
4260 cp->data_len = 0; ncr_queue_command()
4273 cp->nego_status = 0; ncr_queue_command()
4276 msglen += ncr_prepare_nego (np, cp, msgptr + msglen); ncr_queue_command()
4285 if (!cp->data_len) ncr_queue_command()
4306 cp->phys.header.wgoalp = cpu_to_scr(goalp); ncr_queue_command()
4307 cp->phys.header.wlastp = cpu_to_scr(lastp); ncr_queue_command()
4328 cp->phys.header.lastp = cpu_to_scr(lastp); ncr_queue_command()
4329 cp->phys.header.goalp = cpu_to_scr(goalp); ncr_queue_command()
4332 cp->phys.header.savep = ncr_queue_command()
4335 cp->phys.header.savep= cpu_to_scr(lastp); ncr_queue_command()
4341 cp->startp = cp->phys.header.savep; ncr_queue_command()
4357 cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, select)); ncr_queue_command()
4358 cp->restart.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_dsa)); ncr_queue_command()
4362 cp->phys.select.sel_id = sdev_id(sdev); ncr_queue_command()
4363 cp->phys.select.sel_scntl3 = tp->wval; ncr_queue_command()
4364 cp->phys.select.sel_sxfer = tp->sval; ncr_queue_command()
4368 cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg)); ncr_queue_command()
4369 cp->phys.smsg.size = cpu_to_scr(msglen); ncr_queue_command()
4374 memcpy(cp->cdb_buf, cmd->cmnd, min_t(int, cmd->cmd_len, sizeof(cp->cdb_buf))); ncr_queue_command()
4375 cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, cdb_buf[0])); ncr_queue_command()
4376 cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); ncr_queue_command()
4381 cp->actualquirks = 0; ncr_queue_command()
4382 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; ncr_queue_command()
4383 cp->scsi_status = S_ILLEGAL; ncr_queue_command()
4384 cp->parity_status = 0; ncr_queue_command()
4386 cp->xerr_status = XE_OK; ncr_queue_command()
4388 cp->sync_status = tp->sval; ncr_queue_command()
4389 cp->wide_status = tp->wval; ncr_queue_command()
4400 cp->magic = CCB_MAGIC; ncr_queue_command()
4406 cp->auto_sense = 0; ncr_queue_command()
4410 ncr_put_start_queue(np, cp); ncr_queue_command()
4431 struct ccb *cp; ncr_start_next_ccb() local
4441 cp = list_entry(qp, struct ccb, link_ccbq); ncr_start_next_ccb()
4443 lp->jump_ccb[cp->tag == NO_TAG ? 0 : cp->tag] = ncr_start_next_ccb()
4444 cpu_to_scr(CCB_PHYS (cp, restart)); ncr_start_next_ccb()
4445 ncr_put_start_queue(np, cp); ncr_start_next_ccb()
4449 static void ncr_put_start_queue(struct ncb *np, struct ccb *cp) ncr_put_start_queue() argument
4462 np->scripth->tryloop [np->squeueput] = cpu_to_scr(CCB_PHYS (cp, start)); ncr_put_start_queue()
4466 cp->queued = 1; ncr_put_start_queue()
4565 struct ccb *cp; ncr_reset_bus() local
4584 for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) { ncr_reset_bus()
4588 if (cp->host_status == HS_IDLE) continue; ncr_reset_bus()
4589 if (cp->cmd == cmd) { ncr_reset_bus()
4634 struct ccb *cp;
4650 for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
4654 if (cp->host_status == HS_IDLE) continue;
4655 if (cp->cmd == cmd) {
4674 switch(cp->host_status) {
4677 printk ("%s: abort ccb=%p (cancel)\n", ncr_name (np), cp);
4678 cp->start.schedule.l_paddr =
4683 cp->restart.schedule.l_paddr =
4706 struct ccb *cp; ncr_detach() local
4766 while ((cp=np->ccb->link_ccb) != NULL) { ncr_detach()
4767 np->ccb->link_ccb = cp->link_ccb; ncr_detach()
4768 if (cp->host_status) { ncr_detach()
4770 ncr_name(np), cp->host_status); ncr_detach()
4773 printk("%s: freeing ccb (%lx)\n", ncr_name(np), (u_long) cp); ncr_detach()
4775 m_free_dma(cp, sizeof(*cp), "CCB"); ncr_detach()
4816 void ncr_complete (struct ncb *np, struct ccb *cp) ncr_complete() argument
4826 if (!cp || cp->magic != CCB_MAGIC || !cp->cmd) ncr_complete()
4834 printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp, ncr_complete()
4835 cp->host_status,cp->scsi_status); ncr_complete()
4841 cmd = cp->cmd; ncr_complete()
4842 cp->cmd = NULL; ncr_complete()
4852 if (cp == tp->nego_cp) ncr_complete()
4858 if (cp->auto_sense) { ncr_complete()
4859 cp->scsi_status = cp->auto_sense; ncr_complete()
4868 if (cp == lp->held_ccb) { ncr_complete()
4878 if (cp->parity_status > 1) { ncr_complete()
4879 PRINT_ADDR(cmd, "%d parity error(s).\n",cp->parity_status); ncr_complete()
4886 if (cp->xerr_status != XE_OK) { ncr_complete()
4887 switch (cp->xerr_status) { ncr_complete()
4896 cp->xerr_status); ncr_complete()
4899 if (cp->host_status==HS_COMPLETE) ncr_complete()
4900 cp->host_status = HS_FAIL; ncr_complete()
4907 if (cp->host_status!=HS_COMPLETE || cp->scsi_status!=S_GOOD) { ncr_complete()
4910 cp->host_status, cp->scsi_status); ncr_complete()
4917 if ( (cp->host_status == HS_COMPLETE) ncr_complete()
4918 && (cp->scsi_status == S_GOOD || ncr_complete()
4919 cp->scsi_status == S_COND_MET)) { ncr_complete()
4925 cmd->result = ScsiResult(DID_OK, cp->scsi_status); ncr_complete()
4932 /* if (cp->phys.header.lastp != cp->phys.header.goalp) */ ncr_complete()
4940 tp->bytes += cp->data_len; ncr_complete()
4955 } else if ((cp->host_status == HS_COMPLETE) ncr_complete()
4956 && (cp->scsi_status == S_CHECK_COND)) { ncr_complete()
4965 memcpy(cmd->sense_buffer, cp->sense_buf, ncr_complete()
4967 sizeof(cp->sense_buf))); ncr_complete()
4976 } else if ((cp->host_status == HS_COMPLETE) ncr_complete()
4977 && (cp->scsi_status == S_CONFLICT)) { ncr_complete()
4983 } else if ((cp->host_status == HS_COMPLETE) ncr_complete()
4984 && (cp->scsi_status == S_BUSY || ncr_complete()
4985 cp->scsi_status == S_QUEUE_FULL)) { ncr_complete()
4990 cmd->result = ScsiResult(DID_OK, cp->scsi_status); ncr_complete()
4992 } else if ((cp->host_status == HS_SEL_TIMEOUT) ncr_complete()
4993 || (cp->host_status == HS_TIMEOUT)) { ncr_complete()
4998 cmd->result = ScsiResult(DID_TIME_OUT, cp->scsi_status); ncr_complete()
5000 } else if (cp->host_status == HS_RESET) { ncr_complete()
5005 cmd->result = ScsiResult(DID_RESET, cp->scsi_status); ncr_complete()
5007 } else if (cp->host_status == HS_ABORTED) { ncr_complete()
5012 cmd->result = ScsiResult(DID_ABORT, cp->scsi_status); ncr_complete()
5020 cp->host_status, cp->scsi_status, cp); ncr_complete()
5022 cmd->result = ScsiResult(DID_ERROR, cp->scsi_status); ncr_complete()
5036 if (cp->host_status==HS_COMPLETE) { ncr_complete()
5037 switch (cp->scsi_status) { ncr_complete()
5048 printk (" STAT: %x\n", cp->scsi_status); ncr_complete()
5051 } else printk (" HOSTERROR: %x", cp->host_status); ncr_complete()
5058 ncr_free_ccb (np, cp); ncr_complete()
5092 static void ncr_ccb_skipped(struct ncb *np, struct ccb *cp) ncr_ccb_skipped() argument
5094 struct tcb *tp = &np->target[cp->target]; ncr_ccb_skipped()
5095 struct lcb *lp = tp->lp[cp->lun]; ncr_ccb_skipped()
5097 if (lp && cp != np->ccb) { ncr_ccb_skipped()
5098 cp->host_status &= ~HS_SKIPMASK; ncr_ccb_skipped()
5099 cp->start.schedule.l_paddr = ncr_ccb_skipped()
5101 list_move_tail(&cp->link_ccbq, &lp->skip_ccbq); ncr_ccb_skipped()
5102 if (cp->queued) { ncr_ccb_skipped()
5106 if (cp->queued) { ncr_ccb_skipped()
5108 cp->queued = 0; ncr_ccb_skipped()
5118 struct ccb *cp; ncr_wakeup_done() local
5128 cp = np->ccb_done[j]; ncr_wakeup_done()
5129 if (!CCB_DONE_VALID(cp)) ncr_wakeup_done()
5139 if (cp->host_status & HS_DONEMASK) ncr_wakeup_done()
5140 ncr_complete (np, cp); ncr_wakeup_done()
5141 else if (cp->host_status & HS_SKIPMASK) ncr_wakeup_done()
5142 ncr_ccb_skipped (np, cp); ncr_wakeup_done()
5148 cp = np->ccb; ncr_wakeup_done()
5149 while (cp) { ncr_wakeup_done()
5150 if (cp->host_status & HS_DONEMASK) ncr_wakeup_done()
5151 ncr_complete (np, cp); ncr_wakeup_done()
5152 else if (cp->host_status & HS_SKIPMASK) ncr_wakeup_done()
5153 ncr_ccb_skipped (np, cp); ncr_wakeup_done()
5154 cp = cp->link_ccb; ncr_wakeup_done()
5164 struct ccb *cp = np->ccb; ncr_wakeup() local
5166 while (cp) { ncr_wakeup()
5167 if (cp->host_status != HS_IDLE) { ncr_wakeup()
5168 cp->host_status = code; ncr_wakeup()
5169 ncr_complete (np, cp); ncr_wakeup()
5171 cp = cp->link_ccb; ncr_wakeup()
5497 struct ccb *cp; ncr_set_sync_wide_status() local
5511 for (cp = np->ccb; cp; cp = cp->link_ccb) { ncr_set_sync_wide_status()
5512 if (!cp->cmd) continue; ncr_set_sync_wide_status()
5513 if (scmd_id(cp->cmd) != target) continue; ncr_set_sync_wide_status()
5515 cp->sync_status = tp->sval; ncr_set_sync_wide_status()
5516 cp->wide_status = tp->wval; ncr_set_sync_wide_status()
5518 cp->phys.select.sel_scntl3 = tp->wval; ncr_set_sync_wide_status()
5519 cp->phys.select.sel_sxfer = tp->sval; ncr_set_sync_wide_status()
5530 static void ncr_setsync (struct ncb *np, struct ccb *cp, u_char scntl3, u_char sxfer) ncr_setsync() argument
5532 struct scsi_cmnd *cmd = cp->cmd; ncr_setsync()
5587 static void ncr_setwide (struct ncb *np, struct ccb *cp, u_char wide, u_char ack) ncr_setwide() argument
5589 struct scsi_cmnd *cmd = cp->cmd; ncr_setwide()
6089 struct ccb *cp; ncr_int_sto() local
6097 cp = np->ccb; ncr_int_sto()
6098 while (cp && (CCB_PHYS (cp, phys) != dsa)) ncr_int_sto()
6099 cp = cp->link_ccb; ncr_int_sto()
6101 if (cp) { ncr_int_sto()
6102 cp-> host_status = HS_SEL_TIMEOUT; ncr_int_sto()
6103 ncr_complete (np, cp); ncr_int_sto()
6256 struct ccb *cp; ncr_int_ma() local
6313 ** locate matching cp. ncr_int_ma()
6319 cp = np->header.cp; ncr_int_ma()
6320 if (CCB_PHYS(cp, phys) != dsa) ncr_int_ma()
6321 cp = NULL; ncr_int_ma()
6323 cp = np->ccb; ncr_int_ma()
6324 while (cp && (CCB_PHYS (cp, phys) != dsa)) ncr_int_ma()
6325 cp = cp->link_ccb; ncr_int_ma()
6344 else if (cp) { ncr_int_ma()
6345 if (dsp == CCB_PHYS (cp, patch[2])) { ncr_int_ma()
6346 vdsp = &cp->patch[0]; ncr_int_ma()
6349 else if (dsp == CCB_PHYS (cp, patch[6])) { ncr_int_ma()
6350 vdsp = &cp->patch[4]; ncr_int_ma()
6361 cp, np->header.cp, ncr_int_ma()
6367 ** cp=0 means that the DSA does not point to a valid control ncr_int_ma()
6372 if (!cp) { ncr_int_ma()
6375 ncr_name (np), (u_long) np->header.cp); ncr_int_ma()
6386 tblp = (u32 *) ((char*) &cp->phys + oadr); ncr_int_ma()
6407 PRINT_ADDR(cp->cmd, "internal error: cmd=%02x != %02x=(vdsp[0] " ncr_int_ma()
6414 ** cp != np->header.cp means that the header of the CCB ncr_int_ma()
6419 if (cp != np->header.cp) { ncr_int_ma()
6422 ncr_name (np), (u_long) cp, (u_long) np->header.cp); ncr_int_ma()
6430 PRINT_ADDR(cp->cmd, "phase change %x-%x %d@%08x resid=%d.\n", ncr_int_ma()
6441 newcmd = cp->patch; ncr_int_ma()
6442 newtmp = CCB_PHYS (cp, patch); ncr_int_ma()
6443 if (newtmp == scr_to_cpu(cp->phys.header.savep)) { ncr_int_ma()
6444 newcmd = &cp->patch[4]; ncr_int_ma()
6445 newtmp = CCB_PHYS (cp, patch[4]); ncr_int_ma()
6458 PRINT_ADDR(cp->cmd, "newcmd[%d] %x %x %x %x.\n", ncr_int_ma()
6459 (int) (newcmd - cp->patch), ncr_int_ma()
6516 cp->host_status = HS_BUSY; ncr_int_ma()
6541 static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp) ncr_sir_to_redo() argument
6543 struct scsi_cmnd *cmd = cp->cmd; ncr_sir_to_redo()
6557 ** are before cp and busy ones after. ncr_sir_to_redo()
6565 if (cp2 == cp) ncr_sir_to_redo()
6570 lp->held_ccb = cp; /* Requeue when this one completes */ ncr_sir_to_redo()
6598 cp->phys.header.savep = cp->startp; ncr_sir_to_redo()
6599 cp->host_status = HS_BUSY; ncr_sir_to_redo()
6600 cp->scsi_status = S_ILLEGAL; ncr_sir_to_redo()
6602 ncr_put_start_queue(np, cp); ncr_sir_to_redo()
6612 if (cp->auto_sense) ncr_sir_to_redo()
6622 cp->scsi_smsg2[0] = IDENTIFY(0, cmd->device->lun); ncr_sir_to_redo()
6623 cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2)); ncr_sir_to_redo()
6624 cp->phys.smsg.size = cpu_to_scr(1); ncr_sir_to_redo()
6629 cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd)); ncr_sir_to_redo()
6630 cp->phys.cmd.size = cpu_to_scr(6); ncr_sir_to_redo()
6635 cp->sensecmd[0] = 0x03; ncr_sir_to_redo()
6636 cp->sensecmd[1] = (cmd->device->lun & 0x7) << 5; ncr_sir_to_redo()
6637 cp->sensecmd[4] = sizeof(cp->sense_buf); ncr_sir_to_redo()
6642 memset(cp->sense_buf, 0, sizeof(cp->sense_buf)); ncr_sir_to_redo()
6643 cp->phys.sense.addr = cpu_to_scr(CCB_PHYS(cp,sense_buf[0])); ncr_sir_to_redo()
6644 cp->phys.sense.size = cpu_to_scr(sizeof(cp->sense_buf)); ncr_sir_to_redo()
6651 cp->phys.header.savep = startp; ncr_sir_to_redo()
6652 cp->phys.header.goalp = startp + 24; ncr_sir_to_redo()
6653 cp->phys.header.lastp = startp; ncr_sir_to_redo()
6654 cp->phys.header.wgoalp = startp + 24; ncr_sir_to_redo()
6655 cp->phys.header.wlastp = startp; ncr_sir_to_redo()
6657 cp->host_status = HS_BUSY; ncr_sir_to_redo()
6658 cp->scsi_status = S_ILLEGAL; ncr_sir_to_redo()
6659 cp->auto_sense = s_status; ncr_sir_to_redo()
6661 cp->start.schedule.l_paddr = ncr_sir_to_redo()
6668 cp->start.schedule.l_paddr = ncr_sir_to_redo()
6671 ncr_put_start_queue(np, cp); ncr_sir_to_redo()
6697 struct ccb *cp=NULL; ncr_int_sir() local
6743 cp = np->header.cp; ncr_int_sir()
6744 if (!cp || CCB_PHYS (cp, phys) != dsa) ncr_int_sir()
6746 ncr_sir_to_redo(np, num, cp); ncr_int_sir()
6752 cp = np->ccb; ncr_int_sir()
6753 while (cp && (CCB_PHYS (cp, phys) != dsa)) ncr_int_sir()
6754 cp = cp->link_ccb; ncr_int_sir()
6756 BUG_ON(!cp); ncr_int_sir()
6757 BUG_ON(cp != np->header.cp); ncr_int_sir()
6759 if (!cp || cp != np->header.cp) ncr_int_sir()
6848 PRINT_ADDR(cp->cmd, "negotiation failed sir=%x " ncr_int_sir()
6849 "status=%x.\n", num, cp->nego_status); ncr_int_sir()
6856 switch (cp->nego_status) { ncr_int_sir()
6861 ncr_setsync (np, cp, 0, 0xe0); ncr_int_sir()
6866 ncr_setwide (np, cp, 0, 0); ncr_int_sir()
6872 cp->nego_status = 0; ncr_int_sir()
6877 ncr_print_msg(cp, "sync msgin", np->msgin); ncr_int_sir()
6924 PRINT_ADDR(cp->cmd, "sync: per=%d scntl3=0x%x ofs=%d " ncr_int_sir()
6930 switch (cp->nego_status) { ncr_int_sir()
6938 ncr_setsync(np, cp, 0, 0xe0); ncr_int_sir()
6944 ncr_setsync(np, cp, scntl3, (fak<<5)|ofs); ncr_int_sir()
6951 ncr_setwide(np, cp, 0, 0); ncr_int_sir()
6963 ncr_setsync(np, cp, scntl3, (fak<<5)|ofs); ncr_int_sir()
6966 cp->nego_status = NS_SYNC; ncr_int_sir()
6969 ncr_print_msg(cp, "sync msgout", np->msgout); ncr_int_sir()
6985 ncr_print_msg(cp, "wide msgin", np->msgin); ncr_int_sir()
7011 PRINT_ADDR(cp->cmd, "wide: wide=%d chg=%d.\n", wide, ncr_int_sir()
7017 switch (cp->nego_status) { ncr_int_sir()
7026 ncr_setwide(np, cp, 0, 1); ncr_int_sir()
7031 ncr_setwide(np, cp, wide, 1); ncr_int_sir()
7039 ncr_setsync(np, cp, 0, 0xe0); ncr_int_sir()
7050 ncr_setwide(np, cp, wide, 1); ncr_int_sir()
7055 cp->nego_status = NS_WIDE; ncr_int_sir()
7058 ncr_print_msg(cp, "wide msgout", np->msgin); ncr_int_sir()
7077 PRINT_ADDR(cp->cmd, "MESSAGE_REJECT received (%x:%x).\n", ncr_int_sir()
7089 ncr_print_msg(cp, "MESSAGE_REJECT sent for", np->msgin); ncr_int_sir()
7108 PRINT_ADDR(cp->cmd, "IGNORE_WIDE_RESIDUE received, but not yet " ncr_int_sir()
7121 PRINT_ADDR(cp->cmd, "DISCONNECT received, but datapointer " ncr_int_sir()
7150 struct ccb *cp = NULL; ncr_get_ccb() local
7174 cp = list_entry(qp, struct ccb, link_ccbq); ncr_get_ccb()
7175 if (cp->magic) { ncr_get_ccb()
7177 "(@%p)\n", cp); ncr_get_ccb()
7178 cp = NULL; ncr_get_ccb()
7189 if (cp) { ncr_get_ccb()
7200 if (!cp) ncr_get_ccb()
7201 cp = np->ccb; ncr_get_ccb()
7207 while (cp->magic) { ncr_get_ccb()
7209 if (tsleep ((caddr_t)cp, PRIBIO|PCATCH, "ncr", 0)) ncr_get_ccb()
7214 if (cp->magic) ncr_get_ccb()
7217 cp->magic = 1; ncr_get_ccb()
7234 cp->tag = tag; ncr_get_ccb()
7235 cp->target = tn; ncr_get_ccb()
7236 cp->lun = ln; ncr_get_ccb()
7239 PRINT_ADDR(cmd, "ccb @%p using tag %d.\n", cp, tag); ncr_get_ccb()
7242 return cp; ncr_get_ccb()
7254 static void ncr_free_ccb (struct ncb *np, struct ccb *cp) ncr_free_ccb() argument
7256 struct tcb *tp = &np->target[cp->target]; ncr_free_ccb()
7257 struct lcb *lp = tp->lp[cp->lun]; ncr_free_ccb()
7260 PRINT_ADDR(cp->cmd, "ccb @%p freeing tag %d.\n", cp, cp->tag); ncr_free_ccb()
7269 if (cp->tag != NO_TAG) { ncr_free_ccb()
7270 lp->cb_tags[lp->if_tag++] = cp->tag; ncr_free_ccb()
7273 lp->tags_umap &= ~(((tagmap_t) 1) << cp->tag); ncr_free_ccb()
7275 lp->jump_ccb[cp->tag] = ncr_free_ccb()
7288 if (cp != np->ccb) ncr_free_ccb()
7289 list_move(&cp->link_ccbq, &lp->free_ccbq); ncr_free_ccb()
7291 if (cp->queued) { ncr_free_ccb()
7295 cp->host_status = HS_IDLE; ncr_free_ccb()
7296 cp->magic = 0; ncr_free_ccb()
7297 if (cp->queued) { ncr_free_ccb()
7299 cp->queued = 0; ncr_free_ccb()
7303 if (cp == np->ccb) ncr_free_ccb()
7304 wakeup ((caddr_t) cp); ncr_free_ccb()
7316 static void ncr_init_ccb(struct ncb *np, struct ccb *cp) ncr_init_ccb() argument
7323 cp->p_ccb = vtobus(cp); ncr_init_ccb()
7324 cp->phys.header.cp = cp; ncr_init_ccb()
7329 INIT_LIST_HEAD(&cp->link_ccbq); ncr_init_ccb()
7337 cp->start.setup_dsa[0] = cpu_to_scr(copy_4); ncr_init_ccb()
7338 cp->start.setup_dsa[1] = cpu_to_scr(CCB_PHYS(cp, start.p_phys)); ncr_init_ccb()
7339 cp->start.setup_dsa[2] = cpu_to_scr(ncr_reg_bus_addr(nc_dsa)); ncr_init_ccb()
7340 cp->start.schedule.l_cmd = cpu_to_scr(SCR_JUMP); ncr_init_ccb()
7341 cp->start.p_phys = cpu_to_scr(CCB_PHYS(cp, phys)); ncr_init_ccb()
7343 memcpy(&cp->restart, &cp->start, sizeof(cp->restart)); ncr_init_ccb()
7345 cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle)); ncr_init_ccb()
7346 cp->restart.schedule.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort)); ncr_init_ccb()
7359 struct ccb *cp = NULL; ncr_alloc_ccb() local
7364 cp = m_calloc_dma(sizeof(struct ccb), "CCB"); ncr_alloc_ccb()
7365 if (!cp) ncr_alloc_ccb()
7373 memset(cp, 0, sizeof (*cp)); ncr_alloc_ccb()
7374 ncr_init_ccb(np, cp); ncr_alloc_ccb()
7380 cp->link_ccb = np->ccb->link_ccb; ncr_alloc_ccb()
7381 np->ccb->link_ccb = cp; ncr_alloc_ccb()
7383 list_add(&cp->link_ccbq, &lp->free_ccbq); ncr_alloc_ccb()
7647 static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd) ncr_scatter() argument
7652 cp->data_len = 0; ncr_scatter()
7664 data = &cp->phys.data[MAX_SCATTER - use_sg]; ncr_scatter()
7671 cp->data_len += len; scsi_for_each_sg()
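The ncr_int_sir() hits above resolve a DSA bus address back to its CCB by walking a singly linked list (while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_ccb;). A minimal user-space sketch of that lookup pattern, with an invented struct ccb and a plain integer standing in for CCB_PHYS():

    #include <stdio.h>
    #include <stdint.h>

    struct ccb {                        /* invented stand-in, not the driver's CCB */
        uint32_t phys;                  /* pretend bus address of this CCB */
        struct ccb *link_ccb;           /* chain of all allocated CCBs */
    };

    /* Mirror of: while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_ccb; */
    static struct ccb *find_ccb(struct ccb *head, uint32_t dsa)
    {
        struct ccb *cp = head;
        while (cp && cp->phys != dsa)
            cp = cp->link_ccb;
        return cp;                      /* NULL: no CCB owns this DSA */
    }

    int main(void)
    {
        struct ccb a = { 0x1000, NULL }, b = { 0x2000, &a }, c = { 0x3000, &b };
        struct ccb *hit = find_ccb(&c, 0x2000);
        printf("dsa 0x2000 -> %s (phys 0x%x)\n",
               hit ? "found" : "not found", hit ? hit->phys : 0u);
        return 0;
    }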
H A Deata_pio.c163 struct eata_ccb *cp; eata_pio_int_handler() local
183 cp = &hd->ccb[0]; eata_pio_int_handler()
184 cmd = cp->cmd; eata_pio_int_handler()
190 if (cp->DataIn) { eata_pio_int_handler()
215 } else { /* cp->DataOut */ eata_pio_int_handler()
259 hd->devflags |= (1 << cp->cp_id); eata_pio_int_handler()
260 } else if (hd->devflags & (1 << cp->cp_id)) eata_pio_int_handler()
265 if (cp->status == LOCKED) { eata_pio_int_handler()
266 cp->status = FREE; eata_pio_int_handler()
276 cp->status = FREE; /* now we can release the slot */ eata_pio_int_handler()
310 struct eata_ccb *cp; eata_pio_queue_lck() local
332 cp = &hd->ccb[y]; eata_pio_queue_lck()
334 memset(cp, 0, sizeof(struct eata_ccb)); eata_pio_queue_lck()
336 cp->status = USED; /* claim free slot */ eata_pio_queue_lck()
344 cp->DataOut = 1; /* Output mode */ eata_pio_queue_lck()
346 cp->DataIn = 0; /* Input mode */ eata_pio_queue_lck()
348 cp->Interpret = (cmd->device->id == hd->hostid); eata_pio_queue_lck()
349 cp->cp_datalen = cpu_to_be32(scsi_bufflen(cmd)); eata_pio_queue_lck()
350 cp->Auto_Req_Sen = 0; eata_pio_queue_lck()
351 cp->cp_reqDMA = 0; eata_pio_queue_lck()
352 cp->reqlen = 0; eata_pio_queue_lck()
354 cp->cp_id = cmd->device->id; eata_pio_queue_lck()
355 cp->cp_lun = cmd->device->lun; eata_pio_queue_lck()
356 cp->cp_dispri = 0; eata_pio_queue_lck()
357 cp->cp_identify = 1; eata_pio_queue_lck()
358 memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd)); eata_pio_queue_lck()
360 cp->cp_statDMA = 0; eata_pio_queue_lck()
362 cp->cp_viraddr = cp; eata_pio_queue_lck()
363 cp->cmd = cmd; eata_pio_queue_lck()
386 cp->status = FREE; eata_pio_queue_lck()
392 outsw(base + HA_RDATA, cp, hd->cplen); eata_pio_queue_lck()
514 struct eata_ccb cp; get_pio_board_data() local
518 memset(&cp, 0, sizeof(struct eata_ccb)); get_pio_board_data()
521 cp.DataIn = 1; get_pio_board_data()
522 cp.Interpret = 1; /* Interpret command */ get_pio_board_data()
524 cp.cp_datalen = cpu_to_be32(254); get_pio_board_data()
525 cp.cp_dataDMA = cpu_to_be32(0); get_pio_board_data()
527 cp.cp_id = id; get_pio_board_data()
528 cp.cp_lun = 0; get_pio_board_data()
530 cp.cp_cdb[0] = INQUIRY; get_pio_board_data()
531 cp.cp_cdb[1] = 0; get_pio_board_data()
532 cp.cp_cdb[2] = 0; get_pio_board_data()
533 cp.cp_cdb[3] = 0; get_pio_board_data()
534 cp.cp_cdb[4] = 254; get_pio_board_data()
535 cp.cp_cdb[5] = 0; get_pio_board_data()
543 outsw(base + HA_RDATA, &cp, cplen); get_pio_board_data()
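get_pio_board_data() above zero-fills a stack eata_ccb and hand-builds a 6-byte INQUIRY CDB (opcode 0x12, allocation length 254) before pushing the whole structure out the PIO data port. The CDB construction on its own, against a trimmed stand-in struct rather than the real eata_ccb:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define INQUIRY 0x12                /* SCSI INQUIRY opcode */

    struct fake_ccb {                   /* simplified stand-in for eata_ccb */
        uint8_t  DataIn, Interpret;
        uint32_t cp_datalen;
        uint8_t  cp_id, cp_lun;
        uint8_t  cp_cdb[12];
    };

    int main(void)
    {
        struct fake_ccb cp;

        memset(&cp, 0, sizeof(cp));     /* the driver relies on zero fill too */
        cp.DataIn     = 1;              /* board -> host transfer */
        cp.Interpret  = 1;              /* controller interprets the command */
        cp.cp_datalen = 254;
        cp.cp_id      = 7;              /* target id (example value) */

        cp.cp_cdb[0] = INQUIRY;         /* bytes 1-3 stay zero */
        cp.cp_cdb[4] = 254;             /* allocation length */

        printf("CDB: %02x %02x %02x %02x %02x %02x\n",
               cp.cp_cdb[0], cp.cp_cdb[1], cp.cp_cdb[2],
               cp.cp_cdb[3], cp.cp_cdb[4], cp.cp_cdb[5]);
        return 0;
    }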
H A Dhpsa.c763 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; set_ioaccel1_performant_mode() local
768 cp->ReplyQueue = smp_processor_id() % h->nreply_queues; set_ioaccel1_performant_mode()
781 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; set_ioaccel2_performant_mode() local
786 cp->reply_queue = smp_processor_id() % h->nreply_queues; set_ioaccel2_performant_mode()
792 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); set_ioaccel2_performant_mode()
1759 static void complete_scsi_command(struct CommandList *cp) complete_scsi_command() argument
1771 ei = cp->err_info; complete_scsi_command()
1772 cmd = cp->scsi_cmd; complete_scsi_command()
1773 h = cp->h; complete_scsi_command()
1777 if ((cp->cmd_type == CMD_SCSI) && complete_scsi_command()
1778 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) complete_scsi_command()
1779 hpsa_unmap_sg_chain_block(h, cp); complete_scsi_command()
1784 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) complete_scsi_command()
1785 atomic_dec(&cp->phys_disk->ioaccel_cmds_out); complete_scsi_command()
1787 if (cp->cmd_type == CMD_IOACCEL2) complete_scsi_command()
1788 return process_ioaccel2_completion(h, cp, cmd, dev); complete_scsi_command()
1794 if (cp->cmd_type == CMD_IOACCEL1) complete_scsi_command()
1795 atomic_dec(&cp->phys_disk->ioaccel_cmds_out); complete_scsi_command()
1796 cmd_free(h, cp); complete_scsi_command()
1814 if (cp->cmd_type == CMD_IOACCEL1) { complete_scsi_command()
1815 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; complete_scsi_command()
1816 cp->Header.SGList = scsi_sg_count(cmd); complete_scsi_command()
1817 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); complete_scsi_command()
1818 cp->Request.CDBLen = le16_to_cpu(c->io_flags) & complete_scsi_command()
1820 cp->Header.tag = c->tag; complete_scsi_command()
1821 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); complete_scsi_command()
1822 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); complete_scsi_command()
1831 INIT_WORK(&cp->work, hpsa_command_resubmit_worker); complete_scsi_command()
1833 h->resubmit_wq, &cp->work); complete_scsi_command()
1861 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " complete_scsi_command()
1864 cp, ei->ScsiStatus, complete_scsi_command()
1868 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " complete_scsi_command()
1869 "Returning no connection.\n", cp), complete_scsi_command()
1891 "CDB %16phN data overrun\n", cp->Request.CDB); complete_scsi_command()
1894 /* print_bytes(cp, sizeof(*cp), 1, 0); complete_scsi_command()
1895 print_cmd(cp); */ complete_scsi_command()
1908 cp->Request.CDB); complete_scsi_command()
1913 cp->Request.CDB); complete_scsi_command()
1918 cp->Request.CDB); complete_scsi_command()
1923 cp->Request.CDB, ei->ScsiStatus); complete_scsi_command()
1928 cp->Request.CDB); complete_scsi_command()
1933 cp->Request.CDB); complete_scsi_command()
1938 cp->Request.CDB); complete_scsi_command()
1950 "cp %p had HP SSD Smart Path error\n", cp); complete_scsi_command()
1954 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", complete_scsi_command()
1955 cp, ei->CommandStatus); complete_scsi_command()
1957 cmd_free(h, cp); complete_scsi_command()
1973 struct CommandList *cp, hpsa_map_one()
1981 cp->Header.SGList = 0; hpsa_map_one()
1982 cp->Header.SGTotal = cpu_to_le16(0); hpsa_map_one()
1989 cp->Header.SGList = 0; hpsa_map_one()
1990 cp->Header.SGTotal = cpu_to_le16(0); hpsa_map_one()
1993 cp->SG[0].Addr = cpu_to_le64(addr64); hpsa_map_one()
1994 cp->SG[0].Len = cpu_to_le32(buflen); hpsa_map_one()
1995 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ hpsa_map_one()
1996 cp->Header.SGList = 1; /* no. SGs contig in this cmd */ hpsa_map_one()
1997 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ hpsa_map_one()
2071 struct CommandList *cp) hpsa_scsi_interpret_error()
2073 const struct ErrorInfo *ei = cp->err_info; hpsa_scsi_interpret_error()
2074 struct device *d = &cp->h->pdev->dev; hpsa_scsi_interpret_error()
2079 hpsa_print_cmd(h, "SCSI status", cp); hpsa_scsi_interpret_error()
2094 hpsa_print_cmd(h, "overrun condition", cp); hpsa_scsi_interpret_error()
2100 hpsa_print_cmd(h, "invalid command", cp); hpsa_scsi_interpret_error()
2105 hpsa_print_cmd(h, "protocol error", cp); hpsa_scsi_interpret_error()
2108 hpsa_print_cmd(h, "hardware error", cp); hpsa_scsi_interpret_error()
2111 hpsa_print_cmd(h, "connection lost", cp); hpsa_scsi_interpret_error()
2114 hpsa_print_cmd(h, "aborted", cp); hpsa_scsi_interpret_error()
2117 hpsa_print_cmd(h, "abort failed", cp); hpsa_scsi_interpret_error()
2120 hpsa_print_cmd(h, "unsolicited abort", cp); hpsa_scsi_interpret_error()
2123 hpsa_print_cmd(h, "timed out", cp); hpsa_scsi_interpret_error()
2126 hpsa_print_cmd(h, "unabortable", cp); hpsa_scsi_interpret_error()
2129 hpsa_print_cmd(h, "unknown status", cp); hpsa_scsi_interpret_error()
3256 * hpsa command, cp.
3259 struct CommandList *cp, hpsa_scatter_gather()
3275 curr_sg = cp->SG; hpsa_scatter_gather()
3282 curr_sg = h->cmd_sg_list[cp->cmdindex]; scsi_for_each_sg()
3296 cp->Header.SGList = h->max_cmd_sg_entries;
3297 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3298 if (hpsa_map_sg_chain_block(h, cp)) {
3307 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
3308 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
3365 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; hpsa_scsi_ioaccel1_queue_command() local
3391 (c->cmdindex * sizeof(*cp)); hpsa_scsi_ioaccel1_queue_command()
3401 curr_sg = cp->SG; scsi_for_each_sg()
3435 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3436 cp->transfer_len = cpu_to_le32(total_len);
3437 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3439 cp->control = cpu_to_le32(control);
3440 memcpy(cp->CDB, cdb, cdb_len);
3441 memcpy(cp->CISS_LUN, scsi3addr, 8);
3467 struct CommandList *c, struct io_accel2_cmd *cp) set_encrypt_ioaccel2()
3478 cp->dekindex = map->dekindex; set_encrypt_ioaccel2()
3481 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; set_encrypt_ioaccel2()
3516 cp->tweak_lower = cpu_to_le32(first_block); set_encrypt_ioaccel2()
3517 cp->tweak_upper = cpu_to_le32(first_block >> 32); set_encrypt_ioaccel2()
3525 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; hpsa_scsi_ioaccel2_queue_command() local
3546 (c->cmdindex * sizeof(*cp)); hpsa_scsi_ioaccel2_queue_command()
3549 memset(cp, 0, sizeof(*cp)); hpsa_scsi_ioaccel2_queue_command()
3550 cp->IU_type = IOACCEL2_IU_TYPE; hpsa_scsi_ioaccel2_queue_command()
3560 curr_sg = cp->sg; scsi_for_each_sg()
3576 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3577 cp->direction |= IOACCEL2_DIR_DATA_OUT;
3580 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3581 cp->direction |= IOACCEL2_DIR_DATA_IN;
3584 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3585 cp->direction |= IOACCEL2_DIR_NO_DATA;
3594 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3595 cp->direction |= IOACCEL2_DIR_NO_DATA;
3599 set_encrypt_ioaccel2(h, c, cp);
3601 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
3602 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
3603 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3606 cp->sg_count = (u8) use_sg;
3608 cp->data_len = cpu_to_le32(total_len);
3609 cp->err_ptr = cpu_to_le64(c->busaddr +
3611 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4747 u32 cp; hpsa_ioctl32_passthru() local
4758 err |= get_user(cp, &arg32->buf); hpsa_ioctl32_passthru()
4759 arg64.buf = compat_ptr(cp); hpsa_ioctl32_passthru()
4784 u32 cp; hpsa_ioctl32_big_passthru() local
4796 err |= get_user(cp, &arg32->buf); hpsa_ioctl32_big_passthru()
4797 arg64.buf = compat_ptr(cp); hpsa_ioctl32_big_passthru()
7324 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; hpsa_enter_performant_mode() local
7326 cp->function = IOACCEL1_FUNCTION_SCSIIO; hpsa_enter_performant_mode()
7327 cp->err_info = (u32) (h->errinfo_pool_dhandle + hpsa_enter_performant_mode()
7329 cp->err_info_len = sizeof(struct ErrorInfo); hpsa_enter_performant_mode()
7330 cp->sgl_offset = IOACCEL1_SGLOFFSET; hpsa_enter_performant_mode()
7331 cp->host_context_flags = hpsa_enter_performant_mode()
7333 cp->timeout_sec = 0; hpsa_enter_performant_mode()
7334 cp->ReplyQueue = 0; hpsa_enter_performant_mode()
7335 cp->tag = hpsa_enter_performant_mode()
7337 cp->host_addr = hpsa_enter_performant_mode()
1972 hpsa_map_one(struct pci_dev *pdev, struct CommandList *cp, unsigned char *buf, size_t buflen, int data_direction) hpsa_map_one() argument
2070 hpsa_scsi_interpret_error(struct ctlr_info *h, struct CommandList *cp) hpsa_scsi_interpret_error() argument
3258 hpsa_scatter_gather(struct ctlr_info *h, struct CommandList *cp, struct scsi_cmnd *cmd) hpsa_scatter_gather() argument
3466 set_encrypt_ioaccel2(struct ctlr_info *h, struct CommandList *c, struct io_accel2_cmd *cp) set_encrypt_ioaccel2() argument
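hpsa_map_one() above either fills exactly one scatter-gather slot and flags it as the last, or leaves SGList/SGTotal zero for a data-less command. A host-side sketch of that branch; the struct layout, the HPSA_SG_LAST value, and the no-op cpu_to_le helpers are demo assumptions (the driver writes a real little-endian descriptor):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define HPSA_SG_LAST 0x80000000u            /* demo value, not verified */

    static uint16_t cpu_to_le16(uint16_t v) { return v; }  /* LE host assumed */
    static uint32_t cpu_to_le32(uint32_t v) { return v; }
    static uint64_t cpu_to_le64(uint64_t v) { return v; }

    struct sg_entry { uint64_t Addr; uint32_t Len; uint32_t Ext; };
    struct cmd {                                /* trimmed CommandList stand-in */
        uint8_t  SGList;
        uint16_t SGTotal;
        struct sg_entry SG[1];
    };

    static void map_one(struct cmd *cp, uint64_t addr64, size_t buflen)
    {
        if (buflen == 0) {                      /* no data phase: counts stay zero */
            cp->SGList  = 0;
            cp->SGTotal = cpu_to_le16(0);
            return;
        }
        cp->SG[0].Addr = cpu_to_le64(addr64);
        cp->SG[0].Len  = cpu_to_le32((uint32_t)buflen);
        cp->SG[0].Ext  = cpu_to_le32(HPSA_SG_LAST);  /* single entry, no chain */
        cp->SGList  = 1;                        /* SGs contiguous in this cmd */
        cp->SGTotal = cpu_to_le16(1);           /* total SGs in the command */
    }

    int main(void)
    {
        struct cmd c = { 0, 0, { { 0, 0, 0 } } };
        map_one(&c, 0x7f000000ull, 512);
        printf("SGList=%u SGTotal=%u len=%u\n",
               c.SGList, c.SGTotal, (unsigned)c.SG[0].Len);
        return 0;
    }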
H A Du14-34f.c580 unsigned int cpp_index; /* cp index */
582 /* All the cp structure is zero filled by queuecommand except the
584 dma_addr_t cp_dma_addr; /* dma handle for this cp structure */
591 struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */ member in struct:hostdata
752 cpp = &HD(j)->cp[0]; board_inquiry()
978 HD(j)->cp[i].cp_dma_addr = pci_map_single(HD(j)->pdev, port_detect()
979 &HD(j)->cp[i], sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL); port_detect()
982 if (! ((&HD(j)->cp[i])->sglist = kmalloc( port_detect()
1120 cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt; map_dma()
1158 cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt; unmap_dma()
1179 cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt; sync_dma()
1215 cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt; scsi_to_dev_dir()
1281 cpp = &HD(j)->cp[i]; u14_34f_queuecommand_lck()
1362 if (SCarg != HD(j)->cp[i].SCpnt) u14_34f_eh_abort()
1363 panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n", u14_34f_eh_abort()
1364 BN(j), i, SCarg, HD(j)->cp[i].SCpnt); u14_34f_eh_abort()
1439 if (!(SCpnt = HD(j)->cp[i].SCpnt)) u14_34f_eh_host_reset()
1489 SCpnt = HD(j)->cp[i].SCpnt; u14_34f_eh_host_reset()
1501 SCpnt = HD(j)->cp[i].SCpnt; u14_34f_eh_host_reset()
1604 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; reorder()
1641 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; reorder()
1665 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; reorder()
1688 cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; flush_dev()
1700 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; flush_dev()
1748 if (H2DEV(HD(j)->cp[i].cp_dma_addr) == ret) break; ihdlr()
1752 (void *)ret, (void *)H2DEV(HD(j)->cp[0].cp_dma_addr)); ihdlr()
1754 cpp = &(HD(j)->cp[i]); ihdlr()
1951 kfree((&HD(j)->cp[i])->sglist); u14_34f_release()
1954 pci_unmap_single(HD(j)->pdev, HD(j)->cp[i].cp_dma_addr, u14_34f_release()
/linux-4.1.27/arch/sparc/kernel/
H A Dds.c134 struct ds_cap_state *cp,
145 static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp,
148 struct ds_cap_state *cp,
151 struct ds_cap_state *cp,
155 struct ds_cap_state *cp,
159 struct ds_cap_state *cp,
162 struct ds_cap_state *cp,
279 struct ds_cap_state *cp, md_update_data()
299 pkt.data.handle = cp->handle; md_update_data()
318 struct ds_cap_state *cp, domain_shutdown_data()
337 pkt.data.handle = cp->handle; domain_shutdown_data()
358 struct ds_cap_state *cp, domain_panic_data()
377 pkt.data.handle = cp->handle; domain_panic_data()
421 struct ds_cap_state *cp, __dr_cpu_send_error()
433 pkt.data.handle = cp->handle; __dr_cpu_send_error()
447 struct ds_cap_state *cp, dr_cpu_send_error()
453 __dr_cpu_send_error(dp, cp, data); dr_cpu_send_error()
531 static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp, dr_cpu_configure() argument
544 dr_cpu_init_response(resp, req_num, cp->handle, dr_cpu_configure()
589 struct ds_cap_state *cp, dr_cpu_unconfigure()
603 dr_cpu_init_response(resp, req_num, cp->handle, dr_cpu_unconfigure()
628 static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, dr_cpu_data() argument
646 dr_cpu_send_error(dp, cp, data); dr_cpu_data()
662 err = dr_cpu_configure(dp, cp, req_num, &mask); dr_cpu_data()
664 err = dr_cpu_unconfigure(dp, cp, req_num, &mask); dr_cpu_data()
667 dr_cpu_send_error(dp, cp, data); dr_cpu_data()
680 struct ds_cap_state *cp, ds_pri_data()
725 struct ds_cap_state *cp, ds_var_data()
744 struct ds_cap_state *cp; ldom_set_var() local
749 cp = NULL; ldom_set_var()
755 cp = tmp; ldom_set_var()
759 if (!cp) { ldom_set_var()
765 cp = tmp; ldom_set_var()
772 if (cp) { ldom_set_var()
795 pkt.header.data.handle = cp->handle; ldom_set_var()
892 struct ds_cap_state *cp = &dp->ds_states[i]; register_services() local
896 if (cp->state == CAP_STATE_REGISTERED) register_services()
900 cp->handle = ((u64) i << 32) | new_count; register_services()
903 strlen(cp->service_id)); register_services()
908 pbuf.req.handle = cp->handle; register_services()
911 strcpy(pbuf.req.svc_id, cp->service_id); register_services()
915 cp->state = CAP_STATE_REG_SENT; register_services()
937 struct ds_cap_state *cp = find_cap(dp, ap->handle); ds_handshake() local
939 if (!cp) { ds_handshake()
945 dp->id, cp->service_id); ds_handshake()
946 cp->state = CAP_STATE_REGISTERED; ds_handshake()
949 struct ds_cap_state *cp = find_cap(dp, np->handle); ds_handshake() local
951 if (!cp) { ds_handshake()
957 cp->state = CAP_STATE_UNKNOWN; ds_handshake()
1006 struct ds_cap_state *cp = find_cap(dp, dpkt->handle); process_ds_work() local
1009 if (!cp) { process_ds_work()
1018 cp->data(dp, cp, dpkt, req_len); process_ds_work()
1085 struct ds_cap_state *cp = &dp->ds_states[i]; ds_reset() local
1087 cp->state = CAP_STATE_UNKNOWN; ds_reset()
278 md_update_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) md_update_data() argument
317 domain_shutdown_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) domain_shutdown_data() argument
357 domain_panic_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) domain_panic_data() argument
420 __dr_cpu_send_error(struct ds_info *dp, struct ds_cap_state *cp, struct ds_data *data) __dr_cpu_send_error() argument
446 dr_cpu_send_error(struct ds_info *dp, struct ds_cap_state *cp, struct ds_data *data) dr_cpu_send_error() argument
588 dr_cpu_unconfigure(struct ds_info *dp, struct ds_cap_state *cp, u64 req_num, cpumask_t *mask) dr_cpu_unconfigure() argument
679 ds_pri_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) ds_pri_data() argument
724 ds_var_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) ds_var_data() argument
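register_services() above mints each capability handle as ((u64) i << 32) | new_count, so the service index and a registration counter travel in one 64-bit value. The pack/unpack arithmetic in isolation:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t make_handle(uint32_t index, uint32_t count)
    {
        return ((uint64_t)index << 32) | count;   /* index in the high word */
    }

    int main(void)
    {
        uint64_t h = make_handle(3, 42);
        printf("handle=0x%016llx index=%u count=%u\n",
               (unsigned long long)h,
               (unsigned)(h >> 32), (unsigned)(h & 0xffffffffu));
        return 0;
    }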
/linux-4.1.27/tools/power/cpupower/utils/
H A Dcpufreq-set.c69 int power = 0, match_count = 0, i, cp, pad; string_to_frequency() local
98 for (cp = 0; isdigit(str[cp]); cp++) string_to_frequency()
101 if (str[cp] == '.') { string_to_frequency()
102 while (power > -1 && isdigit(str[cp+1])) string_to_frequency()
103 cp++, power--; string_to_frequency()
108 pad = 0, cp += power + 1; string_to_frequency()
110 if (cp <= 0 || cp + pad > NORM_FREQ_LEN - 1) string_to_frequency()
114 for (i = 0; i < cp; i++, str++) { string_to_frequency()
120 for (; i < cp + pad; i++) string_to_frequency()
126 normalized[i-1] = 0; /* cp > 0 && pad >= 0 ==> i > 0 */ string_to_frequency()
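string_to_frequency() above normalizes inputs such as "1.5GHz" with pure digit shuffling. A deliberately simplified sketch of the same parse, not the tool's exact algorithm: it reaches for strtod, which the real code avoids, and the suffix handling is reduced to first-letter checks.

    #include <stdio.h>
    #include <stdlib.h>
    #include <strings.h>                 /* strncasecmp */

    /* Return frequency in kHz, or 0 on parse failure. */
    static unsigned long parse_freq_khz(const char *str)
    {
        char *end;
        double v = strtod(str, &end);    /* simplification: the tool avoids FP */
        if (end == str || v <= 0)
            return 0;
        if (!strncasecmp(end, "g", 1))
            return (unsigned long)(v * 1000000);
        if (!strncasecmp(end, "m", 1))
            return (unsigned long)(v * 1000);
        if (!strncasecmp(end, "k", 1) || *end == '\0')
            return (unsigned long)v;
        return 0;
    }

    int main(void)
    {
        const char *tests[] = { "1.5GHz", "800MHz", "1200000" };
        for (int i = 0; i < 3; i++)
            printf("%-8s -> %lu kHz\n", tests[i], parse_freq_khz(tests[i]));
        return 0;
    }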
/linux-4.1.27/drivers/media/tuners/
H A Dtda827x.c99 u8 cp; member in struct:tda827x_data
105 { .lomax = 62000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
106 { .lomax = 66000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
107 { .lomax = 76000000, .spd = 3, .bs = 1, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
108 { .lomax = 84000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
109 { .lomax = 93000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
110 { .lomax = 98000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
111 { .lomax = 109000000, .spd = 3, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
112 { .lomax = 123000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
113 { .lomax = 133000000, .spd = 2, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
114 { .lomax = 151000000, .spd = 2, .bs = 1, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
115 { .lomax = 154000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
116 { .lomax = 181000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 0, .div1p5 = 0},
117 { .lomax = 185000000, .spd = 2, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
118 { .lomax = 217000000, .spd = 2, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
119 { .lomax = 244000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
120 { .lomax = 265000000, .spd = 1, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
121 { .lomax = 302000000, .spd = 1, .bs = 1, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
122 { .lomax = 324000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
123 { .lomax = 370000000, .spd = 1, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
124 { .lomax = 454000000, .spd = 1, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
125 { .lomax = 493000000, .spd = 0, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
126 { .lomax = 530000000, .spd = 0, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
127 { .lomax = 554000000, .spd = 0, .bs = 1, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
128 { .lomax = 604000000, .spd = 0, .bs = 1, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
129 { .lomax = 696000000, .spd = 0, .bs = 2, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
130 { .lomax = 740000000, .spd = 0, .bs = 2, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
131 { .lomax = 820000000, .spd = 0, .bs = 3, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
132 { .lomax = 865000000, .spd = 0, .bs = 3, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
133 { .lomax = 0, .spd = 0, .bs = 0, .bp = 0, .cp = 0, .gc3 = 0, .div1p5 = 0}
214 buf[1] = 0x50 + tda827x_table[i].cp; tda827xo_set_params()
318 reg2[1] = (tuner_reg[4] & 0xfc) + tda827x_table[i].cp; tda827xo_set_analog_params()
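tda827xo_set_params() above scans tda827x_table for the first row whose .lomax bound covers the target LO frequency, then folds that row's charge-pump bit into the register byte as 0x50 + cp. The bounded scan, with a few rows lifted from the table above:

    #include <stdio.h>
    #include <stdint.h>

    struct row { uint32_t lomax; uint8_t cp; };

    static const struct row table[] = {      /* abbreviated from the driver */
        {  62000000, 0 }, { 109000000, 0 }, { 740000000, 1 },
        { 865000000, 1 }, { 0, 0 },          /* lomax == 0 terminates */
    };

    int main(void)
    {
        uint32_t lo = 700000000;             /* 700 MHz LO */
        int i = 0;
        while (table[i].lomax && lo > table[i].lomax)
            i++;                             /* first row with lomax >= lo */
        uint8_t reg = 0x50 + table[i].cp;    /* charge-pump bit into reg byte */
        printf("row %d lomax=%u reg=0x%02x\n", i, table[i].lomax, reg);
        return 0;
    }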
/linux-4.1.27/drivers/s390/cio/
H A Ddevice_pgid.c58 struct ccw1 *cp = cdev->private->iccws; nop_build_cp() local
60 cp->cmd_code = CCW_CMD_NOOP; nop_build_cp()
61 cp->cda = 0; nop_build_cp()
62 cp->count = 0; nop_build_cp()
63 cp->flags = CCW_FLAG_SLI; nop_build_cp()
64 req->cp = cp; nop_build_cp()
135 struct ccw1 *cp = cdev->private->iccws; spid_build_cp() local
140 cp->cmd_code = CCW_CMD_SET_PGID; spid_build_cp()
141 cp->cda = (u32) (addr_t) pgid; spid_build_cp()
142 cp->count = sizeof(*pgid); spid_build_cp()
143 cp->flags = CCW_FLAG_SLI; spid_build_cp()
144 req->cp = cp; spid_build_cp()
436 struct ccw1 *cp = cdev->private->iccws; snid_build_cp() local
440 cp->cmd_code = CCW_CMD_SENSE_PGID; snid_build_cp()
441 cp->cda = (u32) (addr_t) &cdev->private->pgid[i]; snid_build_cp()
442 cp->count = sizeof(struct pgid); snid_build_cp()
443 cp->flags = CCW_FLAG_SLI; snid_build_cp()
444 req->cp = cp; snid_build_cp()
622 struct ccw1 *cp = cdev->private->iccws; stlck_build_cp() local
624 cp[0].cmd_code = CCW_CMD_STLCK; stlck_build_cp()
625 cp[0].cda = (u32) (addr_t) buf1; stlck_build_cp()
626 cp[0].count = 32; stlck_build_cp()
627 cp[0].flags = CCW_FLAG_CC; stlck_build_cp()
628 cp[1].cmd_code = CCW_CMD_RELEASE; stlck_build_cp()
629 cp[1].cda = (u32) (addr_t) buf2; stlck_build_cp()
630 cp[1].count = 32; stlck_build_cp()
631 cp[1].flags = 0; stlck_build_cp()
632 req->cp = cp; stlck_build_cp()
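stlck_build_cp() above chains two CCWs: the first carries CCW_FLAG_CC so the channel proceeds to the RELEASE command that follows it in memory, and clear flags on the last CCW end the program. A mock of that two-entry channel program; treat every constant here as a demo value (0x40 matches the usual format-1 chain bit, the opcodes are placeholders):

    #include <stdio.h>
    #include <stdint.h>

    #define CCW_FLAG_CC  0x40     /* "command chain": keep going (demo value) */
    #define CMD_STLCK    0x14     /* opcode placeholders, not verified */
    #define CMD_RELEASE  0x17

    struct ccw1 {                 /* shaped after the s390 format-1 CCW */
        uint8_t  cmd_code;
        uint8_t  flags;
        uint16_t count;
        uint32_t cda;             /* data address */
    };

    int main(void)
    {
        uint8_t buf1[32], buf2[32];
        struct ccw1 cp[2] = {
            { CMD_STLCK,   CCW_FLAG_CC, 32, (uint32_t)(uintptr_t)buf1 },
            { CMD_RELEASE, 0,           32, (uint32_t)(uintptr_t)buf2 },
        };
        /* flags == 0 on the last CCW ends the channel program */
        for (int i = 0; i < 2; i++)
            printf("ccw[%d] cmd=0x%02x flags=0x%02x count=%u\n",
                   i, cp[i].cmd_code, cp[i].flags, cp[i].count);
        return 0;
    }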
H A Ddevice_id.c202 struct ccw1 *cp = cdev->private->iccws; ccw_device_sense_id_start() local
209 cp->cmd_code = CCW_CMD_SENSE_ID; ccw_device_sense_id_start()
210 cp->cda = (u32) (addr_t) &cdev->private->senseid; ccw_device_sense_id_start()
211 cp->count = sizeof(struct senseid); ccw_device_sense_id_start()
212 cp->flags = CCW_FLAG_SLI; ccw_device_sense_id_start()
215 req->cp = cp; ccw_device_sense_id_start()
H A Dchp.c251 struct channel_path *cp = to_channelpath(dev); chp_status_write() local
261 mutex_lock(&cp->lock); chp_status_write()
262 error = s390_vary_chpid(cp->chpid, 1); chp_status_write()
263 mutex_unlock(&cp->lock); chp_status_write()
265 mutex_lock(&cp->lock); chp_status_write()
266 error = s390_vary_chpid(cp->chpid, 0); chp_status_write()
267 mutex_unlock(&cp->lock); chp_status_write()
279 struct channel_path *cp; chp_configure_show() local
282 cp = to_channelpath(dev); chp_configure_show()
283 status = chp_info_get_status(cp->chpid); chp_configure_show()
296 struct channel_path *cp; chp_configure_write() local
304 cp = to_channelpath(dev); chp_configure_write()
305 chp_cfg_schedule(cp->chpid, val); chp_configure_write()
409 struct channel_path *cp; chp_release() local
411 cp = to_channelpath(dev); chp_release()
412 kfree(cp); chp_release()
/linux-4.1.27/net/dns_resolver/
H A Ddns_query.c76 char *desc, *cp; dns_query() local
105 cp = desc; dns_query()
107 memcpy(cp, type, typelen); dns_query()
108 cp += typelen; dns_query()
109 *cp++ = ':'; dns_query()
111 memcpy(cp, name, namelen); dns_query()
112 cp += namelen; dns_query()
113 *cp = '\0'; dns_query()
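dns_query() above assembles the request-key description as "type:name" with explicit memcpy and pointer stepping. The same construction, runnable:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *type = "a", *name = "example.com";
        size_t typelen = strlen(type), namelen = strlen(name);
        char *desc = malloc(typelen + 1 + namelen + 1);
        if (!desc)
            return 1;

        char *cp = desc;
        memcpy(cp, type, typelen); cp += typelen;
        *cp++ = ':';
        memcpy(cp, name, namelen); cp += namelen;
        *cp = '\0';

        printf("key description: \"%s\"\n", desc);   /* -> "a:example.com" */
        free(desc);
        return 0;
    }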
/linux-4.1.27/drivers/md/bcache/
H A Dsysfs.h88 #define strtoul_or_return(cp) \
91 int _r = kstrtoul(cp, 10, &_v); \
97 #define strtoi_h_or_return(cp, v) \
99 int _r = strtoi_h(cp, &v); \
H A Dutil.h306 static inline int bch_strtol_h(const char *cp, long *res) bch_strtol_h() argument
309 return bch_strtoint_h(cp, (int *) res); bch_strtol_h()
311 return bch_strtoll_h(cp, (long long *) res); bch_strtol_h()
315 static inline int bch_strtoul_h(const char *cp, long *res) bch_strtoul_h() argument
318 return bch_strtouint_h(cp, (unsigned int *) res); bch_strtoul_h()
320 return bch_strtoull_h(cp, (unsigned long long *) res); bch_strtoul_h()
324 #define strtoi_h(cp, res) \
326 ? bch_strtoint_h(cp, (void *) res) \
328 ? bch_strtol_h(cp, (void *) res) \
330 ? bch_strtoll_h(cp, (void *) res) \
332 ? bch_strtouint_h(cp, (void *) res) \
334 ? bch_strtoul_h(cp, (void *) res) \
336 ? bch_strtoull_h(cp, (void *) res) : -EINVAL)
338 #define strtoul_safe(cp, var) \
341 int _r = kstrtoul(cp, 10, &_v); \
347 #define strtoul_safe_clamp(cp, var, min, max) \
350 int _r = kstrtoul(cp, 10, &_v); \
/linux-4.1.27/arch/avr32/mm/
H A Dclear_page.S23 cp r12, r9
H A Dcopy_page.S26 cp r11, r10
/linux-4.1.27/arch/cris/boot/
H A DMakefile23 @cp $< $@
/linux-4.1.27/tools/testing/selftests/exec/
H A DMakefile15 cp $< $@
/linux-4.1.27/arch/mips/include/asm/
H A Dpmon.h24 int (*smpfork) (unsigned long cp, char *sp);
42 #define pmon_smpfork(cp, sp) debug_vectors->_s.smpfork(cp, sp)
/linux-4.1.27/drivers/crypto/
H A Dmv_cesa.c1024 struct crypto_priv *cp; mv_probe() local
1038 cp = kzalloc(sizeof(*cp), GFP_KERNEL); mv_probe()
1039 if (!cp) mv_probe()
1042 spin_lock_init(&cp->lock); mv_probe()
1043 crypto_init_queue(&cp->queue, 50); mv_probe()
1044 cp->reg = ioremap(res->start, resource_size(res)); mv_probe()
1045 if (!cp->reg) { mv_probe()
1055 cp->sram_size = resource_size(res); mv_probe()
1056 cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; mv_probe()
1057 cp->sram = ioremap(res->start, cp->sram_size); mv_probe()
1058 if (!cp->sram) { mv_probe()
1071 cp->irq = irq; mv_probe()
1073 platform_set_drvdata(pdev, cp); mv_probe()
1074 cpg = cp; mv_probe()
1076 cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); mv_probe()
1077 if (IS_ERR(cp->queue_th)) { mv_probe()
1078 ret = PTR_ERR(cp->queue_th); mv_probe()
1083 cp); mv_probe()
1089 cp->clk = clk_get(&pdev->dev, NULL); mv_probe()
1090 if (!IS_ERR(cp->clk)) mv_probe()
1091 clk_prepare_enable(cp->clk); mv_probe()
1130 free_irq(irq, cp); mv_probe()
1131 if (!IS_ERR(cp->clk)) { mv_probe()
1132 clk_disable_unprepare(cp->clk); mv_probe()
1133 clk_put(cp->clk); mv_probe()
1136 kthread_stop(cp->queue_th); mv_probe()
1138 iounmap(cp->sram); mv_probe()
1140 iounmap(cp->reg); mv_probe()
1142 kfree(cp); mv_probe()
1149 struct crypto_priv *cp = platform_get_drvdata(pdev); mv_remove() local
1153 if (cp->has_sha1) mv_remove()
1155 if (cp->has_hmac_sha1) mv_remove()
1157 kthread_stop(cp->queue_th); mv_remove()
1158 free_irq(cp->irq, cp); mv_remove()
1159 memset(cp->sram, 0, cp->sram_size); mv_remove()
1160 iounmap(cp->sram); mv_remove()
1161 iounmap(cp->reg); mv_remove()
1163 if (!IS_ERR(cp->clk)) { mv_remove()
1164 clk_disable_unprepare(cp->clk); mv_remove()
1165 clk_put(cp->clk); mv_remove()
1168 kfree(cp); mv_remove()
/linux-4.1.27/drivers/net/ethernet/apple/
H A Dmace.c434 volatile struct dbdma_cmd *cp; mace_open() local
445 cp = mp->rx_cmds; mace_open()
455 cp->req_count = cpu_to_le16(RX_BUFLEN); mace_open()
456 cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS); mace_open()
457 cp->phy_addr = cpu_to_le32(virt_to_bus(data)); mace_open()
458 cp->xfer_status = 0; mace_open()
459 ++cp; mace_open()
462 cp->command = cpu_to_le16(DBDMA_STOP); mace_open()
467 ++cp; mace_open()
468 cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); mace_open()
469 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds)); mace_open()
477 cp = mp->tx_cmds + NCMDS_TX * N_TX_RING; mace_open()
478 cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); mace_open()
479 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds)); mace_open()
535 volatile struct dbdma_cmd *cp, *np; mace_xmit_start() local
560 cp = mp->tx_cmds + NCMDS_TX * fill; mace_xmit_start()
561 cp->req_count = cpu_to_le16(len); mace_xmit_start()
562 cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data)); mace_xmit_start()
571 out_le16(&cp->xfer_status, 0); mace_xmit_start()
572 out_le16(&cp->command, OUTPUT_LAST); mace_xmit_start()
665 volatile struct dbdma_cmd *cp; mace_interrupt() local
726 cp = mp->tx_cmds + NCMDS_TX * i; mace_interrupt()
727 stat = le16_to_cpu(cp->xfer_status); mace_interrupt()
791 cp = mp->tx_cmds + NCMDS_TX * i; mace_interrupt()
792 out_le16(&cp->xfer_status, 0); mace_interrupt()
793 out_le16(&cp->command, OUTPUT_LAST); mace_interrupt()
812 volatile struct dbdma_cmd *cp; mace_tx_timeout() local
824 cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; mace_tx_timeout()
833 cp = bus_to_virt(le32_to_cpu(rd->cmdptr)); mace_tx_timeout()
835 out_le16(&cp->xfer_status, 0); mace_tx_timeout()
836 out_le32(&rd->cmdptr, virt_to_bus(cp)); mace_tx_timeout()
854 cp = mp->tx_cmds + NCMDS_TX * i; mace_tx_timeout()
855 out_le16(&cp->xfer_status, 0); mace_tx_timeout()
856 out_le16(&cp->command, OUTPUT_LAST); mace_tx_timeout()
857 out_le32(&td->cmdptr, virt_to_bus(cp)); mace_tx_timeout()
881 volatile struct dbdma_cmd *cp, *np; mace_rxdma_intr() local
891 cp = mp->rx_cmds + i; mace_rxdma_intr()
892 stat = le16_to_cpu(cp->xfer_status); mace_rxdma_intr()
905 nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count); mace_rxdma_intr()
906 out_le16(&cp->command, DBDMA_STOP); mace_rxdma_intr()
956 cp = mp->rx_cmds + i; mace_rxdma_intr()
965 cp->req_count = cpu_to_le16(RX_BUFLEN); mace_rxdma_intr()
967 cp->phy_addr = cpu_to_le32(virt_to_bus(data)); mace_rxdma_intr()
968 out_le16(&cp->xfer_status, 0); mace_rxdma_intr()
969 out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS); mace_rxdma_intr()
H A Dbmac.c200 dbdma_setcmd(volatile struct dbdma_cmd *cp, dbdma_setcmd() argument
204 out_le16(&cp->command, cmd); dbdma_setcmd()
205 out_le16(&cp->req_count, count); dbdma_setcmd()
206 out_le32(&cp->phy_addr, addr); dbdma_setcmd()
207 out_le32(&cp->cmd_dep, cmd_dep); dbdma_setcmd()
208 out_le16(&cp->xfer_status, 0); dbdma_setcmd()
209 out_le16(&cp->res_count, 0); dbdma_setcmd()
566 bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp) bmac_construct_xmt() argument
576 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0); bmac_construct_xmt()
580 bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp) bmac_construct_rxbuff() argument
584 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN, bmac_construct_rxbuff()
684 volatile struct dbdma_cmd *cp; bmac_rxdma_intr() local
701 cp = &bp->rx_cmds[i]; bmac_rxdma_intr()
702 stat = le16_to_cpu(cp->xfer_status); bmac_rxdma_intr()
703 residual = le16_to_cpu(cp->res_count); bmac_rxdma_intr()
731 cp->res_count = cpu_to_le16(0); bmac_rxdma_intr()
732 cp->xfer_status = cpu_to_le16(0); bmac_rxdma_intr()
757 volatile struct dbdma_cmd *cp; bmac_txdma_intr() local
771 cp = &bp->tx_cmds[bp->tx_empty]; bmac_txdma_intr()
772 stat = le16_to_cpu(cp->xfer_status); bmac_txdma_intr()
780 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) bmac_txdma_intr()
1482 volatile struct dbdma_cmd *cp; bmac_tx_timeout() local
1494 cp = &bp->tx_cmds[bp->tx_empty]; bmac_tx_timeout()
1496 /* le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */ bmac_tx_timeout()
1509 cp = bus_to_virt(le32_to_cpu(rd->cmdptr)); bmac_tx_timeout()
1511 out_le16(&cp->xfer_status, 0); bmac_tx_timeout()
1512 out_le32(&rd->cmdptr, virt_to_bus(cp)); bmac_tx_timeout()
1529 cp = &bp->tx_cmds[i]; bmac_tx_timeout()
1530 out_le16(&cp->xfer_status, 0); bmac_tx_timeout()
1531 out_le16(&cp->command, OUTPUT_LAST); bmac_tx_timeout()
1532 out_le32(&td->cmdptr, virt_to_bus(cp)); bmac_tx_timeout()
1548 static void dump_dbdma(volatile struct dbdma_cmd *cp, int count)
1553 ip = (int*)(cp+i);
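dbdma_setcmd() above is the canonical descriptor seed for Apple's DBDMA engine: opcode, byte count, buffer address, branch target, then both status words cleared so the engine's write-back is observable. A host-endian mock of that fill; the field order and opcode constants are demo approximations, and the real code stores through out_le16/out_le32 into DMA memory:

    #include <stdio.h>
    #include <stdint.h>

    struct dbdma_cmd {            /* layout shaped after the DBDMA command */
        uint16_t req_count;       /* bytes to transfer */
        uint16_t command;         /* OUTPUT_LAST, INPUT_LAST, DBDMA_STOP... */
        uint32_t phy_addr;        /* bus address of the data buffer */
        uint32_t cmd_dep;         /* branch address for branching commands */
        uint16_t res_count;       /* residual, written back by the engine */
        uint16_t xfer_status;     /* status, written back by the engine */
    };

    #define OUTPUT_LAST 0x1000    /* demo opcode values, not the real encoding */
    #define INTR_ALWAYS 0x0030

    static void dbdma_setcmd(struct dbdma_cmd *cp, uint16_t cmd,
                             uint16_t count, uint32_t addr, uint32_t cmd_dep)
    {
        cp->command   = cmd;      /* kernel: out_le16(&cp->command, cmd) */
        cp->req_count = count;
        cp->phy_addr  = addr;
        cp->cmd_dep   = cmd_dep;
        cp->xfer_status = 0;      /* cleared so completion is observable */
        cp->res_count   = 0;
    }

    int main(void)
    {
        struct dbdma_cmd cmd;
        dbdma_setcmd(&cmd, OUTPUT_LAST | INTR_ALWAYS, 1514, 0x00200000, 0);
        printf("cmd=0x%04x count=%u addr=0x%08x\n",
               cmd.command, cmd.req_count, cmd.phy_addr);
        return 0;
    }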
/linux-4.1.27/arch/um/drivers/
H A Dpty.c81 char *pty, *bank, *cp; getmaster() local
92 for (cp = "0123456789abcdef"; *cp; cp++) { getmaster()
93 *pty = *cp; getmaster()
/linux-4.1.27/drivers/tty/
H A Drocket.c314 static void rp_do_receive(struct r_port *info, CHANNEL_t *cp, rp_do_receive() argument
321 ToRecv = sGetRxCnt(cp); rp_do_receive()
339 sEnRxStatusMode(cp); rp_do_receive()
356 CharNStat = sInW(sGetTxRxDataIO(cp)); rp_do_receive()
386 if (sGetRxCnt(cp) == 0) { rp_do_receive()
390 sDisRxStatusMode(cp); rp_do_receive()
409 sInStrW(sGetTxRxDataIO(cp), (unsigned short *) cbuf, wRecv); rp_do_receive()
411 cbuf[ToRecv - 1] = sInB(sGetTxRxDataIO(cp)); rp_do_receive()
426 CHANNEL_t *cp = &info->channel; rp_do_transmit() local
444 info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp); rp_do_transmit()
454 sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2); rp_do_transmit()
456 sOutB(sGetTxRxDataIO(cp), info->xmit_buf[info->xmit_tail + c - 1]); rp_do_transmit()
492 CHANNEL_t *cp; rp_handle_port() local
504 cp = &info->channel; rp_handle_port()
506 IntMask = sGetChanIntID(cp) & info->intmask; rp_handle_port()
510 ChanStatus = sGetChanStatus(cp); rp_handle_port()
512 rp_do_receive(info, cp, ChanStatus); rp_handle_port()
712 CHANNEL_t *cp; configure_r_port() local
715 cp = &info->channel; configure_r_port()
720 sSetData8(cp); configure_r_port()
723 sSetData7(cp); configure_r_port()
727 sSetStop2(cp); configure_r_port()
730 sSetStop1(cp); configure_r_port()
734 sEnParity(cp); configure_r_port()
737 sSetOddParity(cp); configure_r_port()
739 sSetEvenParity(cp); configure_r_port()
742 sDisParity(cp); configure_r_port()
761 sSetBaud(cp, divisor); configure_r_port()
768 sEnCTSFlowCtl(cp); configure_r_port()
771 sDisCTSFlowCtl(cp); configure_r_port()
777 if (sGetChanStatus(cp) & CD_ACT) configure_r_port()
790 sEnTxSoftFlowCtl(cp); configure_r_port()
792 sEnIXANY(cp); configure_r_port()
794 sDisIXANY(cp); configure_r_port()
796 sSetTxXONChar(cp, START_CHAR(tty)); configure_r_port()
797 sSetTxXOFFChar(cp, STOP_CHAR(tty)); configure_r_port()
799 sDisTxSoftFlowCtl(cp); configure_r_port()
800 sDisIXANY(cp); configure_r_port()
801 sClrTxXOFF(cp); configure_r_port()
834 sEnRTSToggle(cp); configure_r_port()
836 sDisRTSToggle(cp); configure_r_port()
840 if (cp->CtlP->boardType == ROCKET_TYPE_PC104) { configure_r_port()
843 sSetInterfaceMode(cp, InterfaceModeRS485); configure_r_port()
846 sSetInterfaceMode(cp, InterfaceModeRS422); configure_r_port()
851 sSetInterfaceMode(cp, InterfaceModeRS232T); configure_r_port()
853 sSetInterfaceMode(cp, InterfaceModeRS232); configure_r_port()
886 CHANNEL_t *cp; rp_open() local
933 cp = &info->channel; rp_open()
934 sSetRxTrigger(cp, TRIG_1); rp_open()
935 if (sGetChanStatus(cp) & CD_ACT) rp_open()
939 sDisRxStatusMode(cp); rp_open()
940 sFlushRxFIFO(cp); rp_open()
941 sFlushTxFIFO(cp); rp_open()
943 sEnInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN)); rp_open()
944 sSetRxTrigger(cp, TRIG_1); rp_open()
946 sGetChanStatus(cp); rp_open()
947 sDisRxStatusMode(cp); rp_open()
948 sClrTxXOFF(cp); rp_open()
950 sDisCTSFlowCtl(cp); rp_open()
951 sDisTxSoftFlowCtl(cp); rp_open()
953 sEnRxFIFO(cp); rp_open()
954 sEnTransmit(cp); rp_open()
972 sSetDTR(cp); rp_open()
973 sSetRTS(cp); rp_open()
997 CHANNEL_t *cp; rp_close() local
1010 cp = &info->channel; rp_close()
1016 timeout = (sGetTxCnt(cp) + 1) * HZ / info->cps; rp_close()
1022 sDisTransmit(cp); rp_close()
1023 sDisInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN)); rp_close()
1024 sDisCTSFlowCtl(cp); rp_close()
1025 sDisTxSoftFlowCtl(cp); rp_close()
1026 sClrTxXOFF(cp); rp_close()
1027 sFlushRxFIFO(cp); rp_close()
1028 sFlushTxFIFO(cp); rp_close()
1029 sClrRTS(cp); rp_close()
1031 sClrDTR(cp); rp_close()
1076 CHANNEL_t *cp; rp_set_termios() local
1095 cp = &info->channel; rp_set_termios()
1099 sClrDTR(cp); rp_set_termios()
1100 sClrRTS(cp); rp_set_termios()
1105 sSetRTS(cp); rp_set_termios()
1106 sSetDTR(cp); rp_set_termios()
1355 CHANNEL_t *cp; rp_send_xchar() local
1360 cp = &info->channel; rp_send_xchar()
1361 if (sGetTxCnt(cp)) rp_send_xchar()
1362 sWriteTxPrioByte(cp, ch); rp_send_xchar()
1364 sWriteTxByte(sGetTxRxDataIO(cp), ch); rp_send_xchar()
1449 CHANNEL_t *cp; rp_wait_until_sent() local
1457 cp = &info->channel; rp_wait_until_sent()
1466 txcnt = sGetTxCnt(cp); rp_wait_until_sent()
1468 if (sGetChanStatusLo(cp) & TXSHRMT) rp_wait_until_sent()
1502 CHANNEL_t *cp; rp_hangup() local
1525 cp = &info->channel; rp_hangup()
1526 sDisRxFIFO(cp); rp_hangup()
1527 sDisTransmit(cp); rp_hangup()
1528 sDisInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN)); rp_hangup()
1529 sDisCTSFlowCtl(cp); rp_hangup()
1530 sDisTxSoftFlowCtl(cp); rp_hangup()
1531 sClrTxXOFF(cp); rp_hangup()
1547 CHANNEL_t *cp; rp_put_char() local
1564 cp = &info->channel; rp_put_char()
1567 info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp); rp_put_char()
1575 sOutB(sGetTxRxDataIO(cp), ch); rp_put_char()
1594 CHANNEL_t *cp; rp_write() local
1608 cp = &info->channel; rp_write()
1611 info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp); rp_write()
1622 sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) b, c / 2); rp_write()
1626 sOutB(sGetTxRxDataIO(cp), b[c - 1]); rp_write()
1726 CHANNEL_t *cp; rp_flush_buffer() local
1741 cp = &info->channel; rp_flush_buffer()
1742 sFlushTxFIFO(cp); rp_flush_buffer()
H A Dn_tty.c560 const unsigned char *cp; process_output_block() local
572 for (i = 0, cp = buf; i < nr; i++, cp++) { process_output_block()
573 unsigned char c = *cp; process_output_block()
1516 n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp, n_tty_receive_buf_real_raw() argument
1524 memcpy(read_buf_addr(ldata, head), cp, n); n_tty_receive_buf_real_raw() local
1526 cp += n; n_tty_receive_buf_real_raw()
1531 memcpy(read_buf_addr(ldata, head), cp, n); n_tty_receive_buf_real_raw() local
1536 n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp, n_tty_receive_buf_raw() argument
1546 put_tty_queue(*cp++, ldata); n_tty_receive_buf_raw()
1548 n_tty_receive_char_flagged(tty, *cp++, flag); n_tty_receive_buf_raw()
1553 n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp, n_tty_receive_buf_closing() argument
1562 n_tty_receive_char_closing(tty, *cp++); n_tty_receive_buf_closing()
1564 n_tty_receive_char_flagged(tty, *cp++, flag); n_tty_receive_buf_closing()
1569 n_tty_receive_buf_standard(struct tty_struct *tty, const unsigned char *cp, n_tty_receive_buf_standard() argument
1579 unsigned char c = *cp++; n_tty_receive_buf_standard()
1594 n_tty_receive_char_lnext(tty, *cp++, flag); n_tty_receive_buf_standard()
1598 n_tty_receive_char_flagged(tty, *cp++, flag); n_tty_receive_buf_standard()
1603 n_tty_receive_buf_fast(struct tty_struct *tty, const unsigned char *cp, n_tty_receive_buf_fast() argument
1613 unsigned char c = *cp++; n_tty_receive_buf_fast()
1620 n_tty_receive_char_lnext(tty, *cp++, flag); n_tty_receive_buf_fast()
1624 n_tty_receive_char_flagged(tty, *cp++, flag); n_tty_receive_buf_fast()
1628 static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, __receive_buf() argument
1635 n_tty_receive_buf_real_raw(tty, cp, fp, count); __receive_buf()
1637 n_tty_receive_buf_raw(tty, cp, fp, count); __receive_buf()
1639 n_tty_receive_buf_closing(tty, cp, fp, count); __receive_buf()
1646 n_tty_receive_char_lnext(tty, *cp++, flag); __receive_buf()
1651 n_tty_receive_buf_fast(tty, cp, fp, count); __receive_buf()
1653 n_tty_receive_buf_standard(tty, cp, fp, count); __receive_buf()
1675 * @cp: input chars
1677 * @count: number of input chars in @cp
1684 * Returns the # of input chars from @cp which were processed.
1706 n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, n_tty_receive_buf_common() argument
1749 __receive_buf(tty, cp, fp, n); n_tty_receive_buf_common()
1751 cp += n; n_tty_receive_buf_common()
1775 static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, n_tty_receive_buf() argument
1778 n_tty_receive_buf_common(tty, cp, fp, count, 0); n_tty_receive_buf()
1781 static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp, n_tty_receive_buf2() argument
1784 return n_tty_receive_buf_common(tty, cp, fp, count, 1); n_tty_receive_buf2()
H A Dn_tracerouter.c163 * @cp: buffer, block of characters to be eventually read by
166 * @count: number of characters (aka, bytes) in cp.
168 * This function takes the input buffer, cp, and passes it to
172 const unsigned char *cp, n_tracerouter_receivebuf()
176 n_tracesink_datadrain((u8 *) cp, count); n_tracerouter_receivebuf()
171 n_tracerouter_receivebuf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) n_tracerouter_receivebuf() argument
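n_tty_receive_buf_real_raw() above lands bytes in the read ring with at most two memcpy calls: one up to the buffer edge, then the wrapped remainder at offset 0, while head simply counts up monotonically. The split-copy pattern on a 16-byte toy ring:

    #include <stdio.h>
    #include <string.h>

    #define N_TTY_BUF_SIZE 16            /* tiny ring so the wrap is visible */

    /* Copy count bytes into ring[] at head, wrapping once if needed. */
    static size_t ring_write(char *ring, size_t head, const char *cp, size_t count)
    {
        size_t n = count;
        if (n > N_TTY_BUF_SIZE - (head & (N_TTY_BUF_SIZE - 1)))
            n = N_TTY_BUF_SIZE - (head & (N_TTY_BUF_SIZE - 1));
        memcpy(ring + (head & (N_TTY_BUF_SIZE - 1)), cp, n);
        head += n; cp += n; count -= n;
        if (count)                        /* wrapped: rest lands at offset 0 */
            memcpy(ring + (head & (N_TTY_BUF_SIZE - 1)), cp, count);
        return head + count;              /* new head (monotonic, like n_tty) */
    }

    int main(void)
    {
        char ring[N_TTY_BUF_SIZE] = { 0 };
        size_t head = 12;                 /* 4 bytes to the edge */
        head = ring_write(ring, head, "abcdefgh", 8);
        printf("head=%zu ring[12..15]=%.4s ring[0..3]=%.4s\n",
               head, ring + 12, ring);
        return 0;
    }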
/linux-4.1.27/drivers/mmc/host/
H A Dmmc_spi.c200 u8 *cp = host->data->status; mmc_spi_skip() local
212 if (cp[i] != byte) mmc_spi_skip()
213 return cp[i]; mmc_spi_skip()
265 u8 *cp = host->data->status; mmc_spi_response_get() local
266 u8 *end = cp + host->t.len; mmc_spi_response_get()
283 cp += 8; mmc_spi_response_get()
284 while (cp < end && *cp == 0xff) mmc_spi_response_get()
285 cp++; mmc_spi_response_get()
288 if (cp == end) { mmc_spi_response_get()
289 cp = host->data->status; mmc_spi_response_get()
290 end = cp+1; mmc_spi_response_get()
307 if (*cp != 0xff) mmc_spi_response_get()
316 if (*cp & 0x80) { mmc_spi_response_get()
318 rotator = *cp++ << 8; mmc_spi_response_get()
320 if (cp == end) { mmc_spi_response_get()
324 cp = host->data->status; mmc_spi_response_get()
325 end = cp+1; mmc_spi_response_get()
327 rotator |= *cp++; mmc_spi_response_get()
335 cmd->resp[0] = *cp++; mmc_spi_response_get()
361 while (cp < end && *cp == 0) mmc_spi_response_get()
362 cp++; mmc_spi_response_get()
363 if (cp == end) mmc_spi_response_get()
372 if (cp == end) { mmc_spi_response_get()
376 cp = host->data->status; mmc_spi_response_get()
377 end = cp+1; mmc_spi_response_get()
381 rotator |= *cp << bitshift; mmc_spi_response_get()
384 cmd->resp[0] |= *cp << 8; mmc_spi_response_get()
395 if (cp == end) { mmc_spi_response_get()
399 cp = host->data->status; mmc_spi_response_get()
400 end = cp+1; mmc_spi_response_get()
403 rotator |= *cp++ << bitshift; mmc_spi_response_get()
407 cmd->resp[1] |= *cp++; mmc_spi_response_get()
450 u8 *cp = data->status; mmc_spi_command_send() local
467 memset(cp, 0xff, sizeof(data->status)); mmc_spi_command_send()
469 cp[1] = 0x40 | cmd->opcode; mmc_spi_command_send()
470 put_unaligned_be32(cmd->arg, cp+2); mmc_spi_command_send()
471 cp[6] = crc7_be(0, cp+1, 5) | 0x01; mmc_spi_command_send()
472 cp += 7; mmc_spi_command_send()
510 cp += 2; /* min(N(CR)) + status */ mmc_spi_command_send()
513 cp += 10; /* max(N(CR)) + status + min(N(RC),N(WR)) */ mmc_spi_command_send()
515 cp++; mmc_spi_command_send()
517 cp += 4; mmc_spi_command_send()
519 cp = data->status + sizeof(data->status); mmc_spi_command_send()
533 t->len = cp - data->status; mmc_spi_command_send()
836 u8 *cp = t->rx_buf; mmc_spi_readblock() local
841 temp = *cp; mmc_spi_readblock()
842 *cp++ = leftover | (temp >> bitshift); mmc_spi_readblock()
845 cp = (u8 *) &scratch->crc_val; mmc_spi_readblock()
846 temp = *cp; mmc_spi_readblock()
847 *cp++ = leftover | (temp >> bitshift); mmc_spi_readblock()
849 temp = *cp; mmc_spi_readblock()
850 *cp = leftover | (temp >> bitshift); mmc_spi_readblock()
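mmc_spi_readblock() above straightens out a block that arrived shifted by a few bits on the wire: each output byte takes the leftover high bits carried from its predecessor, ORed with the current byte shifted down. The same rotation isolated; the 0xA0 seed plays the part of the bits recovered from the token byte:

    #include <stdio.h>
    #include <stdint.h>

    /* Recover n data bytes from a stream shifted by 'bitshift' bits,
     * the in-place rotation mmc_spi_readblock applies:
     *   *cp = leftover | (temp >> bitshift);
     *   leftover = temp << (8 - bitshift);
     */
    static void realign(uint8_t *buf, int n, unsigned bitshift, uint8_t leftover)
    {
        for (int i = 0; i < n; i++) {
            uint8_t temp = buf[i];
            buf[i] = leftover | (temp >> bitshift);
            leftover = (uint8_t)(temp << (8 - bitshift));
        }
    }

    int main(void)
    {
        uint8_t wire[] = { 0x2A, 0xD0 };      /* misaligned on the wire */
        realign(wire, 2, 3, 0xA0);            /* 0xA0: carry-in from the
                                                 byte before these two */
        printf("recovered: %02x %02x\n", wire[0], wire[1]);  /* a5 5a */
        return 0;
    }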
/linux-4.1.27/sound/ppc/
H A Dpmac.c214 volatile struct dbdma_cmd __iomem *cp; snd_pmac_pcm_prepare() local
253 for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) { snd_pmac_pcm_prepare()
254 cp->phy_addr = cpu_to_le32(offset); snd_pmac_pcm_prepare()
255 cp->req_count = cpu_to_le16(rec->period_size); snd_pmac_pcm_prepare()
256 /*cp->res_count = cpu_to_le16(0);*/ snd_pmac_pcm_prepare()
257 cp->xfer_status = cpu_to_le16(0); snd_pmac_pcm_prepare()
261 cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); snd_pmac_pcm_prepare()
262 cp->cmd_dep = cpu_to_le32(rec->cmd.addr); snd_pmac_pcm_prepare()
278 volatile struct dbdma_cmd __iomem *cp; snd_pmac_pcm_trigger() local
291 for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) snd_pmac_pcm_trigger()
292 out_le16(&cp->command, command); snd_pmac_pcm_trigger()
306 for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) snd_pmac_pcm_trigger()
307 out_le16(&cp->command, DBDMA_STOP); snd_pmac_pcm_trigger()
330 volatile struct dbdma_cmd __iomem *cp = &rec->cmd.cmds[rec->cur_period]; snd_pmac_pcm_pointer() local
331 stat = le16_to_cpu(cp->xfer_status); snd_pmac_pcm_pointer()
333 count = in_le16(&cp->res_count); snd_pmac_pcm_pointer()
414 volatile struct dbdma_cmd __iomem *cp) snd_pmac_pcm_dead_xfer()
427 memcpy((void *)emergency_dbdma.cmds, (void *)cp, snd_pmac_pcm_dead_xfer()
430 cp->xfer_status = cpu_to_le16(0); snd_pmac_pcm_dead_xfer()
431 cp->req_count = cpu_to_le16(rec->period_size); snd_pmac_pcm_dead_xfer()
432 cp = emergency_dbdma.cmds; snd_pmac_pcm_dead_xfer()
437 req = le16_to_cpu(cp->req_count); snd_pmac_pcm_dead_xfer()
438 res = le16_to_cpu(cp->res_count); snd_pmac_pcm_dead_xfer()
439 phy = le32_to_cpu(cp->phy_addr); snd_pmac_pcm_dead_xfer()
441 cp->req_count = cpu_to_le16(res); snd_pmac_pcm_dead_xfer()
442 cp->res_count = cpu_to_le16(0); snd_pmac_pcm_dead_xfer()
443 cp->xfer_status = cpu_to_le16(0); snd_pmac_pcm_dead_xfer()
444 cp->phy_addr = cpu_to_le32(phy); snd_pmac_pcm_dead_xfer()
446 cp->cmd_dep = cpu_to_le32(rec->cmd.addr snd_pmac_pcm_dead_xfer()
449 cp->command = cpu_to_le16(OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS); snd_pmac_pcm_dead_xfer()
465 volatile struct dbdma_cmd __iomem *cp; snd_pmac_pcm_update() local
474 cp = emergency_dbdma.cmds; snd_pmac_pcm_update()
476 cp = &rec->cmd.cmds[rec->cur_period]; snd_pmac_pcm_update()
478 stat = le16_to_cpu(cp->xfer_status); snd_pmac_pcm_update()
481 snd_pmac_pcm_dead_xfer(rec, cp); snd_pmac_pcm_update()
492 cp->xfer_status = cpu_to_le16(0); snd_pmac_pcm_update()
493 cp->req_count = cpu_to_le16(rec->period_size); snd_pmac_pcm_update()
494 /*cp->res_count = cpu_to_le16(0);*/ snd_pmac_pcm_update()
413 snd_pmac_pcm_dead_xfer(struct pmac_stream *rec, volatile struct dbdma_cmd __iomem *cp) snd_pmac_pcm_dead_xfer() argument
/linux-4.1.27/drivers/isdn/divert/
H A Ddivert_procfs.c41 put_info_buffer(char *cp) put_info_buffer() argument
48 if (!cp) put_info_buffer()
50 if (!*cp) put_info_buffer()
52 if (!(ib = kmalloc(sizeof(struct divert_info) + strlen(cp), GFP_ATOMIC))) put_info_buffer()
54 strcpy(ib->info_start, cp); /* set output string */ put_info_buffer()
189 char *cp; isdn_divert_ioctl_unlocked() local
205 cp = divert_if.drv_to_name(dioctl.getid.drvid); isdn_divert_ioctl_unlocked()
206 if (!cp) isdn_divert_ioctl_unlocked()
208 if (!*cp) isdn_divert_ioctl_unlocked()
210 strcpy(dioctl.getid.drvnam, cp); isdn_divert_ioctl_unlocked()
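put_info_buffer() above sizes its kmalloc as sizeof(struct divert_info) + strlen(cp), letting a trailing char array inside the struct absorb the copied text and its terminator. The same trailing-string allocation in user space, with a C99 flexible array member in place of the old one-element-array idiom:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct info_buf {
        struct info_buf *next;    /* queue link, as in divert_info */
        char info_start[];        /* flexible array for the text */
    };

    static struct info_buf *put_info_buffer(const char *cp)
    {
        if (!cp || !*cp)
            return NULL;          /* nothing to log */
        struct info_buf *ib = malloc(sizeof(*ib) + strlen(cp) + 1);
        if (!ib)
            return NULL;
        strcpy(ib->info_start, cp);
        ib->next = NULL;
        return ib;
    }

    int main(void)
    {
        struct info_buf *ib = put_info_buffer("DIVERT report line\n");
        if (ib) {
            printf("queued: %s", ib->info_start);
            free(ib);
        }
        return 0;
    }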
/linux-4.1.27/drivers/mtd/chips/
H A Dgen_probe.c17 struct chip_probe *cp);
18 static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
21 struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp) mtd_do_chip_probe() argument
27 cfi = genprobe_ident_chips(map, cp); mtd_do_chip_probe()
59 static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp) genprobe_ident_chips() argument
71 if (!genprobe_new_chip(map, cp, &cfi)) { genprobe_ident_chips()
74 cp->name, map->name); genprobe_ident_chips()
130 cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi); genprobe_ident_chips()
165 static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp, genprobe_new_chip() argument
186 if (cp->probe_chip(map, 0, NULL, cfi)) genprobe_new_chip()
/linux-4.1.27/drivers/scsi/libfc/
H A Dfc_disc.c513 struct fc_ct_hdr *cp; fc_disc_gpn_ft_resp() local
534 cp = fc_frame_payload_get(fp, sizeof(*cp)); fc_disc_gpn_ft_resp()
535 if (!cp) { fc_disc_gpn_ft_resp()
539 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { fc_disc_gpn_ft_resp()
542 len -= sizeof(*cp); fc_disc_gpn_ft_resp()
543 error = fc_disc_gpn_ft_parse(disc, cp + 1, len); fc_disc_gpn_ft_resp()
544 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { fc_disc_gpn_ft_resp()
546 "(check zoning)\n", cp->ct_reason, fc_disc_gpn_ft_resp()
547 cp->ct_explan); fc_disc_gpn_ft_resp()
549 if (cp->ct_reason == FC_FS_RJT_UNABL && fc_disc_gpn_ft_resp()
550 cp->ct_explan == FC_FS_EXP_FTNR) fc_disc_gpn_ft_resp()
554 "%x\n", ntohs(cp->ct_cmd)); fc_disc_gpn_ft_resp()
588 struct fc_ct_hdr *cp; fc_disc_gpn_id_resp() local
601 cp = fc_frame_payload_get(fp, sizeof(*cp)); fc_disc_gpn_id_resp()
602 if (!cp) fc_disc_gpn_id_resp()
604 if (ntohs(cp->ct_cmd) == FC_FS_ACC) { fc_disc_gpn_id_resp()
606 sizeof(*cp) + sizeof(*pn)) fc_disc_gpn_id_resp()
608 pn = (struct fc_ns_gid_pn *)(cp + 1); fc_disc_gpn_id_resp()
628 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { fc_disc_gpn_id_resp()
630 cp->ct_reason, cp->ct_explan); fc_disc_gpn_id_resp()
634 ntohs(cp->ct_cmd)); fc_disc_gpn_id_resp()
/linux-4.1.27/arch/powerpc/platforms/embedded6xx/
H A Dmvme5100.c60 struct device_node *cp = NULL; mvme5100_pic_init() local
80 cp = of_find_compatible_node(NULL, NULL, "chrp,iic"); mvme5100_pic_init()
81 if (cp == NULL) { mvme5100_pic_init()
86 cirq = irq_of_parse_and_map(cp, 0); mvme5100_pic_init()
106 i8259_init(cp, intack); mvme5100_pic_init()
107 of_node_put(cp); mvme5100_pic_init()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Dvvp_page.c56 static void vvp_page_fini_common(struct ccc_page *cp) vvp_page_fini_common() argument
58 struct page *vmpage = cp->cpg_page; vvp_page_fini_common()
67 struct ccc_page *cp = cl2ccc_page(slice); vvp_page_fini() local
68 struct page *vmpage = cp->cpg_page; vvp_page_fini()
75 vvp_page_fini_common(cp); vvp_page_fini()
272 struct ccc_page *cp = cl2ccc_page(slice); vvp_page_completion_read() local
273 struct page *vmpage = cp->cpg_page; vvp_page_completion_read()
280 if (cp->cpg_defer_uptodate) vvp_page_completion_read()
284 if (!cp->cpg_defer_uptodate) vvp_page_completion_read()
287 cp->cpg_defer_uptodate = 0; vvp_page_completion_read()
297 struct ccc_page *cp = cl2ccc_page(slice); vvp_page_completion_write() local
299 struct page *vmpage = cp->cpg_page; vvp_page_completion_write()
316 cp->cpg_write_queued = 0; vvp_page_completion_write()
317 vvp_write_complete(cl2ccc(slice->cpl_obj), cp); vvp_page_completion_write()
494 struct ccc_page *cp = cl2ccc_page(slice); vvp_transient_page_fini() local
498 vvp_page_fini_common(cp); vvp_transient_page_fini()
H A Dvvp_io.c747 struct ccc_page *cp; vvp_io_fault_start() local
752 cp = cl2ccc_page(cl_page_at(page, &vvp_device_type)); vvp_io_fault_start()
753 vvp_write_pending(cl2ccc(obj), cp); vvp_io_fault_start() local
820 struct ccc_page *cp = cl2ccc_page(slice); vvp_io_read_page() local
826 struct page *vmpage = cp->cpg_page; vvp_io_read_page()
836 cp->cpg_defer_uptodate); vvp_io_read_page()
848 if (cp->cpg_defer_uptodate) { vvp_io_read_page()
849 cp->cpg_ra_used = 1; vvp_io_read_page()
866 struct cl_page *page, struct ccc_page *cp, vvp_page_sync_io()
896 struct ccc_page *cp, vvp_io_prepare_partial()
913 char *kaddr = kmap_atomic(cp->cpg_page); vvp_io_prepare_partial()
917 } else if (cp->cpg_defer_uptodate) vvp_io_prepare_partial()
918 cp->cpg_ra_used = 1; vvp_io_prepare_partial()
920 result = vvp_page_sync_io(env, io, pg, cp, CRT_READ); vvp_io_prepare_partial()
941 struct ccc_page *cp = cl2ccc_page(slice); vvp_io_prepare_write() local
943 struct page *vmpage = cp->cpg_page; vvp_io_prepare_write()
963 pg, cp, from, to); vvp_io_prepare_write()
976 struct ccc_page *cp = cl2ccc_page(slice); vvp_io_commit_write() local
981 struct page *vmpage = cp->cpg_page; vvp_io_commit_write()
1019 vvp_write_pending(cl2ccc(obj), cp); vvp_io_commit_write() local
1048 result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE); vvp_io_commit_write()
865 vvp_page_sync_io(const struct lu_env *env, struct cl_io *io, struct cl_page *page, struct ccc_page *cp, enum cl_req_type crt) vvp_page_sync_io() argument
894 vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io, struct cl_object *obj, struct cl_page *pg, struct ccc_page *cp, unsigned from, unsigned to) vvp_io_prepare_partial() argument
/linux-4.1.27/arch/metag/mm/
H A Dl2cache.c19 char *cp = p; parse_l2cache() local
21 if (get_option(&cp, &l2cache_init) != 1) { parse_l2cache()
31 char *cp = p; parse_l2cache_pf() local
33 if (get_option(&cp, &l2cache_init_pf) != 1) { parse_l2cache_pf()
/linux-4.1.27/arch/m68k/mvme147/
H A Dconfig.c135 volatile unsigned short *cp = (volatile unsigned short *)0xfffe1012; mvme147_gettimeoffset() local
138 n = *cp; mvme147_gettimeoffset()
139 while (n != *cp) mvme147_gettimeoffset()
140 n = *cp; mvme147_gettimeoffset()
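mvme147_gettimeoffset() above reads a free-running 16-bit counter that can tick between bus cycles, so it re-reads until two consecutive reads agree. The loop in miniature, with a volatile variable standing in for the register at 0xfffe1012:

    #include <stdio.h>

    /* Stand-in for the memory-mapped counter at 0xfffe1012. */
    static volatile unsigned short fake_counter;

    static unsigned short read_stable(volatile unsigned short *cp)
    {
        unsigned short n = *cp;
        while (n != *cp)        /* raced with an update: read again */
            n = *cp;
        return n;
    }

    int main(void)
    {
        fake_counter = 0x1234;
        printf("stable read: 0x%04x\n", read_stable(&fake_counter));
        return 0;
    }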
/linux-4.1.27/drivers/block/
H A Dcciss_scsi.c302 print_cmd(CommandList_struct *cp)
304 printk("queue:%d\n", cp->Header.ReplyQueue);
305 printk("sglist:%d\n", cp->Header.SGList);
306 printk("sgtot:%d\n", cp->Header.SGTotal);
307 printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
308 cp->Header.Tag.lower);
310 cp->Header.LUN.LunAddrBytes[0],
311 cp->Header.LUN.LunAddrBytes[1],
312 cp->Header.LUN.LunAddrBytes[2],
313 cp->Header.LUN.LunAddrBytes[3],
314 cp->Header.LUN.LunAddrBytes[4],
315 cp->Header.LUN.LunAddrBytes[5],
316 cp->Header.LUN.LunAddrBytes[6],
317 cp->Header.LUN.LunAddrBytes[7]);
318 printk("CDBLen:%d\n", cp->Request.CDBLen);
319 printk("Type:%d\n",cp->Request.Type.Type);
320 printk("Attr:%d\n",cp->Request.Type.Attribute);
321 printk(" Dir:%d\n",cp->Request.Type.Direction);
322 printk("Timeout:%d\n",cp->Request.Timeout);
325 cp->Request.CDB[0], cp->Request.CDB[1],
326 cp->Request.CDB[2], cp->Request.CDB[3],
327 cp->Request.CDB[4], cp->Request.CDB[5],
328 cp->Request.CDB[6], cp->Request.CDB[7],
329 cp->Request.CDB[8], cp->Request.CDB[9],
330 cp->Request.CDB[10], cp->Request.CDB[11],
331 cp->Request.CDB[12], cp->Request.CDB[13],
332 cp->Request.CDB[14], cp->Request.CDB[15]),
334 cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
335 cp->ErrDesc.Len);
337 printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
338 printk("senselen:%d\n", cp->err_info->SenseLen);
339 printk("cmd status:%d\n", cp->err_info->CommandStatus);
340 printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
341 printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
342 printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
343 printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
H A Dswim3.c240 static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
440 static inline void init_dma(struct dbdma_cmd *cp, int cmd, init_dma() argument
443 cp->req_count = cpu_to_le16(count); init_dma()
444 cp->command = cpu_to_le16(cmd); init_dma()
445 cp->phy_addr = cpu_to_le32(virt_to_bus(buf)); init_dma()
446 cp->xfer_status = 0; init_dma()
453 struct dbdma_cmd *cp = fs->dma_cmd; setup_transfer() local
477 out_le32(&dr->cmdptr, virt_to_bus(cp)); setup_transfer()
480 init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); setup_transfer()
481 ++cp; setup_transfer()
482 init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512); setup_transfer()
483 ++cp; setup_transfer()
484 init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); setup_transfer()
486 init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512); setup_transfer()
488 ++cp; setup_transfer()
489 out_le16(&cp->command, DBDMA_STOP); setup_transfer()
682 struct dbdma_cmd *cp; swim3_interrupt() local
753 cp = fs->dma_cmd; swim3_interrupt()
755 ++cp; swim3_interrupt()
763 if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) { swim3_interrupt()
766 if (cp->xfer_status != 0) swim3_interrupt()
774 stat = le16_to_cpu(cp->xfer_status); swim3_interrupt()
775 resid = le16_to_cpu(cp->res_count); swim3_interrupt()
/linux-4.1.27/drivers/net/wan/
H A Dhdlc_ppp.c211 struct cp_header *cp; ppp_tx_cp() local
231 cp = (struct cp_header *)skb_put(skb, sizeof(struct cp_header)); ppp_tx_cp()
232 cp->code = code; ppp_tx_cp()
233 cp->id = id; ppp_tx_cp()
234 cp->len = htons(sizeof(struct cp_header) + magic_len + len); ppp_tx_cp()
438 struct cp_header *cp; ppp_rx() local
451 cp = (struct cp_header*)skb_pull(skb, sizeof(struct hdlc_header)); ppp_rx()
465 len = ntohs(cp->len); ppp_rx()
474 if (cp->code < CP_CODES) ppp_rx()
475 sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code], ppp_rx()
476 cp->id); ppp_rx()
478 sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id); ppp_rx()
490 switch (cp->code) { ppp_rx()
502 cp->id, len - 4, skb->data + 4); ppp_rx()
506 if (cp->id == ppp->echo_id) ppp_rx()
515 switch (cp->code) { ppp_rx()
517 ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data); ppp_rx()
521 if (cp->id == proto->cr_id) ppp_rx()
527 if (cp->id == proto->cr_id) ppp_rx()
532 ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL); ppp_rx()
547 ppp_cp_event(dev, pid, RUC, 0, 0, len, cp); ppp_rx()
/linux-4.1.27/fs/xfs/
H A Duuid.c49 char *cp = (char *)uuid; uuid_is_nil() local
55 if (*cp++) return 0; /* not nil */ uuid_is_nil()
/linux-4.1.27/arch/arm64/kernel/vdso/
H A DMakefile43 cp $@ include/generated/
61 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
/linux-4.1.27/arch/avr32/boot/u-boot/
H A Dhead.S31 cp r2, r3
50 cp.w r12, r0
/linux-4.1.27/include/linux/can/
H A Dcore.h45 extern int can_proto_register(const struct can_proto *cp);
46 extern void can_proto_unregister(const struct can_proto *cp);
/linux-4.1.27/drivers/misc/eeprom/
H A Dat25.c74 u8 *cp; at25_ee_read() local
87 cp = command; at25_ee_read()
93 *cp++ = instr; at25_ee_read()
98 *cp++ = offset >> 16; at25_ee_read()
100 *cp++ = offset >> 8; at25_ee_read()
103 *cp++ = offset >> 0; at25_ee_read()
181 u8 *cp = bounce; at25_ee_write() local
185 *cp = AT25_WREN; at25_ee_write()
186 status = spi_write(at25->spi, cp, 1); at25_ee_write()
197 *cp++ = instr; at25_ee_write()
202 *cp++ = offset >> 16; at25_ee_write()
204 *cp++ = offset >> 8; at25_ee_write()
207 *cp++ = offset >> 0; at25_ee_write()
214 memcpy(cp, buf, segment); at25_ee_write()
/linux-4.1.27/scripts/dtc/
H A Dtreesource.c140 cell_t *cp = (cell_t *)val.val; write_propval_cells() local
145 while (m && (m->offset <= ((char *)cp - val.val))) { write_propval_cells()
147 assert(m->offset == ((char *)cp - val.val)); write_propval_cells()
153 fprintf(f, "0x%x", fdt32_to_cpu(*cp++)); write_propval_cells()
154 if ((void *)cp >= propend) write_propval_cells()
/linux-4.1.27/arch/mips/alchemy/common/
H A Ddbdma.c261 au1x_dma_chan_t *cp; au1xxx_dbdma_chan_alloc() local
324 cp = (au1x_dma_chan_t *)dcp; au1xxx_dbdma_chan_alloc()
343 cp->ddma_cfg = i; au1xxx_dbdma_chan_alloc()
756 au1x_dma_chan_t *cp; au1xxx_dbdma_stop() local
761 cp = ctp->chan_ptr; au1xxx_dbdma_stop()
762 cp->ddma_cfg &= ~DDMA_CFG_EN; /* Disable channel */ au1xxx_dbdma_stop()
764 while (!(cp->ddma_stat & DDMA_STAT_H)) { au1xxx_dbdma_stop()
773 cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V); au1xxx_dbdma_stop()
786 au1x_dma_chan_t *cp; au1xxx_dbdma_start() local
789 cp = ctp->chan_ptr; au1xxx_dbdma_start()
790 cp->ddma_desptr = virt_to_phys(ctp->cur_ptr); au1xxx_dbdma_start()
791 cp->ddma_cfg |= DDMA_CFG_EN; /* Enable channel */ au1xxx_dbdma_start()
793 cp->ddma_dbell = 0; au1xxx_dbdma_start()
827 au1x_dma_chan_t *cp; au1xxx_get_dma_residue() local
831 cp = ctp->chan_ptr; au1xxx_get_dma_residue()
834 rv = cp->ddma_bytecnt; au1xxx_get_dma_residue()
868 au1x_dma_chan_t *cp; dbdma_interrupt() local
875 cp = ctp->chan_ptr; dbdma_interrupt()
879 cp->ddma_irq = 0; dbdma_interrupt()
894 au1x_dma_chan_t *cp; au1xxx_dbdma_dump() local
900 cp = ctp->chan_ptr; au1xxx_dbdma_dump()
909 printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp); au1xxx_dbdma_dump()
911 cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr); au1xxx_dbdma_dump()
913 cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat, au1xxx_dbdma_dump()
914 cp->ddma_bytecnt); au1xxx_dbdma_dump()
/linux-4.1.27/drivers/of/
H A Dfdt_address.c74 u64 cp, s, da; fdt_bus_default_map() local
76 cp = of_read_number(range, na); fdt_bus_default_map()
80 pr_debug("FDT: default map, cp=%llx, s=%llx, da=%llx\n", fdt_bus_default_map()
81 cp, s, da); fdt_bus_default_map()
83 if (da < cp || da >= (cp + s)) fdt_bus_default_map()
85 return da - cp; fdt_bus_default_map()
H A Daddress.c64 u64 cp, s, da; of_bus_default_map() local
66 cp = of_read_number(range, na); of_bus_default_map()
70 pr_debug("OF: default map, cp=%llx, s=%llx, da=%llx\n", of_bus_default_map()
71 (unsigned long long)cp, (unsigned long long)s, of_bus_default_map()
74 if (da < cp || da >= (cp + s)) of_bus_default_map()
76 return da - cp; of_bus_default_map()
143 u64 cp, s, da; of_bus_pci_map() local
154 cp = of_read_number(range + 1, na - 1); of_bus_pci_map()
158 pr_debug("OF: PCI map, cp=%llx, s=%llx, da=%llx\n", of_bus_pci_map()
159 (unsigned long long)cp, (unsigned long long)s, of_bus_pci_map()
162 if (da < cp || da >= (cp + s)) of_bus_pci_map()
164 return da - cp; of_bus_pci_map()
366 u64 cp, s, da; of_bus_isa_map() local
373 cp = of_read_number(range + 1, na - 1); of_bus_isa_map()
377 pr_debug("OF: ISA map, cp=%llx, s=%llx, da=%llx\n", of_bus_isa_map()
378 (unsigned long long)cp, (unsigned long long)s, of_bus_isa_map()
381 if (da < cp || da >= (cp + s)) of_bus_isa_map()
383 return da - cp; of_bus_isa_map()
/linux-4.1.27/fs/hpfs/
H A Dmap.c65 struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0); hpfs_load_code_page() local
66 if (!cp) return NULL; hpfs_load_code_page()
67 if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) { hpfs_load_code_page()
69 le32_to_cpu(cp->magic)); hpfs_load_code_page()
73 if (!le32_to_cpu(cp->n_code_pages)) { hpfs_load_code_page()
78 cpds = le32_to_cpu(cp->array[0].code_page_data); hpfs_load_code_page()
79 cpi = le16_to_cpu(cp->array[0].index); hpfs_load_code_page()
/linux-4.1.27/net/can/
H A Daf_can.c120 const struct can_proto *cp; can_get_proto() local
123 cp = rcu_dereference(proto_tab[protocol]); can_get_proto()
124 if (cp && !try_module_get(cp->prot->owner)) can_get_proto()
125 cp = NULL; can_get_proto()
128 return cp; can_get_proto()
131 static inline void can_put_proto(const struct can_proto *cp) can_put_proto() argument
133 module_put(cp->prot->owner); can_put_proto()
140 const struct can_proto *cp; can_create() local
151 cp = can_get_proto(protocol); can_create()
154 if (!cp) { can_create()
168 cp = can_get_proto(protocol); can_create()
174 if (!cp) can_create()
177 if (cp->type != sock->type) { can_create()
182 sock->ops = cp->ops; can_create()
184 sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot); can_create()
203 can_put_proto(cp); can_create()
763 * @cp: pointer to CAN protocol structure
771 int can_proto_register(const struct can_proto *cp) can_proto_register() argument
773 int proto = cp->protocol; can_proto_register()
781 err = proto_register(cp->prot, 0); can_proto_register()
791 RCU_INIT_POINTER(proto_tab[proto], cp); can_proto_register()
796 proto_unregister(cp->prot); can_proto_register()
804 * @cp: pointer to CAN protocol structure
806 void can_proto_unregister(const struct can_proto *cp) can_proto_unregister() argument
808 int proto = cp->protocol; can_proto_unregister()
811 BUG_ON(proto_tab[proto] != cp); can_proto_unregister()
817 proto_unregister(cp->prot); can_proto_unregister()
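
can_create() above dispatches on the fields of struct can_proto: cp->type must match the requested socket type, cp->ops becomes the socket's proto_ops, and cp->prot is handed to sk_alloc(); can_proto_register() first runs proto_register(cp->prot, 0) and then publishes the entry in proto_tab. A hedged sketch of a module using this API; my_proto, my_ops and the protocol slot are illustrative, not from the tree:

    #include <linux/module.h>
    #include <linux/can/core.h>
    #include <net/sock.h>

    static struct proto my_proto = {
            .name     = "MYCAN",            /* illustrative */
            .owner    = THIS_MODULE,
            .obj_size = sizeof(struct sock),
    };

    static const struct proto_ops my_ops;   /* socket ops, to be filled in */

    static const struct can_proto my_can_proto = {
            .type     = SOCK_DGRAM,
            .protocol = 5,                  /* hypothetical free slot below CAN_NPROTO */
            .ops      = &my_ops,
            .prot     = &my_proto,
    };

    static int __init my_can_init(void)
    {
            return can_proto_register(&my_can_proto);
    }

    static void __exit my_can_exit(void)
    {
            can_proto_unregister(&my_can_proto);
    }

    module_init(my_can_init);
    module_exit(my_can_exit);
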
/linux-4.1.27/arch/ia64/kernel/
H A Dsal.c203 char *cp; chk_nointroute_opt() local
205 for (cp = boot_command_line; *cp; ) { chk_nointroute_opt()
206 if (memcmp(cp, "nointroute", 10) == 0) { chk_nointroute_opt()
211 while (*cp != ' ' && *cp) chk_nointroute_opt()
212 ++cp; chk_nointroute_opt()
213 while (*cp == ' ') chk_nointroute_opt()
214 ++cp; chk_nointroute_opt()
H A Defi.c477 char *cp, vendor[100] = "unknown"; efi_init() local
487 for (cp = boot_command_line; *cp; ) { efi_init()
488 if (memcmp(cp, "mem=", 4) == 0) { efi_init()
489 mem_limit = memparse(cp + 4, &cp); efi_init()
490 } else if (memcmp(cp, "max_addr=", 9) == 0) { efi_init()
491 max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); efi_init()
492 } else if (memcmp(cp, "min_addr=", 9) == 0) { efi_init()
493 min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); efi_init()
495 while (*cp != ' ' && *cp) efi_init()
496 ++cp; efi_init()
497 while (*cp == ' ') efi_init()
498 ++cp; efi_init()
/linux-4.1.27/fs/afs/
H A Dcell.c39 char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp, *next; afs_cell_alloc() local
113 cp = cell->name; afs_cell_alloc()
115 *dp++ = toupper(*cp); afs_cell_alloc()
116 } while (*cp++); afs_cell_alloc()
229 char *cp; afs_cell_init() local
241 cp = strchr(rootcell, ':'); afs_cell_init()
242 if (!cp) afs_cell_init()
245 *cp++ = 0; afs_cell_init()
248 new_root = afs_cell_create(rootcell, strlen(rootcell), cp, false); afs_cell_init()
/linux-4.1.27/drivers/video/console/
H A Dnewport_con.c57 #define RENDER(regs, cp) do { \
58 (regs)->go.zpattern = BMASK((cp)[0x0]); (regs)->go.zpattern = BMASK((cp)[0x1]); \
59 (regs)->go.zpattern = BMASK((cp)[0x2]); (regs)->go.zpattern = BMASK((cp)[0x3]); \
60 (regs)->go.zpattern = BMASK((cp)[0x4]); (regs)->go.zpattern = BMASK((cp)[0x5]); \
61 (regs)->go.zpattern = BMASK((cp)[0x6]); (regs)->go.zpattern = BMASK((cp)[0x7]); \
62 (regs)->go.zpattern = BMASK((cp)[0x8]); (regs)->go.zpattern = BMASK((cp)[0x9]); \
63 (regs)->go.zpattern = BMASK((cp)[0xa]); (regs)->go.zpattern = BMASK((cp)[0xb]); \
64 (regs)->go.zpattern = BMASK((cp)[0xc]); (regs)->go.zpattern = BMASK((cp)[0xd]); \
65 (regs)->go.zpattern = BMASK((cp)[0xe]); (regs)->go.zpattern = BMASK((cp)[0xf]); \
/linux-4.1.27/arch/sparc/boot/
H A Dinstall.sh50 cp $3 $4/System.map
/linux-4.1.27/arch/microblaze/boot/
H A DMakefile31 $(call if_changed,cp,.unstrip)
/linux-4.1.27/arch/nios2/boot/
H A Dinstall.sh50 cp $3 $4/System.map
/linux-4.1.27/arch/m68k/
H A Dinstall.sh50 cp $3 $4/System.map
/linux-4.1.27/arch/mips/pmcs-msp71xx/
H A Dmsp_prom.c309 char *cp; prom_init_cmdline() local
314 cp = &(arcs_cmdline[0]); prom_init_cmdline()
316 strcpy(cp, prom_argv[actr]); prom_init_cmdline()
317 cp += strlen(prom_argv[actr]); prom_init_cmdline()
318 *cp++ = ' '; prom_init_cmdline()
321 if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */ prom_init_cmdline()
322 --cp; prom_init_cmdline()
323 *cp = '\0'; prom_init_cmdline()
/linux-4.1.27/arch/arm/vfp/
H A Dvfpinstr.h19 #define CPNUM(cp) ((cp) << 8)
/linux-4.1.27/drivers/media/platform/ti-vpe/
H A Dsc.c67 const u16 *cp; sc_set_hs_coeffs() local
90 cp = scaler_hs_coeffs[idx]; sc_set_hs_coeffs()
94 *coeff_h++ = *cp++; sc_set_hs_coeffs()
120 const u16 *cp; sc_set_vs_coeffs() local
136 cp = scaler_vs_coeffs[idx]; sc_set_vs_coeffs()
140 *coeff_v++ = *cp++; sc_set_vs_coeffs()
/linux-4.1.27/drivers/net/hamradio/
H A Dbaycom_epp.c540 unsigned char *cp; do_rxpacket() local
553 cp = skb_put(skb, pktlen); do_rxpacket()
554 *cp++ = 0; /* KISS kludge */ do_rxpacket()
555 memcpy(cp, bc->hdlcrx.buf, pktlen - 1); do_rxpacket()
567 unsigned char *cp; receive() local
582 cp = tmp; receive()
583 for (; cnt2 > 0; cnt2--, cp++) { receive()
585 bitstream |= (*cp) << 8; receive()
587 bitbuf |= (*cp) << 8; receive()
970 const char *cp; baycom_setmode() local
984 if ((cp = strstr(modestr,"fclk="))) { baycom_setmode()
985 bc->cfg.fclk = simple_strtoul(cp+5, NULL, 0); baycom_setmode()
991 if ((cp = strstr(modestr,"bps="))) { baycom_setmode()
992 bc->cfg.bps = simple_strtoul(cp+4, NULL, 0); baycom_setmode()
1018 hi.data.cp.tx_delay = bc->ch_params.tx_delay; baycom_ioctl()
1019 hi.data.cp.tx_tail = bc->ch_params.tx_tail; baycom_ioctl()
1020 hi.data.cp.slottime = bc->ch_params.slottime; baycom_ioctl()
1021 hi.data.cp.ppersist = bc->ch_params.ppersist; baycom_ioctl()
1022 hi.data.cp.fulldup = bc->ch_params.fulldup; baycom_ioctl()
1028 bc->ch_params.tx_delay = hi.data.cp.tx_delay; baycom_ioctl()
1029 bc->ch_params.tx_tail = hi.data.cp.tx_tail; baycom_ioctl()
1030 bc->ch_params.slottime = hi.data.cp.slottime; baycom_ioctl()
1031 bc->ch_params.ppersist = hi.data.cp.ppersist; baycom_ioctl()
1032 bc->ch_params.fulldup = hi.data.cp.fulldup; baycom_ioctl()
H A Dhdlcdrv.c148 unsigned char *cp; hdlc_rx_flag() local
160 cp = skb_put(skb, pkt_len); hdlc_rx_flag()
161 *cp++ = 0; /* KISS kludge */ hdlc_rx_flag()
162 memcpy(cp, s->hdlcrx.buffer, pkt_len - 1); hdlc_rx_flag()
518 bi.data.cp.tx_delay = s->ch_params.tx_delay; hdlcdrv_ioctl()
519 bi.data.cp.tx_tail = s->ch_params.tx_tail; hdlcdrv_ioctl()
520 bi.data.cp.slottime = s->ch_params.slottime; hdlcdrv_ioctl()
521 bi.data.cp.ppersist = s->ch_params.ppersist; hdlcdrv_ioctl()
522 bi.data.cp.fulldup = s->ch_params.fulldup; hdlcdrv_ioctl()
528 s->ch_params.tx_delay = bi.data.cp.tx_delay; hdlcdrv_ioctl()
529 s->ch_params.tx_tail = bi.data.cp.tx_tail; hdlcdrv_ioctl()
530 s->ch_params.slottime = bi.data.cp.slottime; hdlcdrv_ioctl()
531 s->ch_params.ppersist = bi.data.cp.ppersist; hdlcdrv_ioctl()
532 s->ch_params.fulldup = bi.data.cp.fulldup; hdlcdrv_ioctl()
/linux-4.1.27/drivers/atm/
H A Dfore200e.h46 /* the cp starts putting a received PDU into one *small* buffer,
212 /* cp resident transmit queue entry */
220 /* cp resident receive queue entry */
228 /* cp resident buffer supply queue entry */
240 STATUS_COMPLETE = (1<<1), /* completion status (written by cp) */
242 STATUS_ERROR = (1<<3) /* completion status (written by cp) */
246 /* cp operation code */
281 enum opcode opcode : 8, /* cp opcode */
303 enum opcode opcode : 8, /* cp opcode */
330 enum opcode opcode : 8, /* cp opcode */
455 enum opcode opcode : 8, /* cp opcode */
482 enum opcode opcode : 8, /* cp opcode */
496 /* cp command */
509 /* cp resident command queue */
521 struct cp_txq_entry __iomem *cp_entry; /* addr of cp resident tx queue entry */
536 struct cp_rxq_entry __iomem *cp_entry; /* addr of cp resident rx queue entry */
546 struct cp_bsq_entry __iomem *cp_entry; /* addr of cp resident buffer supply queue entry */
556 struct cp_cmdq_entry __iomem *cp_entry; /* addr of cp resident cmd queue entry */
694 /* cp resident queues */
701 u32 imask; /* 1 enables cp to host interrupts */
706 u32 heartbeat; /* cp heartbeat */
723 BSTAT_CP_RUNNING = (u32) 0xce11feed, /* cp is running */
854 struct cp_queues __iomem * cp_queues; /* cp resident queues */
898 #define FORE200E_CP_QUEUES_OFFSET 0x00004d40 /* cp resident queues */
/linux-4.1.27/drivers/usb/gadget/
H A Dconfig.c86 struct usb_config_descriptor *cp = buf; usb_gadget_config_buf() local
92 *cp = *config; usb_gadget_config_buf()
104 cp->bLength = USB_DT_CONFIG_SIZE; usb_gadget_config_buf()
105 cp->bDescriptorType = USB_DT_CONFIG; usb_gadget_config_buf()
106 cp->wTotalLength = cpu_to_le16(len); usb_gadget_config_buf()
107 cp->bmAttributes |= USB_CONFIG_ATT_ONE; usb_gadget_config_buf()
/linux-4.1.27/scripts/package/
H A Dbuilddeb18 cp debian/copyright "$pdir/usr/share/doc/$pname/"
19 cp debian/changelog "$pdir/usr/share/doc/$pname/changelog.Debian"
132 cp System.map "$tmpdir/usr/lib/uml/modules/$version/System.map"
133 cp $KCONFIG_CONFIG "$tmpdir/usr/share/doc/$packagename/config"
136 cp System.map "$tmpdir/boot/System.map-$version"
137 cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
141 cp $KBUILD_IMAGE "$tmpdir/$installed_image_path"
143 cp arch/$ARCH/boot/$KBUILD_IMAGE "$tmpdir/$installed_image_path"
311 (cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
365 cp vmlinux $dbg_dir/usr/lib/debug/lib/modules/$version/
/linux-4.1.27/drivers/mtd/maps/
H A Dphysmap_of.c117 const char *cp; of_get_probes() local
123 cp = of_get_property(dp, "linux,part-probe", &cplen); of_get_probes()
124 if (cp == NULL) of_get_probes()
129 if (cp[l] == 0) of_get_probes()
135 res[count] = cp; of_get_probes()
136 l = strlen(cp) + 1; of_get_probes()
137 cp += l; of_get_probes()
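
of_get_probes() above walks a flattened stringlist: the "linux,part-probe" property is a run of NUL-terminated strings packed into cplen bytes, counted first (the cp[l] == 0 test) and then collected with strides of strlen(cp) + 1. The same walk isolated into a sketch:

    #include <linux/string.h>

    /* Count the NUL-terminated strings packed into (cp, cplen). */
    static int count_strings(const char *cp, int cplen)
    {
            int count = 0;

            while (cplen > 0) {
                    int l = strlen(cp) + 1;  /* string plus its NUL */

                    cp += l;
                    cplen -= l;
                    count++;
            }
            return count;
    }
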
/linux-4.1.27/arch/x86/kernel/cpu/microcode/
H A Damd_early.c233 struct cpio_data cp; load_ucode_amd_bsp() local
245 cp = find_ucode_in_initrd(); load_ucode_amd_bsp()
246 if (!cp.data) load_ucode_amd_bsp()
249 *data = cp.data; load_ucode_amd_bsp()
250 *size = cp.size; load_ucode_amd_bsp()
252 apply_ucode_in_initrd(cp.data, cp.size, true); load_ucode_amd_bsp()
/linux-4.1.27/arch/parisc/mm/
H A Dinit.c90 char *cp, *end; mem_limit_func() local
96 for (cp = boot_command_line; *cp; ) { mem_limit_func()
97 if (memcmp(cp, "mem=", 4) == 0) { mem_limit_func()
98 cp += 4; mem_limit_func()
99 limit = memparse(cp, &end); mem_limit_func()
100 if (end != cp) mem_limit_func()
102 cp = end; mem_limit_func()
104 while (*cp != ' ' && *cp) mem_limit_func()
105 ++cp; mem_limit_func()
106 while (*cp == ' ') mem_limit_func()
107 ++cp; mem_limit_func()
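
mem_limit_func() here and efi_init() in the ia64 section earlier scan boot_command_line the same way: match a known "opt=" prefix, hand the value to memparse() (which understands k/m/g suffixes and advances the cursor), then skip to the next space-separated token. A condensed sketch of that loop; scan_mem_limit is an illustrative name:

    #include <linux/kernel.h>   /* memparse() */
    #include <linux/string.h>

    static unsigned long long scan_mem_limit(char *cmdline)
    {
            char *cp = cmdline;
            unsigned long long limit = ~0ULL;   /* "no limit" default */

            while (*cp) {
                    if (memcmp(cp, "mem=", 4) == 0)
                            limit = memparse(cp + 4, &cp);
                    /* skip the rest of this token, then the spaces */
                    while (*cp != ' ' && *cp)
                            ++cp;
                    while (*cp == ' ')
                            ++cp;
            }
            return limit;
    }
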
/linux-4.1.27/drivers/media/usb/au0828/
H A Dau0828-core.c53 u16 index, unsigned char *cp, u16 size);
82 /* cp must be memory that has been allocated by kmalloc */ send_control_msg()
103 u16 index, unsigned char *cp, u16 size) recv_control_msg()
123 is why we didn't just pass "cp" into usb_control_msg */ recv_control_msg()
124 memcpy(cp, dev->ctrlmsg, size); recv_control_msg()
102 recv_control_msg(struct au0828_dev *dev, u16 request, u32 value, u16 index, unsigned char *cp, u16 size) recv_control_msg() argument
/linux-4.1.27/drivers/media/platform/exynos4-is/
H A Dmedia-dev.c1207 struct cam_clk_provider *cp = &fmd->clk_provider; fimc_md_unregister_clk_provider() local
1210 if (cp->of_node) fimc_md_unregister_clk_provider()
1211 of_clk_del_provider(cp->of_node); fimc_md_unregister_clk_provider()
1213 for (i = 0; i < cp->num_clocks; i++) fimc_md_unregister_clk_provider()
1214 clk_unregister(cp->clks[i]); fimc_md_unregister_clk_provider()
1219 struct cam_clk_provider *cp = &fmd->clk_provider; fimc_md_register_clk_provider() local
1224 struct cam_clk *camclk = &cp->camclk[i]; fimc_md_register_clk_provider()
1243 cp->clks[i] = clk_register(NULL, &camclk->hw); fimc_md_register_clk_provider()
1244 if (IS_ERR(cp->clks[i])) { fimc_md_register_clk_provider()
1246 init.name, PTR_ERR(cp->clks[i])); fimc_md_register_clk_provider()
1247 ret = PTR_ERR(cp->clks[i]); fimc_md_register_clk_provider()
1250 cp->num_clocks++; fimc_md_register_clk_provider()
1253 if (cp->num_clocks == 0) { fimc_md_register_clk_provider()
1258 cp->clk_data.clks = cp->clks; fimc_md_register_clk_provider()
1259 cp->clk_data.clk_num = cp->num_clocks; fimc_md_register_clk_provider()
1260 cp->of_node = dev->of_node; fimc_md_register_clk_provider()
1262 &cp->clk_data); fimc_md_register_clk_provider()
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
H A Dkfd_priv.h39 * When working with cp scheduler we should assign the HIQ manually or via
42 * in the cp scheduling taking that in mind we set the HIQ slot in the
77 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
81 * to the cp and the user mode queues list that are currently running.
257 * cp read from the ring buffer. This field updates automatically by the H/W.
272 * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
293 /* Not relevant for user mode queues in cp scheduling */
319 * @mec: Used only in no cp scheduling mode and identifies to micro engine id
322 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
324 * @queue: Used only in no cp scheduliong mode and identifies the queue's slot.
356 KFD_MQD_TYPE_COMPUTE = 0, /* for no cp scheduling */
358 KFD_MQD_TYPE_CP, /* for cp queues and diq */
H A Dkfd_mqd_manager.h34 * @load_mqd: Loads the mqd to a concrete hqd slot. Used only for no cp
40 * Used only for no cp scheduling.
/linux-4.1.27/drivers/base/power/
H A Dsysfs.c111 char *cp; control_store() local
114 cp = memchr(buf, '\n', n); control_store()
115 if (cp) control_store()
116 len = cp - buf; control_store()
349 char *cp; wake_store() local
355 cp = memchr(buf, '\n', n); wake_store()
356 if (cp) wake_store()
357 len = cp - buf; wake_store()
573 char *cp; async_store() local
576 cp = memchr(buf, '\n', n); async_store()
577 if (cp) async_store()
578 len = cp - buf; async_store()
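
control_store(), wake_store() and async_store() above open with the same idiom: memchr() finds an optional trailing newline in the sysfs buffer and the comparison length is shortened to exclude it. Distilled into a helper (a sketch, not an existing kernel function):

    #include <linux/string.h>

    /* Length of buf with one trailing '\n' (if present) excluded. */
    static size_t store_len(const char *buf, size_t n)
    {
            const char *cp = memchr(buf, '\n', n);

            return cp ? (size_t)(cp - buf) : n;
    }
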
/linux-4.1.27/include/scsi/
H A Dfc_encode.h538 struct fc_els_cssp *cp; fc_plogi_fill() local
551 cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */ fc_plogi_fill()
552 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); fc_plogi_fill()
558 cp->cp_rdfs = htons((u16) lport->mfs); fc_plogi_fill()
559 cp->cp_con_seq = htons(255); fc_plogi_fill()
560 cp->cp_open_seq = 1; fc_plogi_fill()
569 struct fc_els_cssp *cp; fc_flogi_fill() local
582 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */ fc_flogi_fill()
583 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); fc_flogi_fill()
594 struct fc_els_cssp *cp; fc_fdisc_fill() local
607 cp = &fdisc->fl_cssp[3 - 1]; /* class 3 parameters */ fc_fdisc_fill()
608 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); fc_fdisc_fill()
H A Dfc_frame.h220 u8 *cp; fc_frame_payload_op() local
222 cp = fc_frame_payload_get(fp, sizeof(u8)); fc_frame_payload_op()
223 if (!cp) fc_frame_payload_op()
225 return *cp; fc_frame_payload_op()
/linux-4.1.27/fs/cifs/
H A Dcifs_unicode.c115 * @cp - codepage to which character should be converted
123 cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp, cifs_mapchar() argument
138 len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE); cifs_mapchar()
146 if (strcmp(cp->charset, "utf8")) cifs_mapchar()
445 const struct nls_table *cp, int map_chars) cifsConvertToUTF16()
457 return cifs_strtoUTF16(target, source, PATH_MAX, cp); cifsConvertToUTF16()
482 charlen = cp->char2uni(source + i, srclen - i, &tmp); cifsConvertToUTF16()
493 if (strcmp(cp->charset, "utf8") || !wchar_to) cifsConvertToUTF16()
586 * @cp - source codepage
595 const struct nls_table *cp, int remap) cifs_strndup_to_utf16()
600 len = cifs_local_to_utf16_bytes(src, maxlen, cp); cifs_strndup_to_utf16()
607 cifsConvertToUTF16(dst, src, strlen(src), cp, remap); cifs_strndup_to_utf16()
444 cifsConvertToUTF16(__le16 *target, const char *source, int srclen, const struct nls_table *cp, int map_chars) cifsConvertToUTF16() argument
594 cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len, const struct nls_table *cp, int remap) cifs_strndup_to_utf16() argument
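
The conversions above go through the struct nls_table callbacks: char2uni() consumes up to boundlen bytes of the local codepage and fills in one wide character, and uni2char() maps one wide character back into at most boundlen bytes; both return a byte count or a negative errno. A minimal sketch round-tripping one character (recode_one is an illustrative name; out must hold NLS_MAX_CHARSET_SIZE bytes):

    #include <linux/nls.h>

    static int recode_one(const struct nls_table *cp,
                          const unsigned char *src, int srclen,
                          unsigned char *out)
    {
            wchar_t wc;
            int len;

            len = cp->char2uni(src, srclen, &wc);   /* bytes consumed */
            if (len < 0)
                    return len;
            return cp->uni2char(wc, out, NLS_MAX_CHARSET_SIZE);
    }
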
/linux-4.1.27/drivers/scsi/aacraid/
H A Daachba.c877 char * cp = dev->supplement_adapter_info.AdapterTypeText; setinqstr() local
879 if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C')) setinqstr()
883 while (*cp && *cp != ' ' && --c) setinqstr()
884 ++cp; setinqstr()
885 c = *cp; setinqstr()
886 *cp = '\0'; setinqstr()
889 *cp = c; setinqstr()
890 while (*cp && *cp != ' ') setinqstr()
891 ++cp; setinqstr()
893 while (*cp == ' ') setinqstr()
894 ++cp; setinqstr()
897 if (strlen(cp) > sizeof(str->pid)) { setinqstr()
898 c = cp[sizeof(str->pid)]; setinqstr()
899 cp[sizeof(str->pid)] = '\0'; setinqstr()
901 inqstrcpy (cp, str->pid); setinqstr()
903 cp[sizeof(str->pid)] = c; setinqstr()
2513 char cp[13]; aac_scsi_cmd() local
2518 cp[0] = (capacity >> 56) & 0xff; aac_scsi_cmd()
2519 cp[1] = (capacity >> 48) & 0xff; aac_scsi_cmd()
2520 cp[2] = (capacity >> 40) & 0xff; aac_scsi_cmd()
2521 cp[3] = (capacity >> 32) & 0xff; aac_scsi_cmd()
2522 cp[4] = (capacity >> 24) & 0xff; aac_scsi_cmd()
2523 cp[5] = (capacity >> 16) & 0xff; aac_scsi_cmd()
2524 cp[6] = (capacity >> 8) & 0xff; aac_scsi_cmd()
2525 cp[7] = (capacity >> 0) & 0xff; aac_scsi_cmd()
2526 cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; aac_scsi_cmd()
2527 cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; aac_scsi_cmd()
2528 cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; aac_scsi_cmd()
2529 cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff; aac_scsi_cmd()
2530 cp[12] = 0; aac_scsi_cmd()
2536 alloc_len = min_t(size_t, alloc_len, sizeof(cp)); aac_scsi_cmd()
2537 scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len); aac_scsi_cmd()
2554 char cp[8]; aac_scsi_cmd() local
2562 cp[0] = (capacity >> 24) & 0xff; aac_scsi_cmd()
2563 cp[1] = (capacity >> 16) & 0xff; aac_scsi_cmd()
2564 cp[2] = (capacity >> 8) & 0xff; aac_scsi_cmd()
2565 cp[3] = (capacity >> 0) & 0xff; aac_scsi_cmd()
2566 cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; aac_scsi_cmd()
2567 cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; aac_scsi_cmd()
2568 cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; aac_scsi_cmd()
2569 cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff; aac_scsi_cmd()
2570 scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); aac_scsi_cmd()
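
Both blocks above serialize capacity data big-endian by hand: the 13-byte buffer carries a 64-bit last-block number plus a 32-bit block size (the READ CAPACITY(16) layout), the 8-byte one a pair of 32-bit values (READ CAPACITY(10)). The shift-and-mask pattern generalizes to a small helper, sketched below; the kernel also provides put_unaligned_be64() for the same job:

    #include <linux/types.h>

    /* Store v big-endian into cp[0..7], highest byte first. */
    static void put_be64(u8 *cp, u64 v)
    {
            int i;

            for (i = 0; i < 8; i++)
                    cp[i] = (v >> (56 - 8 * i)) & 0xff;
    }
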
/linux-4.1.27/drivers/media/i2c/
H A Dmt9v011.c352 struct v4l2_captureparm *cp = &parms->parm.capture; mt9v011_g_parm() local
357 memset(cp, 0, sizeof(struct v4l2_captureparm)); mt9v011_g_parm()
358 cp->capability = V4L2_CAP_TIMEPERFRAME; mt9v011_g_parm()
360 &cp->timeperframe.numerator, mt9v011_g_parm()
361 &cp->timeperframe.denominator); mt9v011_g_parm()
368 struct v4l2_captureparm *cp = &parms->parm.capture; mt9v011_s_parm() local
369 struct v4l2_fract *tpf = &cp->timeperframe; mt9v011_s_parm()
374 if (cp->extendedmode != 0) mt9v011_s_parm()
H A Dvs6624.c663 struct v4l2_captureparm *cp = &parms->parm.capture; vs6624_g_parm() local
668 memset(cp, 0, sizeof(*cp)); vs6624_g_parm()
669 cp->capability = V4L2_CAP_TIMEPERFRAME; vs6624_g_parm()
670 cp->timeperframe.numerator = sensor->frame_rate.denominator; vs6624_g_parm()
671 cp->timeperframe.denominator = sensor->frame_rate.numerator; vs6624_g_parm()
678 struct v4l2_captureparm *cp = &parms->parm.capture; vs6624_s_parm() local
679 struct v4l2_fract *tpf = &cp->timeperframe; vs6624_s_parm()
683 if (cp->extendedmode != 0) vs6624_s_parm()
/linux-4.1.27/drivers/target/tcm_fc/
H A Dtfc_conf.c68 const char *cp; ft_parse_wwn() local
76 for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) { ft_parse_wwn()
77 c = *cp; ft_parse_wwn()
78 if (c == '\n' && cp[1] == '\0') ft_parse_wwn()
91 return cp - name; ft_parse_wwn()
102 err, cp - name, pos, byte); ft_parse_wwn()
/linux-4.1.27/arch/avr32/include/asm/
H A Datomic.h111 " cp.w %0, %4\n" __atomic_add_unless()
125 " cp.w %0, %4\n" __atomic_add_unless()
/linux-4.1.27/net/caif/
H A Dcfctrl.c382 u8 *cp; cfctrl_recv() local
438 cp = (u8 *) linkparam.u.rfm.volume; cfctrl_recv()
442 *cp++ = tmp; cfctrl_recv()
443 *cp = '\0'; cfctrl_recv()
465 cp = (u8 *) linkparam.u.utility.name; cfctrl_recv()
472 *cp++ = tmp; cfctrl_recv()
478 cp = linkparam.u.utility.params; cfctrl_recv()
481 *cp++ = tmp; cfctrl_recv()
/linux-4.1.27/lib/raid6/test/
H A DMakefile58 cp -f $< $@
61 cp -f $< $@
/linux-4.1.27/kernel/
H A Dcpuset.c546 struct cpuset *cp; update_domain_attr_tree() local
550 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { cpuset_for_each_descendant_pre()
551 /* skip the whole subtree if @cp doesn't have any CPU */ cpuset_for_each_descendant_pre()
552 if (cpumask_empty(cp->cpus_allowed)) { cpuset_for_each_descendant_pre()
557 if (is_sched_load_balance(cp)) cpuset_for_each_descendant_pre()
558 update_domain_attr(dattr, cp); cpuset_for_each_descendant_pre()
620 struct cpuset *cp; /* scans q */ generate_sched_domains() local
657 csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL); generate_sched_domains()
663 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { generate_sched_domains()
664 if (cp == &top_cpuset) generate_sched_domains()
667 * Continue traversing beyond @cp iff @cp has some CPUs and generate_sched_domains()
674 if (!cpumask_empty(cp->cpus_allowed) && generate_sched_domains()
675 !(is_sched_load_balance(cp) && generate_sched_domains()
676 cpumask_intersects(cp->cpus_allowed, non_isolated_cpus))) generate_sched_domains()
679 if (is_sched_load_balance(cp)) generate_sched_domains()
680 csa[csn++] = cp; generate_sched_domains()
682 /* skip @cp's subtree */ generate_sched_domains()
868 struct cpuset *cp; update_cpumasks_hier() local
873 cpuset_for_each_descendant_pre(cp, pos_css, cs) { cpuset_for_each_descendant_pre()
874 struct cpuset *parent = parent_cs(cp); cpuset_for_each_descendant_pre()
876 cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus); cpuset_for_each_descendant_pre()
882 if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus)) cpuset_for_each_descendant_pre()
886 if (cpumask_equal(new_cpus, cp->effective_cpus)) { cpuset_for_each_descendant_pre()
891 if (!css_tryget_online(&cp->css)) cpuset_for_each_descendant_pre()
896 cpumask_copy(cp->effective_cpus, new_cpus); cpuset_for_each_descendant_pre()
899 WARN_ON(!cgroup_on_dfl(cp->css.cgroup) && cpuset_for_each_descendant_pre()
900 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); cpuset_for_each_descendant_pre()
902 update_tasks_cpumask(cp); cpuset_for_each_descendant_pre()
908 if (!cpumask_empty(cp->cpus_allowed) && cpuset_for_each_descendant_pre()
909 is_sched_load_balance(cp)) cpuset_for_each_descendant_pre()
913 css_put(&cp->css); cpuset_for_each_descendant_pre()
1125 struct cpuset *cp; update_nodemasks_hier() local
1129 cpuset_for_each_descendant_pre(cp, pos_css, cs) { cpuset_for_each_descendant_pre()
1130 struct cpuset *parent = parent_cs(cp); cpuset_for_each_descendant_pre()
1132 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); cpuset_for_each_descendant_pre()
1138 if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems)) cpuset_for_each_descendant_pre()
1142 if (nodes_equal(*new_mems, cp->effective_mems)) { cpuset_for_each_descendant_pre()
1147 if (!css_tryget_online(&cp->css)) cpuset_for_each_descendant_pre()
1152 cp->effective_mems = *new_mems; cpuset_for_each_descendant_pre()
1155 WARN_ON(!cgroup_on_dfl(cp->css.cgroup) && cpuset_for_each_descendant_pre()
1156 !nodes_equal(cp->mems_allowed, cp->effective_mems)); cpuset_for_each_descendant_pre()
1158 update_tasks_nodemask(cp); cpuset_for_each_descendant_pre()
1161 css_put(&cp->css); cpuset_for_each_descendant_pre()
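
update_cpumasks_hier() and update_nodemasks_hier() above share a locking idiom: the pre-order descendant walk runs under rcu_read_lock(), so before doing anything that may sleep the code pins the current cpuset with css_tryget_online(), drops RCU for the work, re-takes it, and only then releases the reference. The skeleton of that idiom, stripped of the cpuset-specific updates:

    rcu_read_lock();
    cpuset_for_each_descendant_pre(cp, pos_css, root) {
            if (!css_tryget_online(&cp->css))
                    continue;               /* went offline; skip it */
            rcu_read_unlock();

            /* ... sleepable work on cp (mask copies, task updates) ... */

            rcu_read_lock();
            css_put(&cp->css);
    }
    rcu_read_unlock();
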
/linux-4.1.27/drivers/isdn/hisax/
H A Dhfc4s8s_l1.c636 u_char *cp; rx_d_frame() local
701 cp = skb->data; rx_d_frame()
706 *((unsigned long *) cp) = fRead_hfc32(l1p->hw); rx_d_frame()
707 cp += 4; rx_d_frame()
712 *cp++ = fRead_hfc8(l1p->hw); rx_d_frame()
717 if (*(--cp)) { rx_d_frame()
720 skb->len = (cp - skb->data) - 2; rx_d_frame()
842 u_char *cp; tx_d_frame() local
866 cp = skb->data; tx_d_frame()
872 fWrite_hfc32(l1p->hw, *(unsigned long *) cp); tx_d_frame()
873 cp += 4; tx_d_frame()
878 fWrite_hfc8(l1p->hw, *cp++); tx_d_frame()
896 u_char *cp; tx_b_frame() local
950 cp = skb->data + bch->tx_cnt; tx_b_frame()
955 fWrite_hfc32(l1->hw, *(unsigned long *) cp); tx_b_frame()
956 cp += 4; tx_b_frame()
961 fWrite_hfc8(l1->hw, *cp++); tx_b_frame()
/linux-4.1.27/drivers/media/pci/ttpci/
H A Dbudget-ci.c751 u8 band, cp, filter; philips_tdm1316l_tuner_set_params() local
758 cp = 3; philips_tdm1316l_tuner_set_params()
760 cp = 5; philips_tdm1316l_tuner_set_params()
762 cp = 6; philips_tdm1316l_tuner_set_params()
764 cp = 3; philips_tdm1316l_tuner_set_params()
766 cp = 5; philips_tdm1316l_tuner_set_params()
768 cp = 6; philips_tdm1316l_tuner_set_params()
770 cp = 3; philips_tdm1316l_tuner_set_params()
772 cp = 5; philips_tdm1316l_tuner_set_params()
774 cp = 7; philips_tdm1316l_tuner_set_params()
819 tuner_buf[3] = (cp << 5) | (filter << 3) | band; philips_tdm1316l_tuner_set_params()
870 u8 band, cp, filter; dvbc_philips_tdm1316l_tuner_set_params() local
877 cp = 3; dvbc_philips_tdm1316l_tuner_set_params()
880 cp = 5; dvbc_philips_tdm1316l_tuner_set_params()
883 cp = 6; dvbc_philips_tdm1316l_tuner_set_params()
886 cp = 3; dvbc_philips_tdm1316l_tuner_set_params()
889 cp = 5; dvbc_philips_tdm1316l_tuner_set_params()
892 cp = 6; dvbc_philips_tdm1316l_tuner_set_params()
895 cp = 3; dvbc_philips_tdm1316l_tuner_set_params()
898 cp = 5; dvbc_philips_tdm1316l_tuner_set_params()
901 cp = 7; dvbc_philips_tdm1316l_tuner_set_params()
916 tuner_buf[3] = (cp << 5) | (filter << 3) | band; dvbc_philips_tdm1316l_tuner_set_params()
/linux-4.1.27/drivers/media/usb/ttusb-budget/
H A Ddvb-ttusb-budget.c1083 u8 band, cp, filter; philips_tdm1316l_tuner_set_params() local
1088 else if (tuner_frequency < 130000000) cp = 3; philips_tdm1316l_tuner_set_params()
1089 else if (tuner_frequency < 160000000) cp = 5; philips_tdm1316l_tuner_set_params()
1090 else if (tuner_frequency < 200000000) cp = 6; philips_tdm1316l_tuner_set_params()
1091 else if (tuner_frequency < 290000000) cp = 3; philips_tdm1316l_tuner_set_params()
1092 else if (tuner_frequency < 420000000) cp = 5; philips_tdm1316l_tuner_set_params()
1093 else if (tuner_frequency < 480000000) cp = 6; philips_tdm1316l_tuner_set_params()
1094 else if (tuner_frequency < 620000000) cp = 3; philips_tdm1316l_tuner_set_params()
1095 else if (tuner_frequency < 830000000) cp = 5; philips_tdm1316l_tuner_set_params()
1096 else if (tuner_frequency < 895000000) cp = 7; philips_tdm1316l_tuner_set_params()
1139 tuner_buf[3] = (cp << 5) | (filter << 3) | band; philips_tdm1316l_tuner_set_params()
1410 u8 band, cp, filter; dvbc_philips_tdm1316l_tuner_set_params() local
1415 else if (tuner_frequency < 130000000) {cp = 3; band = 1;} dvbc_philips_tdm1316l_tuner_set_params()
1416 else if (tuner_frequency < 160000000) {cp = 5; band = 1;} dvbc_philips_tdm1316l_tuner_set_params()
1417 else if (tuner_frequency < 200000000) {cp = 6; band = 1;} dvbc_philips_tdm1316l_tuner_set_params()
1418 else if (tuner_frequency < 290000000) {cp = 3; band = 2;} dvbc_philips_tdm1316l_tuner_set_params()
1419 else if (tuner_frequency < 420000000) {cp = 5; band = 2;} dvbc_philips_tdm1316l_tuner_set_params()
1420 else if (tuner_frequency < 480000000) {cp = 6; band = 2;} dvbc_philips_tdm1316l_tuner_set_params()
1421 else if (tuner_frequency < 620000000) {cp = 3; band = 4;} dvbc_philips_tdm1316l_tuner_set_params()
1422 else if (tuner_frequency < 830000000) {cp = 5; band = 4;} dvbc_philips_tdm1316l_tuner_set_params()
1423 else if (tuner_frequency < 895000000) {cp = 7; band = 4;} dvbc_philips_tdm1316l_tuner_set_params()
1437 tuner_buf[3] = (cp << 5) | (filter << 3) | band; dvbc_philips_tdm1316l_tuner_set_params()
/linux-4.1.27/drivers/media/pci/bt8xx/
H A Ddvb-bt8xx.c159 unsigned char cp = 0; thomson_dtt7579_tuner_calc_regs() local
167 cp = 0xb4; thomson_dtt7579_tuner_calc_regs()
169 cp = 0xbc; thomson_dtt7579_tuner_calc_regs()
171 cp = 0xf4; thomson_dtt7579_tuner_calc_regs()
183 pllbuf[3] = cp; thomson_dtt7579_tuner_calc_regs()
354 unsigned char cp = 0; advbt771_samsung_tdtc9251dh0_tuner_calc_regs() local
361 cp = 0xB4; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
363 cp = 0xBC; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
365 cp = 0xB4; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
367 cp = 0xBC; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
369 cp = 0xF4; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
371 cp = 0xFC; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
373 cp = 0xBC; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
375 cp = 0xF4; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
377 cp = 0xFC; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
401 pllbuf[3] = cp; advbt771_samsung_tdtc9251dh0_tuner_calc_regs()
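
In the budget-ci and ttusb-budget excerpts above, the frequency ladder picks a 3-bit charge-pump code and the control byte is packed as (cp << 5) | (filter << 3) | band: charge pump in bits 7..5, filter in bits 4..3, band select in bits 2..0. (The dvb-bt8xx variant instead keeps a precomposed byte such as 0xb4 in cp and stores it directly.) A worked example with assumed values: cp = 5, filter = 1, band = 2 gives 0xa0 | 0x08 | 0x02 = 0xaa.
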
/linux-4.1.27/drivers/gpu/drm/amd/include/
H A Dkgd_kfd_interface.h96 * This function is used only for no cp scheduling mode.
98 * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp
99 * scheduling mode. Only used for no cp scheduling mode.
103 * @hqd_load: Loads the mqd structure to a H/W hqd slot. used only for no cp
/linux-4.1.27/include/uapi/linux/
H A Dhdlcdrv.h54 struct hdlcdrv_channel_params cp; member in union:hdlcdrv_ioctl::__anon13269
/linux-4.1.27/arch/s390/include/asm/
H A Dnmi.h26 __u32 cp : 1; /* 09 channel-report pending */ member in struct:mci
/linux-4.1.27/arch/s390/kernel/vdso32/
H A DMakefile52 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
/linux-4.1.27/arch/s390/kernel/vdso64/
H A DMakefile52 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
/linux-4.1.27/arch/parisc/
H A Dinstall.sh65 cp $3 $4/System.map-$1
/linux-4.1.27/arch/powerpc/kernel/vdso32/
H A DMakefile52 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
/linux-4.1.27/arch/powerpc/kernel/vdso64/
H A DMakefile45 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
/linux-4.1.27/arch/blackfin/boot/
H A Dinstall.sh57 cp $3 $4/System.map
/linux-4.1.27/arch/cris/boot/rescue/
H A DMakefile29 cp -p $(obj)/rescue.bin $(objtree)
/linux-4.1.27/arch/arm/boot/
H A Dinstall.sh60 cp $3 $4/System.map-$1
/linux-4.1.27/arch/arm/mach-davinci/
H A Dda8xx-dt.c24 { .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
/linux-4.1.27/include/linux/mtd/
H A Dgen_probe.h35 struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp);
/linux-4.1.27/drivers/media/usb/cx231xx/
H A Dcx231xx-audio.c102 unsigned char *cp; cx231xx_audio_isocirq() local
135 cp = (unsigned char *)urb->transfer_buffer + cx231xx_audio_isocirq()
146 memcpy(runtime->dma_area + oldptr * stride, cp, cx231xx_audio_isocirq()
148 memcpy(runtime->dma_area, cp + cnt * stride, cx231xx_audio_isocirq()
151 memcpy(runtime->dma_area + oldptr * stride, cp, cx231xx_audio_isocirq()
192 unsigned char *cp; cx231xx_audio_bulkirq() local
225 cp = (unsigned char *)urb->transfer_buffer; cx231xx_audio_bulkirq()
232 memcpy(runtime->dma_area + oldptr * stride, cp, cx231xx_audio_bulkirq()
234 memcpy(runtime->dma_area, cp + cnt * stride, cx231xx_audio_bulkirq()
237 memcpy(runtime->dma_area + oldptr * stride, cp, cx231xx_audio_bulkirq()
/linux-4.1.27/net/ipv4/
H A Dipconfig.c1345 char *cp, *cq; root_nfs_parse_addr() local
1347 cp = cq = name; root_nfs_parse_addr()
1349 while (*cp >= '0' && *cp <= '9') root_nfs_parse_addr()
1350 cp++; root_nfs_parse_addr()
1351 if (cp == cq || cp - cq > 3) root_nfs_parse_addr()
1353 if (*cp == '.' || octets == 3) root_nfs_parse_addr()
1356 cp++; root_nfs_parse_addr()
1357 cq = cp; root_nfs_parse_addr()
1359 if (octets == 4 && (*cp == ':' || *cp == '\0')) { root_nfs_parse_addr()
1360 if (*cp == ':') root_nfs_parse_addr()
1361 *cp++ = '\0'; root_nfs_parse_addr()
1363 memmove(name, cp, strlen(cp) + 1); root_nfs_parse_addr()
1588 char *cp, *ip, *dp; ip_auto_config_setup() local
1614 if ((cp = strchr(ip, ':'))) ip_auto_config_setup()
1615 *cp++ = '\0'; ip_auto_config_setup()
1670 ip = cp; ip_auto_config_setup()
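
root_nfs_parse_addr() above recognizes a leading dotted-quad (up to four runs of one to three digits separated by dots, terminated by ':' or end of string) and, on a match, strips the address prefix off the name with the memmove(). The same validation as a standalone sketch; ipv4_prefix_len is an illustrative name:

    /* Length of a leading dotted-quad in s that is followed by ':'
     * or '\0'; -1 when s does not start with one. */
    static int ipv4_prefix_len(const char *s)
    {
            const char *cp = s;
            int octets;

            for (octets = 0; octets < 4; octets++) {
                    int digits = 0;

                    while (*cp >= '0' && *cp <= '9') {
                            cp++;
                            digits++;
                    }
                    if (digits == 0 || digits > 3)
                            return -1;
                    if (octets < 3) {
                            if (*cp != '.')
                                    return -1;
                            cp++;
                    }
            }
            if (*cp != ':' && *cp != '\0')
                    return -1;
            return (int)(cp - s);
    }
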
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
H A Do2iblnd_cb.c2190 struct rdma_conn_param cp; kiblnd_passive_connect() local
2413 memset(&cp, 0, sizeof(cp)); kiblnd_passive_connect()
2414 cp.private_data = ackmsg; kiblnd_passive_connect()
2415 cp.private_data_len = ackmsg->ibm_nob; kiblnd_passive_connect()
2416 cp.responder_resources = 0; /* No atomic ops or RDMA reads */ kiblnd_passive_connect()
2417 cp.initiator_depth = 0; kiblnd_passive_connect()
2418 cp.flow_control = 1; kiblnd_passive_connect()
2419 cp.retry_count = *kiblnd_tunables.kib_retry_count; kiblnd_passive_connect()
2420 cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; kiblnd_passive_connect()
2424 rc = rdma_accept(cmid, &cp); kiblnd_passive_connect()
2452 __u64 incarnation, int why, kib_connparams_t *cp) kiblnd_reconnect()
2506 cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version), kiblnd_reconnect()
2507 cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version), kiblnd_reconnect()
2508 cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE); kiblnd_reconnect()
2536 kib_connparams_t *cp = NULL; kiblnd_rejected() local
2564 cp = &rej->ibr_cp; kiblnd_rejected()
2568 __swab16s(&cp->ibcp_queue_depth); kiblnd_rejected()
2569 __swab16s(&cp->ibcp_max_frags); kiblnd_rejected()
2570 __swab32s(&cp->ibcp_max_msg_size); kiblnd_rejected()
2605 incarnation, rej->ibr_why, cp); kiblnd_rejected()
2611 cp != NULL ? cp->ibcp_queue_depth : kiblnd_rejected()
2619 cp != NULL ? cp->ibcp_max_frags : kiblnd_rejected()
2755 struct rdma_conn_param cp; kiblnd_active_connect() local
2791 memset(&cp, 0, sizeof(cp)); kiblnd_active_connect()
2792 cp.private_data = msg; kiblnd_active_connect()
2793 cp.private_data_len = msg->ibm_nob; kiblnd_active_connect()
2794 cp.responder_resources = 0; /* No atomic ops or RDMA reads */ kiblnd_active_connect()
2795 cp.initiator_depth = 0; kiblnd_active_connect()
2796 cp.flow_control = 1; kiblnd_active_connect()
2797 cp.retry_count = *kiblnd_tunables.kib_retry_count; kiblnd_active_connect()
2798 cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; kiblnd_active_connect()
2803 rc = rdma_connect(cmid, &cp); kiblnd_active_connect()
2451 kiblnd_reconnect(kib_conn_t *conn, int version, __u64 incarnation, int why, kib_connparams_t *cp) kiblnd_reconnect() argument
