Searched refs:msg (Results 1 - 200 of 1789) sorted by relevance

/linux-4.1.27/fs/reiserfs/
hashes.c
44 u32 keyed_hash(const signed char *msg, int len) keyed_hash() argument
59 a = (u32) msg[0] | keyed_hash()
60 (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24; keyed_hash()
61 b = (u32) msg[4] | keyed_hash()
62 (u32) msg[5] << 8 | (u32) msg[6] << 16 | (u32) msg[7] << 24; keyed_hash()
63 c = (u32) msg[8] | keyed_hash()
64 (u32) msg[9] << 8 | keyed_hash()
65 (u32) msg[10] << 16 | (u32) msg[11] << 24; keyed_hash()
66 d = (u32) msg[12] | keyed_hash()
67 (u32) msg[13] << 8 | keyed_hash()
68 (u32) msg[14] << 16 | (u32) msg[15] << 24; keyed_hash()
73 msg += 16; keyed_hash()
77 a = (u32) msg[0] | keyed_hash()
78 (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24; keyed_hash()
79 b = (u32) msg[4] | keyed_hash()
80 (u32) msg[5] << 8 | (u32) msg[6] << 16 | (u32) msg[7] << 24; keyed_hash()
81 c = (u32) msg[8] | keyed_hash()
82 (u32) msg[9] << 8 | keyed_hash()
83 (u32) msg[10] << 16 | (u32) msg[11] << 24; keyed_hash()
88 d |= msg[i]; keyed_hash()
91 a = (u32) msg[0] | keyed_hash()
92 (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24; keyed_hash()
93 b = (u32) msg[4] | keyed_hash()
94 (u32) msg[5] << 8 | (u32) msg[6] << 16 | (u32) msg[7] << 24; keyed_hash()
99 c |= msg[i]; keyed_hash()
102 a = (u32) msg[0] | keyed_hash()
103 (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24; keyed_hash()
108 b |= msg[i]; keyed_hash()
114 a |= msg[i]; keyed_hash()
128 u32 yura_hash(const signed char *msg, int len) yura_hash() argument
138 a = msg[0] - 48; yura_hash()
140 a = (msg[0] - 48) * pow; yura_hash()
143 c = msg[i] - 48; yura_hash()
167 u32 r5_hash(const signed char *msg, int len) r5_hash() argument
170 while (*msg) { r5_hash()
171 a += *msg << 4; r5_hash()
172 a += *msg >> 4; r5_hash()
174 msg++; r5_hash()
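
The r5_hash() matches above show nearly the whole function; only the multiplier step is missing because that line does not contain "msg". A self-contained user-space sketch of an r5-style hash, assuming the kernel's a *= 11 multiplier between the shift steps:

#include <stdint.h>
#include <stdio.h>

/* r5-style name hash: mix each byte in with two shifts and a small
 * multiplier, stopping at the NUL terminator (len is unused, as in
 * the matched r5_hash() above). */
static uint32_t r5_hash(const signed char *msg, int len)
{
	uint32_t a = 0;

	(void)len;
	while (*msg) {
		a += *msg << 4;
		a += *msg >> 4;
		a *= 11;	/* assumed multiplier from the kernel source */
		msg++;
	}
	return a;
}

int main(void)
{
	printf("r5(\"hello\") = 0x%08x\n",
	       r5_hash((const signed char *)"hello", 5));
	return 0;
}
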
/linux-4.1.27/include/trace/events/
spi.h
46 TP_PROTO(struct spi_message *msg),
48 TP_ARGS(msg),
53 __field( struct spi_message *, msg )
57 __entry->bus_num = msg->spi->master->bus_num;
58 __entry->chip_select = msg->spi->chip_select;
59 __entry->msg = msg;
64 (struct spi_message *)__entry->msg)
69 TP_PROTO(struct spi_message *msg),
71 TP_ARGS(msg)
77 TP_PROTO(struct spi_message *msg),
79 TP_ARGS(msg)
85 TP_PROTO(struct spi_message *msg),
87 TP_ARGS(msg),
92 __field( struct spi_message *, msg )
98 __entry->bus_num = msg->spi->master->bus_num;
99 __entry->chip_select = msg->spi->chip_select;
100 __entry->msg = msg;
101 __entry->frame = msg->frame_length;
102 __entry->actual = msg->actual_length;
107 (struct spi_message *)__entry->msg,
113 TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
115 TP_ARGS(msg, xfer),
125 __entry->bus_num = msg->spi->master->bus_num;
126 __entry->chip_select = msg->spi->chip_select;
139 TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
141 TP_ARGS(msg, xfer)
147 TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
149 TP_ARGS(msg, xfer)
printk.h
15 __dynamic_array(char, msg, len + 1)
19 memcpy(__get_dynamic_array(msg), text, len);
20 ((char *)__get_dynamic_array(msg))[len] = 0;
23 TP_printk("%s", __get_str(msg))
i2c.h
30 TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg,
32 TP_ARGS(adap, msg, num),
39 __dynamic_array(__u8, buf, msg->len) ),
43 __entry->addr = msg->addr;
44 __entry->flags = msg->flags;
45 __entry->len = msg->len;
46 memcpy(__get_dynamic_array(buf), msg->buf, msg->len);
63 TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg,
65 TP_ARGS(adap, msg, num),
76 __entry->addr = msg->addr;
77 __entry->flags = msg->flags;
78 __entry->len = msg->len;
94 TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg,
96 TP_ARGS(adap, msg, num),
103 __dynamic_array(__u8, buf, msg->len) ),
107 __entry->addr = msg->addr;
108 __entry->flags = msg->flags;
109 __entry->len = msg->len;
110 memcpy(__get_dynamic_array(buf), msg->buf, msg->len);
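
The spi.h, printk.h and i2c.h matches above all use the standard TRACE_EVENT() pattern: copy fields out of the message in TP_fast_assign() and format them in TP_printk(). A minimal sketch of that pattern, capturing a variable-length string the way printk.h does; the "demo" subsystem, event name and header guard are illustrative, not from the tree:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_msg,

	TP_PROTO(const char *text, size_t len),

	TP_ARGS(text, len),

	/* reserve len + 1 bytes in the ring buffer entry */
	TP_STRUCT__entry(
		__dynamic_array(char, msg, len + 1)
	),

	/* copy and NUL-terminate the captured message */
	TP_fast_assign(
		memcpy(__get_dynamic_array(msg), text, len);
		((char *)__get_dynamic_array(msg))[len] = 0;
	),

	TP_printk("%s", __get_str(msg))
);

#endif /* _TRACE_DEMO_H */

/* must be outside the multi-read protection */
#include <trace/define_trace.h>
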
/linux-4.1.27/net/ceph/
msgpool.c
13 struct ceph_msg *msg; msgpool_alloc() local
15 msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true); msgpool_alloc()
16 if (!msg) { msgpool_alloc()
19 dout("msgpool_alloc %s %p\n", pool->name, msg); msgpool_alloc()
20 msg->pool = pool; msgpool_alloc()
22 return msg; msgpool_alloc()
28 struct ceph_msg *msg = element; msgpool_free() local
30 dout("msgpool_release %s %p\n", pool->name, msg); msgpool_free()
31 msg->pool = NULL; msgpool_free()
32 ceph_msg_put(msg); msgpool_free()
57 struct ceph_msg *msg; ceph_msgpool_get() local
68 msg = mempool_alloc(pool->pool, GFP_NOFS); ceph_msgpool_get()
69 dout("msgpool_get %s %p\n", pool->name, msg); ceph_msgpool_get()
70 return msg; ceph_msgpool_get()
73 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) ceph_msgpool_put() argument
75 dout("msgpool_put %s %p\n", pool->name, msg); ceph_msgpool_put()
77 /* reset msg front_len; user may have changed it */ ceph_msgpool_put()
78 msg->front.iov_len = pool->front_len; ceph_msgpool_put()
79 msg->hdr.front_len = cpu_to_le32(pool->front_len); ceph_msgpool_put()
81 kref_init(&msg->kref); /* retake single ref */ ceph_msgpool_put()
82 mempool_free(msg, pool->pool); ceph_msgpool_put()
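
ceph_msgpool_get()/ceph_msgpool_put() above are a thin wrapper over a mempool: alloc/free callbacks build and drop messages, and put() resets fields the caller may have changed before recycling. A generic sketch of that shape; the demo_* names and the reset field are illustrative, not the ceph API:

#include <linux/mempool.h>
#include <linux/slab.h>

struct demo_msg {
	size_t front_len;		/* per-use field, reset on put */
	char payload[128];
};

/* mempool callbacks: construct/destroy one pool element */
static void *demo_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kzalloc(sizeof(struct demo_msg), gfp_mask);
}

static void demo_pool_free(void *element, void *pool_data)
{
	kfree(element);
}

static mempool_t *demo_msgpool_create(int min_nr)
{
	return mempool_create(min_nr, demo_pool_alloc, demo_pool_free, NULL);
}

static struct demo_msg *demo_msgpool_get(mempool_t *pool)
{
	return mempool_alloc(pool, GFP_NOFS);
}

static void demo_msgpool_put(mempool_t *pool, struct demo_msg *msg,
			     size_t front_len)
{
	msg->front_len = front_len;	/* user may have changed it */
	mempool_free(msg, pool);
}
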
messenger.c
528 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; ceph_tcp_recvmsg() local
531 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); ceph_tcp_recvmsg()
560 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; ceph_tcp_sendmsg() local
564 msg.msg_flags |= MSG_MORE; ceph_tcp_sendmsg()
566 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ ceph_tcp_sendmsg()
568 r = kernel_sendmsg(sock, &msg, iov, kvlen, len); ceph_tcp_sendmsg()
636 static void ceph_msg_remove(struct ceph_msg *msg) ceph_msg_remove() argument
638 list_del_init(&msg->list_head); ceph_msg_remove()
639 BUG_ON(msg->con == NULL); ceph_msg_remove()
640 msg->con->ops->put(msg->con); ceph_msg_remove()
641 msg->con = NULL; ceph_msg_remove()
643 ceph_msg_put(msg); ceph_msg_remove()
648 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg, ceph_msg_remove_list() local
650 ceph_msg_remove(msg); ceph_msg_remove_list()
1111 static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) ceph_msg_data_cursor_init() argument
1113 struct ceph_msg_data_cursor *cursor = &msg->cursor; ceph_msg_data_cursor_init()
1117 BUG_ON(length > msg->data_length); ceph_msg_data_cursor_init()
1118 BUG_ON(list_empty(&msg->data)); ceph_msg_data_cursor_init()
1120 cursor->data_head = &msg->data; ceph_msg_data_cursor_init()
1122 data = list_first_entry(&msg->data, struct ceph_msg_data, links); ceph_msg_data_cursor_init()
1213 static void prepare_message_data(struct ceph_msg *msg, u32 data_len) prepare_message_data() argument
1215 BUG_ON(!msg); prepare_message_data()
1220 ceph_msg_data_cursor_init(msg, (size_t)data_len); prepare_message_data()
1549 struct ceph_msg *msg = con->out_msg; write_partial_message_data() local
1550 struct ceph_msg_data_cursor *cursor = &msg->cursor; write_partial_message_data()
1554 dout("%s %p msg %p\n", __func__, con, msg); write_partial_message_data()
1556 if (list_empty(&msg->data)) write_partial_message_data()
1567 crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0; write_partial_message_data()
1576 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, write_partial_message_data()
1582 msg->footer.data_crc = cpu_to_le32(crc); write_partial_message_data()
1588 need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret); write_partial_message_data()
1591 dout("%s %p msg %p done\n", __func__, con, msg); write_partial_message_data()
1595 msg->footer.data_crc = cpu_to_le32(crc); write_partial_message_data()
1597 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; write_partial_message_data()
2249 struct ceph_msg *msg = con->in_msg; read_partial_msg_data() local
2250 struct ceph_msg_data_cursor *cursor = &msg->cursor; read_partial_msg_data()
2258 BUG_ON(!msg); read_partial_msg_data()
2259 if (list_empty(&msg->data)) read_partial_msg_data()
2265 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, read_partial_msg_data()
2277 (void) ceph_msg_data_advance(&msg->cursor, (size_t)ret); read_partial_msg_data()
2302 dout("read_partial_message con %p msg %p\n", con, m); read_partial_message()
2425 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", read_partial_message()
2464 struct ceph_msg *msg; process_message() local
2468 msg = con->in_msg; process_message()
2474 con->peer_name = msg->hdr.src; process_message()
2480 msg, le64_to_cpu(msg->hdr.seq), process_message()
2481 ENTITY_NAME(msg->hdr.src), process_message()
2482 le16_to_cpu(msg->hdr.type), process_message()
2483 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), process_message()
2484 le32_to_cpu(msg->hdr.front_len), process_message()
2485 le32_to_cpu(msg->hdr.data_len), process_message()
2487 con->ops->dispatch(con, msg); process_message()
2539 /* msg pages? */ try_write()
3000 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) ceph_con_send() argument
3003 msg->hdr.src = con->msgr->inst.name; ceph_con_send()
3004 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); ceph_con_send()
3005 msg->needs_out_seq = true; ceph_con_send()
3010 dout("con_send %p closed, dropping %p\n", con, msg); ceph_con_send()
3011 ceph_msg_put(msg); ceph_con_send()
3016 BUG_ON(msg->con != NULL); ceph_con_send()
3017 msg->con = con->ops->get(con); ceph_con_send()
3018 BUG_ON(msg->con == NULL); ceph_con_send()
3020 BUG_ON(!list_empty(&msg->list_head)); ceph_con_send()
3021 list_add_tail(&msg->list_head, &con->out_queue); ceph_con_send()
3022 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, ceph_con_send()
3023 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), ceph_con_send()
3024 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), ceph_con_send()
3025 le32_to_cpu(msg->hdr.front_len), ceph_con_send()
3026 le32_to_cpu(msg->hdr.middle_len), ceph_con_send()
3027 le32_to_cpu(msg->hdr.data_len)); ceph_con_send()
3042 void ceph_msg_revoke(struct ceph_msg *msg) ceph_msg_revoke() argument
3044 struct ceph_connection *con = msg->con; ceph_msg_revoke()
3050 if (!list_empty(&msg->list_head)) { ceph_msg_revoke()
3051 dout("%s %p msg %p - was on queue\n", __func__, con, msg); ceph_msg_revoke()
3052 list_del_init(&msg->list_head); ceph_msg_revoke()
3053 BUG_ON(msg->con == NULL); ceph_msg_revoke()
3054 msg->con->ops->put(msg->con); ceph_msg_revoke()
3055 msg->con = NULL; ceph_msg_revoke()
3056 msg->hdr.seq = 0; ceph_msg_revoke()
3058 ceph_msg_put(msg); ceph_msg_revoke()
3060 if (con->out_msg == msg) { ceph_msg_revoke()
3066 BUG_ON(!msg->data_length); ceph_msg_revoke()
3068 con->out_skip += sizeof(msg->footer); ceph_msg_revoke()
3070 con->out_skip += sizeof(msg->old_footer); ceph_msg_revoke()
3073 if (msg->data_length) ceph_msg_revoke()
3074 con->out_skip += msg->cursor.total_resid; ceph_msg_revoke()
3075 if (msg->middle) ceph_msg_revoke()
3079 dout("%s %p msg %p - was sending, will write %d skip %d\n", ceph_msg_revoke()
3080 __func__, con, msg, con->out_kvec_bytes, con->out_skip); ceph_msg_revoke()
3081 msg->hdr.seq = 0; ceph_msg_revoke()
3083 ceph_msg_put(msg); ceph_msg_revoke()
3092 void ceph_msg_revoke_incoming(struct ceph_msg *msg) ceph_msg_revoke_incoming() argument
3096 BUG_ON(msg == NULL); ceph_msg_revoke_incoming()
3097 if (!msg->con) { ceph_msg_revoke_incoming()
3098 dout("%s msg %p null con\n", __func__, msg); ceph_msg_revoke_incoming()
3103 con = msg->con; ceph_msg_revoke_incoming()
3105 if (con->in_msg == msg) { ceph_msg_revoke_incoming()
3111 dout("%s %p msg %p revoked\n", __func__, con, msg); ceph_msg_revoke_incoming()
3123 dout("%s %p in_msg %p msg %p no-op\n", ceph_msg_revoke_incoming()
3124 __func__, con, con->in_msg, msg); ceph_msg_revoke_incoming()
3170 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, ceph_msg_data_add_pages() argument
3184 list_add_tail(&data->links, &msg->data); ceph_msg_data_add_pages()
3185 msg->data_length += length; ceph_msg_data_add_pages()
3189 void ceph_msg_data_add_pagelist(struct ceph_msg *msg, ceph_msg_data_add_pagelist() argument
3201 list_add_tail(&data->links, &msg->data); ceph_msg_data_add_pagelist()
3202 msg->data_length += pagelist->length; ceph_msg_data_add_pagelist()
3207 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, ceph_msg_data_add_bio() argument
3219 list_add_tail(&data->links, &msg->data); ceph_msg_data_add_bio()
3220 msg->data_length += length; ceph_msg_data_add_bio()
3227 * the new msg has a ref count of 1.
3284 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) ceph_alloc_middle() argument
3286 int type = le16_to_cpu(msg->hdr.type); ceph_alloc_middle()
3287 int middle_len = le32_to_cpu(msg->hdr.middle_len); ceph_alloc_middle()
3289 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, ceph_alloc_middle()
3292 BUG_ON(msg->middle); ceph_alloc_middle()
3294 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); ceph_alloc_middle()
3295 if (!msg->middle) ceph_alloc_middle()
3319 struct ceph_msg *msg; ceph_con_in_msg_alloc() local
3326 msg = con->ops->alloc_msg(con, hdr, skip); ceph_con_in_msg_alloc()
3329 if (msg) ceph_con_in_msg_alloc()
3330 ceph_msg_put(msg); ceph_con_in_msg_alloc()
3333 if (msg) { ceph_con_in_msg_alloc()
3335 con->in_msg = msg; ceph_con_in_msg_alloc()
3406 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) ceph_msg_get() argument
3408 dout("%s %p (was %d)\n", __func__, msg, ceph_msg_get()
3409 atomic_read(&msg->kref.refcount)); ceph_msg_get()
3410 kref_get(&msg->kref); ceph_msg_get()
3411 return msg; ceph_msg_get()
3415 void ceph_msg_put(struct ceph_msg *msg) ceph_msg_put() argument
3417 dout("%s %p (was %d)\n", __func__, msg, ceph_msg_put()
3418 atomic_read(&msg->kref.refcount)); ceph_msg_put()
3419 kref_put(&msg->kref, ceph_msg_release); ceph_msg_put()
3423 void ceph_msg_dump(struct ceph_msg *msg) ceph_msg_dump() argument
3425 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg, ceph_msg_dump()
3426 msg->front_alloc_len, msg->data_length); ceph_msg_dump()
3429 &msg->hdr, sizeof(msg->hdr), true); ceph_msg_dump()
3432 msg->front.iov_base, msg->front.iov_len, true); ceph_msg_dump()
3433 if (msg->middle) ceph_msg_dump()
3436 msg->middle->vec.iov_base, ceph_msg_dump()
3437 msg->middle->vec.iov_len, true); ceph_msg_dump()
3440 &msg->footer, sizeof(msg->footer), true); ceph_msg_dump()
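
ceph_tcp_recvmsg()/ceph_tcp_sendmsg() above wrap kernel_recvmsg()/kernel_sendmsg() with MSG_DONTWAIT | MSG_NOSIGNAL. A minimal sketch of that wrapper shape for a connected struct socket; the demo_* names are illustrative:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/uio.h>

/* Non-blocking receive into one buffer: build a kvec and let
 * kernel_recvmsg() fill it. */
static int demo_tcp_recv(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
}

/* Non-blocking send of one buffer; MSG_MORE hints that more data
 * follows so the stack may hold back the segment. */
static int demo_tcp_send(struct socket *sock, void *buf, size_t len, bool more)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	if (more)
		msg.msg_flags |= MSG_MORE;

	return kernel_sendmsg(sock, &msg, &iov, 1, len);
}
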
/linux-4.1.27/drivers/staging/rts5208/
trace.c
11 struct trace_msg_t *msg = &chip->trace_msg[chip->msg_idx]; _rtsx_trace() local
16 strncpy(msg->file, file, MSG_FILE_LEN - 1); _rtsx_trace()
17 strncpy(msg->func, func, MSG_FUNC_LEN - 1); _rtsx_trace()
18 msg->line = (u16)line; _rtsx_trace()
19 get_current_time(msg->timeval_buf, TIME_VAL_LEN); _rtsx_trace()
20 msg->valid = 1; _rtsx_trace()
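
_rtsx_trace() above records each call site into a small array of trace_msg_t slots. A self-contained user-space sketch of that record-the-call-site pattern; the struct layout, sizes and DEMO_TRACE() macro are illustrative, not the rts5208 definitions:

#include <stdio.h>
#include <string.h>
#include <time.h>

#define MSG_FILE_LEN	32
#define MSG_FUNC_LEN	32
#define TRACE_SLOTS	16

struct trace_msg {
	char file[MSG_FILE_LEN];
	char func[MSG_FUNC_LEN];
	unsigned short line;
	time_t when;
	int valid;
};

static struct trace_msg trace_ring[TRACE_SLOTS];
static int trace_idx;

/* Record one call site: truncated file/function names, line number,
 * timestamp, then mark the slot valid and advance the ring index. */
static void demo_trace(const char *file, const char *func, int line)
{
	struct trace_msg *msg = &trace_ring[trace_idx];

	strncpy(msg->file, file, MSG_FILE_LEN - 1);
	msg->file[MSG_FILE_LEN - 1] = '\0';
	strncpy(msg->func, func, MSG_FUNC_LEN - 1);
	msg->func[MSG_FUNC_LEN - 1] = '\0';
	msg->line = (unsigned short)line;
	msg->when = time(NULL);
	msg->valid = 1;

	trace_idx = (trace_idx + 1) % TRACE_SLOTS;
}

#define DEMO_TRACE()	demo_trace(__FILE__, __func__, __LINE__)

int main(void)
{
	DEMO_TRACE();
	printf("%s:%u in %s()\n", trace_ring[0].file,
	       trace_ring[0].line, trace_ring[0].func);
	return 0;
}
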
/linux-4.1.27/drivers/connector/
cn_proc.c
69 struct cn_msg *msg; proc_fork_connector() local
77 msg = buffer_to_cn_msg(buffer); proc_fork_connector()
78 ev = (struct proc_event *)msg->data; proc_fork_connector()
80 get_seq(&msg->seq, &ev->cpu); proc_fork_connector()
91 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); proc_fork_connector()
92 msg->ack = 0; /* not used */ proc_fork_connector()
93 msg->len = sizeof(*ev); proc_fork_connector()
94 msg->flags = 0; /* not used */ proc_fork_connector()
96 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); proc_fork_connector()
101 struct cn_msg *msg; proc_exec_connector() local
108 msg = buffer_to_cn_msg(buffer); proc_exec_connector()
109 ev = (struct proc_event *)msg->data; proc_exec_connector()
111 get_seq(&msg->seq, &ev->cpu); proc_exec_connector()
117 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); proc_exec_connector()
118 msg->ack = 0; /* not used */ proc_exec_connector()
119 msg->len = sizeof(*ev); proc_exec_connector()
120 msg->flags = 0; /* not used */ proc_exec_connector()
121 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); proc_exec_connector()
126 struct cn_msg *msg; proc_id_connector() local
134 msg = buffer_to_cn_msg(buffer); proc_id_connector()
135 ev = (struct proc_event *)msg->data; proc_id_connector()
153 get_seq(&msg->seq, &ev->cpu); proc_id_connector()
156 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); proc_id_connector()
157 msg->ack = 0; /* not used */ proc_id_connector()
158 msg->len = sizeof(*ev); proc_id_connector()
159 msg->flags = 0; /* not used */ proc_id_connector()
160 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); proc_id_connector()
165 struct cn_msg *msg; proc_sid_connector() local
172 msg = buffer_to_cn_msg(buffer); proc_sid_connector()
173 ev = (struct proc_event *)msg->data; proc_sid_connector()
175 get_seq(&msg->seq, &ev->cpu); proc_sid_connector()
181 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); proc_sid_connector()
182 msg->ack = 0; /* not used */ proc_sid_connector()
183 msg->len = sizeof(*ev); proc_sid_connector()
184 msg->flags = 0; /* not used */ proc_sid_connector()
185 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); proc_sid_connector()
190 struct cn_msg *msg; proc_ptrace_connector() local
197 msg = buffer_to_cn_msg(buffer); proc_ptrace_connector()
198 ev = (struct proc_event *)msg->data; proc_ptrace_connector()
200 get_seq(&msg->seq, &ev->cpu); proc_ptrace_connector()
214 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); proc_ptrace_connector()
215 msg->ack = 0; /* not used */ proc_ptrace_connector()
216 msg->len = sizeof(*ev); proc_ptrace_connector()
217 msg->flags = 0; /* not used */ proc_ptrace_connector()
218 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); proc_ptrace_connector()
223 struct cn_msg *msg; proc_comm_connector() local
230 msg = buffer_to_cn_msg(buffer); proc_comm_connector()
231 ev = (struct proc_event *)msg->data; proc_comm_connector()
233 get_seq(&msg->seq, &ev->cpu); proc_comm_connector()
240 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); proc_comm_connector()
241 msg->ack = 0; /* not used */ proc_comm_connector()
242 msg->len = sizeof(*ev); proc_comm_connector()
243 msg->flags = 0; /* not used */ proc_comm_connector()
244 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); proc_comm_connector()
249 struct cn_msg *msg; proc_coredump_connector() local
256 msg = buffer_to_cn_msg(buffer); proc_coredump_connector()
257 ev = (struct proc_event *)msg->data; proc_coredump_connector()
259 get_seq(&msg->seq, &ev->cpu); proc_coredump_connector()
265 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); proc_coredump_connector()
266 msg->ack = 0; /* not used */ proc_coredump_connector()
267 msg->len = sizeof(*ev); proc_coredump_connector()
268 msg->flags = 0; /* not used */ proc_coredump_connector()
269 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); proc_coredump_connector()
274 struct cn_msg *msg; proc_exit_connector() local
281 msg = buffer_to_cn_msg(buffer); proc_exit_connector()
282 ev = (struct proc_event *)msg->data; proc_exit_connector()
284 get_seq(&msg->seq, &ev->cpu); proc_exit_connector()
292 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); proc_exit_connector()
293 msg->ack = 0; /* not used */ proc_exit_connector()
294 msg->len = sizeof(*ev); proc_exit_connector()
295 msg->flags = 0; /* not used */ proc_exit_connector()
296 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); proc_exit_connector()
309 struct cn_msg *msg; cn_proc_ack() local
316 msg = buffer_to_cn_msg(buffer); cn_proc_ack()
317 ev = (struct proc_event *)msg->data; cn_proc_ack()
319 msg->seq = rcvd_seq; cn_proc_ack()
324 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); cn_proc_ack()
325 msg->ack = rcvd_ack + 1; cn_proc_ack()
326 msg->len = sizeof(*ev); cn_proc_ack()
327 msg->flags = 0; /* not used */ cn_proc_ack()
328 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); cn_proc_ack()
335 static void cn_proc_mcast_ctl(struct cn_msg *msg, cn_proc_mcast_ctl() argument
341 if (msg->len != sizeof(*mc_op)) cn_proc_mcast_ctl()
359 mc_op = (enum proc_cn_mcast_op *)msg->data; cn_proc_mcast_ctl()
373 cn_proc_ack(err, msg->seq, msg->ack); cn_proc_mcast_ctl()
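
Every proc_*_connector() above repeats the same fill-and-send pattern on a struct cn_msg: copy the connector id, set seq, zero the unused ack/flags, set len to the payload size, and pass it to cn_netlink_send(). A stripped-down sketch of that pattern; the demo_event payload and demo_send_event() are illustrative (the real code sends struct proc_event with cn_proc_event_id):

#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_event {			/* illustrative payload */
	u32 what;
	u32 data;
};

static struct cb_id demo_event_id = { .idx = CN_IDX_PROC, .val = CN_VAL_PROC };

static void demo_send_event(u32 seq, u32 what, u32 data)
{
	u8 buffer[sizeof(struct cn_msg) + sizeof(struct demo_event)] __aligned(8);
	struct cn_msg *msg = (struct cn_msg *)buffer;
	struct demo_event *ev = (struct demo_event *)msg->data;

	memset(buffer, 0, sizeof(buffer));

	memcpy(&msg->id, &demo_event_id, sizeof(msg->id));
	msg->seq = seq;
	msg->ack = 0;			/* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0;			/* not used */

	ev->what = what;
	ev->data = data;

	/* portid 0, multicast group CN_IDX_PROC, may sleep */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
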
connector.c
48 * msg->seq and msg->ack are used to determine message genealogy.
67 * If msg->len != len, then additional cn_msg messages are expected following
68 * the first msg.
73 int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group, cn_netlink_send_mult() argument
91 if (cn_cb_equal(&__cbq->id.id, &msg->id)) { cn_netlink_send_mult()
106 size = sizeof(*msg) + len; cn_netlink_send_mult()
112 nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0); cn_netlink_send_mult()
120 memcpy(data, msg, size); cn_netlink_send_mult()
131 /* same as cn_netlink_send_mult except msg->len is used for len */ cn_netlink_send()
132 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, cn_netlink_send() argument
135 return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask); cn_netlink_send()
147 struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb)); cn_call_callback() local
151 /* verify msg->len is within skb */ cn_call_callback()
153 if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len) cn_call_callback()
158 if (cn_cb_equal(&i->id.id, &msg->id)) { cn_call_callback()
167 cbq->callback(msg, nsp); cn_call_callback()
179 * It checks skb, netlink header and msg sizes, and calls callback helper.
/linux-4.1.27/drivers/isdn/act2000/
capi.c
145 m->msg.listen_req.controller = 0; actcapi_listen_req()
146 m->msg.listen_req.infomask = 0x3f; /* All information */ actcapi_listen_req()
147 m->msg.listen_req.eazmask = eazmask; actcapi_listen_req()
148 m->msg.listen_req.simask = (eazmask) ? 0x86 : 0; /* All SI's */ actcapi_listen_req()
166 m->msg.connect_req.controller = 0; actcapi_connect_req()
167 m->msg.connect_req.bchan = 0x83; actcapi_connect_req()
168 m->msg.connect_req.infomask = 0x3f; actcapi_connect_req()
169 m->msg.connect_req.si1 = si1; actcapi_connect_req()
170 m->msg.connect_req.si2 = si2; actcapi_connect_req()
171 m->msg.connect_req.eaz = eaz ? eaz : '0'; actcapi_connect_req()
172 m->msg.connect_req.addr.len = strlen(phone) + 1; actcapi_connect_req()
173 m->msg.connect_req.addr.tnp = 0x81; actcapi_connect_req()
174 memcpy(m->msg.connect_req.addr.num, phone, strlen(phone)); actcapi_connect_req()
188 m->msg.connect_b3_req.plci = chan->plci; actcapi_connect_b3_req()
189 memset(&m->msg.connect_b3_req.ncpi, 0, actcapi_connect_b3_req()
190 sizeof(m->msg.connect_b3_req.ncpi)); actcapi_connect_b3_req()
191 m->msg.connect_b3_req.ncpi.len = 13; actcapi_connect_b3_req()
192 m->msg.connect_b3_req.ncpi.modulo = 8; actcapi_connect_b3_req()
210 m->msg.manufacturer_req_net.manuf_msg = 0x11; actcapi_manufacturer_req_net()
211 m->msg.manufacturer_req_net.controller = 1; actcapi_manufacturer_req_net()
212 m->msg.manufacturer_req_net.nettype = (card->ptype == ISDN_PTYPE_EURO) ? 1 : 0; actcapi_manufacturer_req_net()
239 m->msg.manufacturer_req_v42.manuf_msg = 0x10;
240 m->msg.manufacturer_req_v42.controller = 0;
241 m->msg.manufacturer_req_v42.v42control = (arg ? 1 : 0);
262 m->msg.manufacturer_req_err.manuf_msg = 0x03; actcapi_manufacturer_req_errh()
263 m->msg.manufacturer_req_err.controller = 0; actcapi_manufacturer_req_errh()
289 m->msg.manufacturer_req_msn.manuf_msg = 0x13 + i; actcapi_manufacturer_req_msn()
290 m->msg.manufacturer_req_msn.controller = 0; actcapi_manufacturer_req_msn()
291 m->msg.manufacturer_req_msn.msnmap.eaz = p->eaz; actcapi_manufacturer_req_msn()
292 m->msg.manufacturer_req_msn.msnmap.len = len; actcapi_manufacturer_req_msn()
293 memcpy(m->msg.manufacturer_req_msn.msnmap.msn, p->msn, len); actcapi_manufacturer_req_msn()
309 m->msg.select_b2_protocol_req.plci = chan->plci; actcapi_select_b2_protocol_req()
310 memset(&m->msg.select_b2_protocol_req.dlpd, 0, actcapi_select_b2_protocol_req()
311 sizeof(m->msg.select_b2_protocol_req.dlpd)); actcapi_select_b2_protocol_req()
312 m->msg.select_b2_protocol_req.dlpd.len = 6; actcapi_select_b2_protocol_req()
315 m->msg.select_b2_protocol_req.protocol = 0x03; actcapi_select_b2_protocol_req()
316 m->msg.select_b2_protocol_req.dlpd.dlen = 4000; actcapi_select_b2_protocol_req()
319 m->msg.select_b2_protocol_req.protocol = 0x02; actcapi_select_b2_protocol_req()
320 m->msg.select_b2_protocol_req.dlpd.dlen = 4000; actcapi_select_b2_protocol_req()
325 m->msg.select_b2_protocol_req.protocol = 0x01; actcapi_select_b2_protocol_req()
326 m->msg.select_b2_protocol_req.dlpd.dlen = 4000; actcapi_select_b2_protocol_req()
327 m->msg.select_b2_protocol_req.dlpd.laa = 3; actcapi_select_b2_protocol_req()
328 m->msg.select_b2_protocol_req.dlpd.lab = 1; actcapi_select_b2_protocol_req()
329 m->msg.select_b2_protocol_req.dlpd.win = 7; actcapi_select_b2_protocol_req()
330 m->msg.select_b2_protocol_req.dlpd.modulo = 8; actcapi_select_b2_protocol_req()
344 m->msg.select_b3_protocol_req.plci = chan->plci; actcapi_select_b3_protocol_req()
345 memset(&m->msg.select_b3_protocol_req.ncpd, 0, actcapi_select_b3_protocol_req()
346 sizeof(m->msg.select_b3_protocol_req.ncpd)); actcapi_select_b3_protocol_req()
349 m->msg.select_b3_protocol_req.protocol = 0x04; actcapi_select_b3_protocol_req()
350 m->msg.select_b3_protocol_req.ncpd.len = 13; actcapi_select_b3_protocol_req()
351 m->msg.select_b3_protocol_req.ncpd.modulo = 8; actcapi_select_b3_protocol_req()
365 m->msg.listen_b3_req.plci = chan->plci; actcapi_listen_b3_req()
377 m->msg.disconnect_req.plci = chan->plci; actcapi_disconnect_req()
378 m->msg.disconnect_req.cause = 0; actcapi_disconnect_req()
390 m->msg.disconnect_b3_req.ncci = chan->ncci; actcapi_disconnect_b3_req()
391 memset(&m->msg.disconnect_b3_req.ncpi, 0, actcapi_disconnect_b3_req()
392 sizeof(m->msg.disconnect_b3_req.ncpi)); actcapi_disconnect_b3_req()
393 m->msg.disconnect_b3_req.ncpi.len = 13; actcapi_disconnect_b3_req()
394 m->msg.disconnect_b3_req.ncpi.modulo = 8; actcapi_disconnect_b3_req()
407 m->msg.connect_resp.plci = chan->plci; actcapi_connect_resp()
408 m->msg.connect_resp.rejectcause = cause; actcapi_connect_resp()
425 m->msg.connect_resp.plci = chan->plci; actcapi_connect_active_resp()
439 m->msg.connect_b3_resp.ncci = chan->ncci; actcapi_connect_b3_resp()
440 m->msg.connect_b3_resp.rejectcause = rejectcause; actcapi_connect_b3_resp()
442 memset(&m->msg.connect_b3_resp.ncpi, 0, actcapi_connect_b3_resp()
443 sizeof(m->msg.connect_b3_resp.ncpi)); actcapi_connect_b3_resp()
444 m->msg.connect_b3_resp.ncpi.len = 13; actcapi_connect_b3_resp()
445 m->msg.connect_b3_resp.ncpi.modulo = 8; actcapi_connect_b3_resp()
459 m->msg.connect_b3_active_resp.ncci = chan->ncci; actcapi_connect_b3_active_resp()
472 m->msg.info_resp.plci = chan->plci; actcapi_info_resp()
484 m->msg.disconnect_b3_resp.ncci = chan->ncci; actcapi_disconnect_b3_resp()
498 m->msg.disconnect_resp.plci = chan->plci; actcapi_disconnect_resp()
553 actcapi_msg *msg = (actcapi_msg *)skb->data; actcapi_data_b3_ind() local
555 EVAL_NCCI(msg->msg.data_b3_ind.fakencci, plci, controller, ncci); actcapi_data_b3_ind()
563 blocknr = msg->msg.data_b3_ind.blocknr; actcapi_data_b3_ind()
570 msg = (actcapi_msg *)skb_put(skb, 11); actcapi_data_b3_ind()
571 msg->hdr.len = 11; actcapi_data_b3_ind()
572 msg->hdr.applicationID = 1; actcapi_data_b3_ind()
573 msg->hdr.cmd.cmd = 0x86; actcapi_data_b3_ind()
574 msg->hdr.cmd.subcmd = 0x03; actcapi_data_b3_ind()
575 msg->hdr.msgnum = actcapi_nextsmsg(card); actcapi_data_b3_ind()
576 msg->msg.data_b3_resp.ncci = ncci; actcapi_data_b3_ind()
577 msg->msg.data_b3_resp.blocknr = blocknr; actcapi_data_b3_ind()
605 if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) && handle_ack()
606 (m->msg.data_b3_req.blocknr == blocknr)) { handle_ack()
609 chan->queued -= m->msg.data_b3_req.datalen; handle_ack()
610 if (m->msg.data_b3_req.flags) handle_ack()
611 ret = m->msg.data_b3_req.datalen; handle_ack()
634 actcapi_msg *msg; actcapi_dispatch() local
644 msg = (actcapi_msg *)skb->data; actcapi_dispatch()
645 ccmd = ((msg->hdr.cmd.cmd << 8) | msg->hdr.cmd.subcmd); actcapi_dispatch()
654 chan = find_ncci(card, msg->msg.data_b3_conf.ncci); actcapi_dispatch()
656 if (msg->msg.data_b3_conf.info != 0) actcapi_dispatch()
658 msg->msg.data_b3_conf.info); actcapi_dispatch()
660 msg->msg.data_b3_conf.blocknr); actcapi_dispatch()
672 chan = find_dialing(card, msg->hdr.msgnum); actcapi_dispatch()
674 if (msg->msg.connect_conf.info) { actcapi_dispatch()
682 card->bch[chan].plci = msg->msg.connect_conf.plci; actcapi_dispatch()
688 chan = new_plci(card, msg->msg.connect_ind.plci); actcapi_dispatch()
691 ctmp->plci = msg->msg.connect_ind.plci; actcapi_dispatch()
698 cmd.parm.setup.si1 = msg->msg.connect_ind.si1; actcapi_dispatch()
699 cmd.parm.setup.si2 = msg->msg.connect_ind.si2; actcapi_dispatch()
702 act2000_find_eaz(card, msg->msg.connect_ind.eaz)); actcapi_dispatch()
704 cmd.parm.setup.eazmsn[0] = msg->msg.connect_ind.eaz; actcapi_dispatch()
708 memcpy(cmd.parm.setup.phone, msg->msg.connect_ind.addr.num, actcapi_dispatch()
709 msg->msg.connect_ind.addr.len - 1); actcapi_dispatch()
710 cmd.parm.setup.plan = msg->msg.connect_ind.addr.tnp; actcapi_dispatch()
718 chan = find_plci(card, msg->msg.connect_active_ind.plci); actcapi_dispatch()
732 chan = find_plci(card, msg->msg.connect_b3_ind.plci); actcapi_dispatch()
734 card->bch[chan].ncci = msg->msg.connect_b3_ind.ncci; actcapi_dispatch()
738 ctmp->ncci = msg->msg.connect_b3_ind.ncci; actcapi_dispatch()
744 chan = find_ncci(card, msg->msg.connect_b3_active_ind.ncci); actcapi_dispatch()
755 chan = find_ncci(card, msg->msg.disconnect_b3_ind.ncci); actcapi_dispatch()
780 chan = find_plci(card, msg->msg.disconnect_ind.plci); actcapi_dispatch()
791 ctmp->plci = msg->msg.disconnect_ind.plci; actcapi_dispatch()
797 chan = find_plci(card, msg->msg.select_b2_protocol_conf.plci); actcapi_dispatch()
803 if (msg->msg.select_b2_protocol_conf.info == 0) actcapi_dispatch()
817 chan = find_plci(card, msg->msg.select_b3_protocol_conf.plci); actcapi_dispatch()
823 if (msg->msg.select_b3_protocol_conf.info == 0) actcapi_dispatch()
836 chan = find_plci(card, msg->msg.listen_b3_conf.plci); actcapi_dispatch()
841 if (msg->msg.listen_b3_conf.info == 0) actcapi_dispatch()
853 if (msg->msg.listen_b3_conf.info == 0) { actcapi_dispatch()
872 chan = find_plci(card, msg->msg.connect_b3_conf.plci); actcapi_dispatch()
875 if (msg->msg.connect_b3_conf.info) { actcapi_dispatch()
882 ctmp->ncci = msg->msg.connect_b3_conf.ncci; actcapi_dispatch()
889 chan = find_ncci(card, msg->msg.disconnect_b3_conf.ncci); actcapi_dispatch()
895 chan = find_plci(card, msg->msg.info_ind.plci); actcapi_dispatch()
909 if (msg->msg.manuf_msg == 3) { actcapi_dispatch()
912 &msg->msg.manufacturer_ind_err.errstring, actcapi_dispatch()
913 msg->hdr.len - 16); actcapi_dispatch()
914 if (msg->msg.manufacturer_ind_err.errcode) actcapi_dispatch()
1014 actcapi_msg *msg = (actcapi_msg *)skb->data; actcapi_debug_msg() local
1020 if (msg->hdr.cmd.cmd == 0x86) actcapi_debug_msg()
1028 if ((msg->hdr.cmd.cmd == valid_msg[i].cmd.cmd) && actcapi_debug_msg()
1029 (msg->hdr.cmd.subcmd == valid_msg[i].cmd.subcmd)) { actcapi_debug_msg()
1033 printk(KERN_DEBUG "%s %s msg\n", direction ? "Outgoing" : "Incoming", descr); actcapi_debug_msg()
1034 printk(KERN_DEBUG " ApplID = %d\n", msg->hdr.applicationID); actcapi_debug_msg()
1035 printk(KERN_DEBUG " Len = %d\n", msg->hdr.len); actcapi_debug_msg()
1036 printk(KERN_DEBUG " MsgNum = 0x%04x\n", msg->hdr.msgnum); actcapi_debug_msg()
1037 printk(KERN_DEBUG " Cmd = 0x%02x\n", msg->hdr.cmd.cmd); actcapi_debug_msg()
1038 printk(KERN_DEBUG " SubCmd = 0x%02x\n", msg->hdr.cmd.subcmd); actcapi_debug_msg()
1043 msg->msg.data_b3_ind.blocknr); actcapi_debug_msg()
1048 msg->msg.connect_conf.plci); actcapi_debug_msg()
1050 msg->msg.connect_conf.info); actcapi_debug_msg()
1055 msg->msg.connect_ind.plci); actcapi_debug_msg()
1057 msg->msg.connect_ind.controller); actcapi_debug_msg()
1059 msg->msg.connect_ind.si1); actcapi_debug_msg()
1061 msg->msg.connect_ind.si2); actcapi_debug_msg()
1063 msg->msg.connect_ind.eaz); actcapi_debug_msg()
1064 actcapi_debug_caddr(&msg->msg.connect_ind.addr); actcapi_debug_msg()
1069 msg->msg.connect_active_ind.plci); actcapi_debug_msg()
1070 actcapi_debug_caddr(&msg->msg.connect_active_ind.addr); actcapi_debug_msg()
1075 msg->msg.listen_conf.controller); actcapi_debug_msg()
1077 msg->msg.listen_conf.info); actcapi_debug_msg()
1082 msg->msg.info_ind.plci); actcapi_debug_msg()
1084 msg->msg.info_ind.nr.mask); actcapi_debug_msg()
1085 if (msg->hdr.len > 12) { actcapi_debug_msg()
1086 int l = msg->hdr.len - 12; actcapi_debug_msg()
1090 p += sprintf(p, "%02x ", msg->msg.info_ind.el.display[j]); actcapi_debug_msg()
1097 msg->msg.select_b2_protocol_conf.plci); actcapi_debug_msg()
1099 msg->msg.select_b2_protocol_conf.info); actcapi_debug_msg()
1104 msg->msg.select_b3_protocol_conf.plci); actcapi_debug_msg()
1106 msg->msg.select_b3_protocol_conf.info); actcapi_debug_msg()
1111 msg->msg.listen_b3_conf.plci); actcapi_debug_msg()
1113 msg->msg.listen_b3_conf.info); actcapi_debug_msg()
1118 msg->msg.connect_b3_ind.ncci); actcapi_debug_msg()
1120 msg->msg.connect_b3_ind.plci); actcapi_debug_msg()
1121 actcapi_debug_ncpi(&msg->msg.connect_b3_ind.ncpi); actcapi_debug_msg()
1126 msg->msg.connect_b3_active_ind.ncci); actcapi_debug_msg()
1127 actcapi_debug_ncpi(&msg->msg.connect_b3_active_ind.ncpi); actcapi_debug_msg()
1132 msg->msg.manufacturer_ind_err.manuf_msg); actcapi_debug_msg()
1133 switch (msg->msg.manufacturer_ind_err.manuf_msg) { actcapi_debug_msg()
1136 msg->msg.manufacturer_ind_err.controller); actcapi_debug_msg()
1138 msg->msg.manufacturer_ind_err.errcode); actcapi_debug_msg()
1140 strncpy(tmp, &msg->msg.manufacturer_ind_err.errstring, actcapi_debug_msg()
1141 msg->hdr.len - 16); actcapi_debug_msg()
1149 msg->msg.listen_req.infomask); actcapi_debug_msg()
1151 msg->msg.listen_req.eazmask); actcapi_debug_msg()
1153 msg->msg.listen_req.simask); actcapi_debug_msg()
1158 msg->msg.select_b2_protocol_req.plci); actcapi_debug_msg()
1160 msg->msg.select_b2_protocol_req.protocol); actcapi_debug_msg()
1161 if (msg->hdr.len >= 11) actcapi_debug_msg()
1164 actcapi_debug_dlpd(&msg->msg.select_b2_protocol_req.dlpd); actcapi_debug_msg()
1169 msg->msg.connect_resp.plci); actcapi_debug_msg()
1171 msg->msg.connect_resp.rejectcause); actcapi_debug_msg()
1176 msg->msg.connect_active_resp.plci); actcapi_debug_msg()
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
lib-msg.c
36 * lnet/lnet/lib-msg.c
61 lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type) lnet_build_msg_event() argument
63 lnet_hdr_t *hdr = &msg->msg_hdr; lnet_build_msg_event()
64 lnet_event_t *ev = &msg->msg_ev; lnet_build_msg_event()
66 LASSERT(!msg->msg_routing); lnet_build_msg_event()
85 ev->sender = msg->msg_from; lnet_build_msg_event()
86 ev->mlength = msg->msg_wanted; lnet_build_msg_event()
87 ev->offset = msg->msg_offset; lnet_build_msg_event()
95 ev->pt_index = hdr->msg.put.ptl_index; lnet_build_msg_event()
96 ev->match_bits = hdr->msg.put.match_bits; lnet_build_msg_event()
97 ev->hdr_data = hdr->msg.put.hdr_data; lnet_build_msg_event()
101 ev->pt_index = hdr->msg.get.ptl_index; lnet_build_msg_event()
102 ev->match_bits = hdr->msg.get.match_bits; lnet_build_msg_event()
107 ev->match_bits = hdr->msg.ack.match_bits; lnet_build_msg_event()
108 ev->mlength = hdr->msg.ack.mlength; lnet_build_msg_event()
115 if (msg->msg_type == LNET_MSG_PUT) { lnet_build_msg_event()
116 ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index); lnet_build_msg_event()
117 ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits); lnet_build_msg_event()
118 ev->offset = le32_to_cpu(hdr->msg.put.offset); lnet_build_msg_event()
121 ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data); lnet_build_msg_event()
124 LASSERT(msg->msg_type == LNET_MSG_GET); lnet_build_msg_event()
125 ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index); lnet_build_msg_event()
126 ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits); lnet_build_msg_event()
128 ev->rlength = le32_to_cpu(hdr->msg.get.sink_length); lnet_build_msg_event()
129 ev->offset = le32_to_cpu(hdr->msg.get.src_offset); lnet_build_msg_event()
137 lnet_msg_commit(lnet_msg_t *msg, int cpt) lnet_msg_commit() argument
143 LASSERT(!msg->msg_tx_committed); lnet_msg_commit()
145 if (msg->msg_sending) { lnet_msg_commit()
146 LASSERT(!msg->msg_receiving); lnet_msg_commit()
148 msg->msg_tx_cpt = cpt; lnet_msg_commit()
149 msg->msg_tx_committed = 1; lnet_msg_commit()
150 if (msg->msg_rx_committed) { /* routed message REPLY */ lnet_msg_commit()
151 LASSERT(msg->msg_onactivelist); lnet_msg_commit()
155 LASSERT(!msg->msg_sending); lnet_msg_commit()
156 msg->msg_rx_cpt = cpt; lnet_msg_commit()
157 msg->msg_rx_committed = 1; lnet_msg_commit()
160 LASSERT(!msg->msg_onactivelist); lnet_msg_commit()
161 msg->msg_onactivelist = 1; lnet_msg_commit()
162 list_add(&msg->msg_activelist, &container->msc_active); lnet_msg_commit()
170 lnet_msg_decommit_tx(lnet_msg_t *msg, int status) lnet_msg_decommit_tx() argument
173 lnet_event_t *ev = &msg->msg_ev; lnet_msg_decommit_tx()
175 LASSERT(msg->msg_tx_committed); lnet_msg_decommit_tx()
179 counters = the_lnet.ln_counters[msg->msg_tx_cpt]; lnet_msg_decommit_tx()
182 LASSERT(msg->msg_routing); lnet_msg_decommit_tx()
183 LASSERT(msg->msg_rx_committed); lnet_msg_decommit_tx()
186 counters->route_length += msg->msg_len; lnet_msg_decommit_tx()
192 LASSERT(!msg->msg_rx_committed); lnet_msg_decommit_tx()
194 LASSERT(msg->msg_type == LNET_MSG_ACK); lnet_msg_decommit_tx()
195 msg->msg_type = LNET_MSG_PUT; /* fix type */ lnet_msg_decommit_tx()
199 LASSERT(!msg->msg_rx_committed); lnet_msg_decommit_tx()
200 if (msg->msg_type == LNET_MSG_PUT) lnet_msg_decommit_tx()
201 counters->send_length += msg->msg_len; lnet_msg_decommit_tx()
205 LASSERT(msg->msg_rx_committed); lnet_msg_decommit_tx()
208 LASSERT(msg->msg_type == LNET_MSG_REPLY); lnet_msg_decommit_tx()
209 msg->msg_type = LNET_MSG_GET; /* fix type */ lnet_msg_decommit_tx()
215 lnet_return_tx_credits_locked(msg); lnet_msg_decommit_tx()
216 msg->msg_tx_committed = 0; lnet_msg_decommit_tx()
220 lnet_msg_decommit_rx(lnet_msg_t *msg, int status) lnet_msg_decommit_rx() argument
223 lnet_event_t *ev = &msg->msg_ev; lnet_msg_decommit_rx()
225 LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */ lnet_msg_decommit_rx()
226 LASSERT(msg->msg_rx_committed); lnet_msg_decommit_rx()
231 counters = the_lnet.ln_counters[msg->msg_rx_cpt]; lnet_msg_decommit_rx()
235 LASSERT(msg->msg_routing); lnet_msg_decommit_rx()
239 LASSERT(msg->msg_type == LNET_MSG_ACK); lnet_msg_decommit_rx()
247 LASSERT(msg->msg_type == LNET_MSG_REPLY || lnet_msg_decommit_rx()
248 msg->msg_type == LNET_MSG_GET); lnet_msg_decommit_rx()
249 counters->send_length += msg->msg_wanted; lnet_msg_decommit_rx()
253 LASSERT(msg->msg_type == LNET_MSG_PUT); lnet_msg_decommit_rx()
259 LASSERT(msg->msg_type == LNET_MSG_GET || lnet_msg_decommit_rx()
260 msg->msg_type == LNET_MSG_REPLY); lnet_msg_decommit_rx()
266 counters->recv_length += msg->msg_wanted; lnet_msg_decommit_rx()
269 lnet_return_rx_credits_locked(msg); lnet_msg_decommit_rx()
270 msg->msg_rx_committed = 0; lnet_msg_decommit_rx()
274 lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status) lnet_msg_decommit() argument
278 LASSERT(msg->msg_tx_committed || msg->msg_rx_committed); lnet_msg_decommit()
279 LASSERT(msg->msg_onactivelist); lnet_msg_decommit()
281 if (msg->msg_tx_committed) { /* always decommit for sending first */ lnet_msg_decommit()
282 LASSERT(cpt == msg->msg_tx_cpt); lnet_msg_decommit()
283 lnet_msg_decommit_tx(msg, status); lnet_msg_decommit()
286 if (msg->msg_rx_committed) { lnet_msg_decommit()
287 /* forwarding msg committed for both receiving and sending */ lnet_msg_decommit()
288 if (cpt != msg->msg_rx_cpt) { lnet_msg_decommit()
290 cpt2 = msg->msg_rx_cpt; lnet_msg_decommit()
293 lnet_msg_decommit_rx(msg, status); lnet_msg_decommit()
296 list_del(&msg->msg_activelist); lnet_msg_decommit()
297 msg->msg_onactivelist = 0; lnet_msg_decommit()
308 lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md, lnet_msg_attach_md() argument
316 LASSERT(!msg->msg_routing); lnet_msg_attach_md()
318 msg->msg_md = md; lnet_msg_attach_md()
319 if (msg->msg_receiving) { /* committed for receiving */ lnet_msg_attach_md()
320 msg->msg_offset = offset; lnet_msg_attach_md()
321 msg->msg_wanted = mlen; lnet_msg_attach_md()
331 lnet_md2handle(&msg->msg_ev.md_handle, md); lnet_msg_attach_md()
332 lnet_md_deconstruct(md, &msg->msg_ev.md); lnet_msg_attach_md()
336 lnet_msg_detach_md(lnet_msg_t *msg, int status) lnet_msg_detach_md() argument
338 lnet_libmd_t *md = msg->msg_md; lnet_msg_detach_md()
347 msg->msg_ev.status = status; lnet_msg_detach_md()
348 msg->msg_ev.unlinked = unlink; lnet_msg_detach_md()
349 lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev); lnet_msg_detach_md()
355 msg->msg_md = NULL; lnet_msg_detach_md()
359 lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) lnet_complete_msg_locked() argument
363 int status = msg->msg_ev.status; lnet_complete_msg_locked()
365 LASSERT(msg->msg_onactivelist); lnet_complete_msg_locked()
367 if (status == 0 && msg->msg_ack) { lnet_complete_msg_locked()
370 lnet_msg_decommit(msg, cpt, 0); lnet_complete_msg_locked()
372 msg->msg_ack = 0; lnet_complete_msg_locked()
375 LASSERT(msg->msg_ev.type == LNET_EVENT_PUT); lnet_complete_msg_locked()
376 LASSERT(!msg->msg_routing); lnet_complete_msg_locked()
378 ack_wmd = msg->msg_hdr.msg.put.ack_wmd; lnet_complete_msg_locked()
380 lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0); lnet_complete_msg_locked()
382 msg->msg_hdr.msg.ack.dst_wmd = ack_wmd; lnet_complete_msg_locked()
383 msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits; lnet_complete_msg_locked()
384 msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength); lnet_complete_msg_locked()
386 /* NB: we probably want to use NID of msg::msg_from as 3rd lnet_complete_msg_locked()
388 rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY); lnet_complete_msg_locked()
405 (msg->msg_routing && !msg->msg_sending)) { lnet_complete_msg_locked()
407 LASSERT(!msg->msg_receiving); /* called back recv already */ lnet_complete_msg_locked()
410 rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY); lnet_complete_msg_locked()
429 lnet_msg_decommit(msg, cpt, status); lnet_complete_msg_locked()
430 lnet_msg_free_locked(msg); lnet_complete_msg_locked()
435 lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) lnet_finalize() argument
445 if (msg == NULL) lnet_finalize()
448 CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n", lnet_finalize()
449 lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target), lnet_finalize()
450 msg->msg_target_is_router ? "t" : "", lnet_finalize()
451 msg->msg_routing ? "X" : "", lnet_finalize()
452 msg->msg_ack ? "A" : "", lnet_finalize()
453 msg->msg_sending ? "S" : "", lnet_finalize()
454 msg->msg_receiving ? "R" : "", lnet_finalize()
455 msg->msg_delayed ? "d" : "", lnet_finalize()
456 msg->msg_txcredit ? "C" : "", lnet_finalize()
457 msg->msg_peertxcredit ? "c" : "", lnet_finalize()
458 msg->msg_rtrcredit ? "F" : "", lnet_finalize()
459 msg->msg_peerrtrcredit ? "f" : "", lnet_finalize()
460 msg->msg_onactivelist ? "!" : "", lnet_finalize()
461 msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid), lnet_finalize()
462 msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid)); lnet_finalize()
464 msg->msg_ev.status = status; lnet_finalize()
466 if (msg->msg_md != NULL) { lnet_finalize()
467 cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie); lnet_finalize()
470 lnet_msg_detach_md(msg, status); lnet_finalize()
476 if (!msg->msg_tx_committed && !msg->msg_rx_committed) { lnet_finalize()
478 LASSERT(!msg->msg_onactivelist); lnet_finalize()
479 lnet_msg_free(msg); lnet_finalize()
488 cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt; lnet_finalize()
492 list_add_tail(&msg->msg_list, &container->msc_finalizing); lnet_finalize()
514 msg = list_entry(container->msc_finalizing.next, lnet_finalize()
517 list_del(&msg->msg_list); lnet_finalize()
521 rc = lnet_complete_msg_locked(msg, cpt); lnet_finalize()
543 lnet_msg_t *msg = list_entry(container->msc_active.next, lnet_msg_container_cleanup() local
546 LASSERT(msg->msg_onactivelist); lnet_msg_container_cleanup()
547 msg->msg_onactivelist = 0; lnet_msg_container_cleanup()
548 list_del(&msg->msg_activelist); lnet_msg_container_cleanup()
549 lnet_msg_free(msg); lnet_msg_container_cleanup()
554 CERROR("%d active msg on exit\n", count); lnet_msg_container_cleanup()
lib-move.c
565 lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, lnet_ni_recv() argument
574 LASSERT(mlen == 0 || msg != NULL); lnet_ni_recv()
576 if (msg != NULL) { lnet_ni_recv()
577 LASSERT(msg->msg_receiving); lnet_ni_recv()
578 LASSERT(!msg->msg_sending); lnet_ni_recv()
579 LASSERT(rlen == msg->msg_len); lnet_ni_recv()
580 LASSERT(mlen <= msg->msg_len); lnet_ni_recv()
581 LASSERT(msg->msg_offset == offset); lnet_ni_recv()
582 LASSERT(msg->msg_wanted == mlen); lnet_ni_recv()
584 msg->msg_receiving = 0; lnet_ni_recv()
587 niov = msg->msg_niov; lnet_ni_recv()
588 iov = msg->msg_iov; lnet_ni_recv()
589 kiov = msg->msg_kiov; lnet_ni_recv()
596 rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed, lnet_ni_recv()
599 lnet_finalize(ni, msg, rc); lnet_ni_recv()
603 lnet_setpayloadbuffer(lnet_msg_t *msg) lnet_setpayloadbuffer() argument
605 lnet_libmd_t *md = msg->msg_md; lnet_setpayloadbuffer()
607 LASSERT(msg->msg_len > 0); lnet_setpayloadbuffer()
608 LASSERT(!msg->msg_routing); lnet_setpayloadbuffer()
610 LASSERT(msg->msg_niov == 0); lnet_setpayloadbuffer()
611 LASSERT(msg->msg_iov == NULL); lnet_setpayloadbuffer()
612 LASSERT(msg->msg_kiov == NULL); lnet_setpayloadbuffer()
614 msg->msg_niov = md->md_niov; lnet_setpayloadbuffer()
616 msg->msg_kiov = md->md_iov.kiov; lnet_setpayloadbuffer()
618 msg->msg_iov = md->md_iov.iov; lnet_setpayloadbuffer()
622 lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target, lnet_prep_send() argument
625 msg->msg_type = type; lnet_prep_send()
626 msg->msg_target = target; lnet_prep_send()
627 msg->msg_len = len; lnet_prep_send()
628 msg->msg_offset = offset; lnet_prep_send()
631 lnet_setpayloadbuffer(msg); lnet_prep_send()
633 memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr)); lnet_prep_send()
634 msg->msg_hdr.type = cpu_to_le32(type); lnet_prep_send()
635 msg->msg_hdr.dest_nid = cpu_to_le64(target.nid); lnet_prep_send()
636 msg->msg_hdr.dest_pid = cpu_to_le32(target.pid); lnet_prep_send()
638 msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid); lnet_prep_send()
639 msg->msg_hdr.payload_length = cpu_to_le32(len); lnet_prep_send()
643 lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg) lnet_ni_send() argument
645 void *priv = msg->msg_private; lnet_ni_send()
650 (msg->msg_txcredit && msg->msg_peertxcredit)); lnet_ni_send()
652 rc = (ni->ni_lnd->lnd_send)(ni, priv, msg); lnet_ni_send()
654 lnet_finalize(ni, msg, rc); lnet_ni_send()
658 lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg) lnet_ni_eager_recv() argument
662 LASSERT(!msg->msg_sending); lnet_ni_eager_recv()
663 LASSERT(msg->msg_receiving); lnet_ni_eager_recv()
664 LASSERT(!msg->msg_rx_ready_delay); lnet_ni_eager_recv()
667 msg->msg_rx_ready_delay = 1; lnet_ni_eager_recv()
668 rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg, lnet_ni_eager_recv()
669 &msg->msg_private); lnet_ni_eager_recv()
672 libcfs_nid2str(msg->msg_rxpeer->lp_nid), lnet_ni_eager_recv()
673 libcfs_id2str(msg->msg_target), rc); lnet_ni_eager_recv()
775 * \param msg The message to be sent.
780 * \retval 0 If \a msg sent or OK to send.
781 * \retval EAGAIN If \a msg blocked for credit.
786 lnet_post_send_locked(lnet_msg_t *msg, int do_send) lnet_post_send_locked() argument
788 lnet_peer_t *lp = msg->msg_txpeer; lnet_post_send_locked()
790 int cpt = msg->msg_tx_cpt; lnet_post_send_locked()
794 LASSERT(!do_send || msg->msg_tx_delayed); lnet_post_send_locked()
795 LASSERT(!msg->msg_receiving); lnet_post_send_locked()
796 LASSERT(msg->msg_tx_committed); lnet_post_send_locked()
799 if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 && lnet_post_send_locked()
802 the_lnet.ln_counters[cpt]->drop_length += msg->msg_len; lnet_post_send_locked()
806 libcfs_id2str(msg->msg_target)); lnet_post_send_locked()
808 lnet_finalize(ni, msg, -EHOSTUNREACH); lnet_post_send_locked()
814 if (msg->msg_md != NULL && lnet_post_send_locked()
815 (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) { lnet_post_send_locked()
819 libcfs_id2str(msg->msg_target)); lnet_post_send_locked()
821 lnet_finalize(ni, msg, -ECANCELED); lnet_post_send_locked()
827 if (!msg->msg_peertxcredit) { lnet_post_send_locked()
831 msg->msg_peertxcredit = 1; lnet_post_send_locked()
832 lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t); lnet_post_send_locked()
839 msg->msg_tx_delayed = 1; lnet_post_send_locked()
840 list_add_tail(&msg->msg_list, &lp->lp_txq); lnet_post_send_locked()
845 if (!msg->msg_txcredit) { lnet_post_send_locked()
849 msg->msg_txcredit = 1; lnet_post_send_locked()
856 msg->msg_tx_delayed = 1; lnet_post_send_locked()
857 list_add_tail(&msg->msg_list, &tq->tq_delayed); lnet_post_send_locked()
864 lnet_ni_send(ni, msg); lnet_post_send_locked()
872 lnet_msg2bufpool(lnet_msg_t *msg) lnet_msg2bufpool() argument
877 LASSERT(msg->msg_rx_committed); lnet_msg2bufpool()
879 cpt = msg->msg_rx_cpt; lnet_msg2bufpool()
882 LASSERT(msg->msg_len <= LNET_MTU); lnet_msg2bufpool()
883 while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) { lnet_msg2bufpool()
892 lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) lnet_post_routed_recv_locked() argument
896 * return EAGAIN if msg blocked and 0 if received or OK to receive */ lnet_post_routed_recv_locked()
897 lnet_peer_t *lp = msg->msg_rxpeer; lnet_post_routed_recv_locked()
901 LASSERT(msg->msg_iov == NULL); lnet_post_routed_recv_locked()
902 LASSERT(msg->msg_kiov == NULL); lnet_post_routed_recv_locked()
903 LASSERT(msg->msg_niov == 0); lnet_post_routed_recv_locked()
904 LASSERT(msg->msg_routing); lnet_post_routed_recv_locked()
905 LASSERT(msg->msg_receiving); lnet_post_routed_recv_locked()
906 LASSERT(!msg->msg_sending); lnet_post_routed_recv_locked()
909 LASSERT(!do_recv || msg->msg_rx_delayed); lnet_post_routed_recv_locked()
911 if (!msg->msg_peerrtrcredit) { lnet_post_routed_recv_locked()
915 msg->msg_peerrtrcredit = 1; lnet_post_routed_recv_locked()
922 LASSERT(msg->msg_rx_ready_delay); lnet_post_routed_recv_locked()
923 msg->msg_rx_delayed = 1; lnet_post_routed_recv_locked()
924 list_add_tail(&msg->msg_list, &lp->lp_rtrq); lnet_post_routed_recv_locked()
929 rbp = lnet_msg2bufpool(msg); lnet_post_routed_recv_locked()
931 if (!msg->msg_rtrcredit) { lnet_post_routed_recv_locked()
935 msg->msg_rtrcredit = 1; lnet_post_routed_recv_locked()
942 LASSERT(msg->msg_rx_ready_delay); lnet_post_routed_recv_locked()
943 msg->msg_rx_delayed = 1; lnet_post_routed_recv_locked()
944 list_add_tail(&msg->msg_list, &rbp->rbp_msgs); lnet_post_routed_recv_locked()
953 msg->msg_niov = rbp->rbp_npages; lnet_post_routed_recv_locked()
954 msg->msg_kiov = &rb->rb_kiov[0]; lnet_post_routed_recv_locked()
957 int cpt = msg->msg_rx_cpt; lnet_post_routed_recv_locked()
960 lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1, lnet_post_routed_recv_locked()
961 0, msg->msg_len, msg->msg_len); lnet_post_routed_recv_locked()
968 lnet_return_tx_credits_locked(lnet_msg_t *msg) lnet_return_tx_credits_locked() argument
970 lnet_peer_t *txpeer = msg->msg_txpeer; lnet_return_tx_credits_locked()
973 if (msg->msg_txcredit) { lnet_return_tx_credits_locked()
975 struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt]; lnet_return_tx_credits_locked()
978 msg->msg_txcredit = 0; lnet_return_tx_credits_locked()
996 if (msg->msg_peertxcredit) { lnet_return_tx_credits_locked()
998 msg->msg_peertxcredit = 0; lnet_return_tx_credits_locked()
1003 txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t); lnet_return_tx_credits_locked()
1020 msg->msg_txpeer = NULL; lnet_return_tx_credits_locked()
1026 lnet_return_rx_credits_locked(lnet_msg_t *msg) lnet_return_rx_credits_locked() argument
1028 lnet_peer_t *rxpeer = msg->msg_rxpeer; lnet_return_rx_credits_locked()
1031 if (msg->msg_rtrcredit) { lnet_return_rx_credits_locked()
1036 /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays lnet_return_rx_credits_locked()
1039 LASSERT(msg->msg_kiov != NULL); lnet_return_rx_credits_locked()
1041 rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]); lnet_return_rx_credits_locked()
1043 LASSERT(rbp == lnet_msg2bufpool(msg)); lnet_return_rx_credits_locked()
1045 msg->msg_kiov = NULL; lnet_return_rx_credits_locked()
1046 msg->msg_rtrcredit = 0; lnet_return_rx_credits_locked()
1064 if (msg->msg_peerrtrcredit) { lnet_return_rx_credits_locked()
1066 msg->msg_peerrtrcredit = 0; lnet_return_rx_credits_locked()
1081 msg->msg_rxpeer = NULL; lnet_return_rx_credits_locked()
1183 lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) lnet_send() argument
1185 lnet_nid_t dst_nid = msg->msg_target.nid; lnet_send()
1197 LASSERT(msg->msg_txpeer == NULL); lnet_send()
1198 LASSERT(!msg->msg_sending); lnet_send()
1199 LASSERT(!msg->msg_target_is_router); lnet_send()
1200 LASSERT(!msg->msg_receiving); lnet_send()
1202 msg->msg_sending = 1; lnet_send()
1204 LASSERT(!msg->msg_tx_committed); lnet_send()
1225 LASSERT(!msg->msg_routing); lnet_send()
1248 lnet_msg_commit(msg, cpt); lnet_send()
1250 if (!msg->msg_routing) lnet_send()
1251 msg->msg_hdr.src_nid = cpu_to_le64(src_nid); lnet_send()
1256 lnet_ni_send(src_ni, msg); lnet_send()
1284 libcfs_id2str(msg->msg_target), lnet_send()
1308 lnet_msgtyp2str(msg->msg_type), msg->msg_len); lnet_send()
1321 lnet_msg_commit(msg, cpt); lnet_send()
1323 if (!msg->msg_routing) { lnet_send()
1325 msg->msg_hdr.src_nid = cpu_to_le64(src_nid); lnet_send()
1328 msg->msg_target_is_router = 1; lnet_send()
1329 msg->msg_target.nid = lp->lp_nid; lnet_send()
1330 msg->msg_target.pid = LUSTRE_SRV_LNET_PID; lnet_send()
1335 LASSERT(!msg->msg_peertxcredit); lnet_send()
1336 LASSERT(!msg->msg_txcredit); lnet_send()
1337 LASSERT(msg->msg_txpeer == NULL); lnet_send()
1339 msg->msg_txpeer = lp; /* msg takes my ref on lp */ lnet_send()
1341 rc = lnet_post_send_locked(msg, 0); lnet_send()
1348 lnet_ni_send(src_ni, msg); lnet_send()
1365 lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg) lnet_recv_put() argument
1367 lnet_hdr_t *hdr = &msg->msg_hdr; lnet_recv_put()
1369 if (msg->msg_wanted != 0) lnet_recv_put()
1370 lnet_setpayloadbuffer(msg); lnet_recv_put()
1372 lnet_build_msg_event(msg, LNET_EVENT_PUT); lnet_recv_put()
1376 msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) && lnet_recv_put()
1377 (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0); lnet_recv_put()
1379 lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed, lnet_recv_put()
1380 msg->msg_offset, msg->msg_wanted, hdr->payload_length); lnet_recv_put()
1384 lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) lnet_parse_put() argument
1386 lnet_hdr_t *hdr = &msg->msg_hdr; lnet_parse_put()
1391 hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits); lnet_parse_put()
1392 hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index); lnet_parse_put()
1393 hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset); lnet_parse_put()
1398 info.mi_portal = hdr->msg.put.ptl_index; lnet_parse_put()
1400 info.mi_roffset = hdr->msg.put.offset; lnet_parse_put()
1401 info.mi_mbits = hdr->msg.put.match_bits; lnet_parse_put()
1403 msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL; lnet_parse_put()
1406 rc = lnet_ptl_match_md(&info, msg); lnet_parse_put()
1412 lnet_recv_put(ni, msg); lnet_parse_put()
1416 if (msg->msg_rx_delayed) /* attached on delayed list */ lnet_parse_put()
1419 rc = lnet_ni_eager_recv(ni, msg); lnet_parse_put()
1434 lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get) lnet_parse_get() argument
1437 lnet_hdr_t *hdr = &msg->msg_hdr; lnet_parse_get()
1442 hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits); lnet_parse_get()
1443 hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index); lnet_parse_get()
1444 hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length); lnet_parse_get()
1445 hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset); lnet_parse_get()
1450 info.mi_portal = hdr->msg.get.ptl_index; lnet_parse_get()
1451 info.mi_rlength = hdr->msg.get.sink_length; lnet_parse_get()
1452 info.mi_roffset = hdr->msg.get.src_offset; lnet_parse_get()
1453 info.mi_mbits = hdr->msg.get.match_bits; lnet_parse_get()
1455 rc = lnet_ptl_match_md(&info, msg); lnet_parse_get()
1465 lnet_build_msg_event(msg, LNET_EVENT_GET); lnet_parse_get()
1467 reply_wmd = hdr->msg.get.return_wmd; lnet_parse_get()
1469 lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id, lnet_parse_get()
1470 msg->msg_offset, msg->msg_wanted); lnet_parse_get()
1472 msg->msg_hdr.msg.reply.dst_wmd = reply_wmd; lnet_parse_get()
1476 lnet_ni_recv(ni, msg->msg_private, msg, 0, lnet_parse_get()
1477 msg->msg_offset, msg->msg_len, msg->msg_len); lnet_parse_get()
1481 lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0); lnet_parse_get()
1482 msg->msg_receiving = 0; lnet_parse_get()
1484 rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY); lnet_parse_get()
1491 lnet_finalize(ni, msg, rc); lnet_parse_get()
1498 lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg) lnet_parse_reply() argument
1500 void *private = msg->msg_private; lnet_parse_reply()
1501 lnet_hdr_t *hdr = &msg->msg_hdr; lnet_parse_reply()
1508 cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie); lnet_parse_reply()
1515 md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd); lnet_parse_reply()
1520 hdr->msg.reply.dst_wmd.wh_interface_cookie, lnet_parse_reply()
1521 hdr->msg.reply.dst_wmd.wh_object_cookie); lnet_parse_reply()
1539 rlength, hdr->msg.reply.dst_wmd.wh_object_cookie, lnet_parse_reply()
1547 mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie); lnet_parse_reply()
1549 lnet_msg_attach_md(msg, md, 0, mlength); lnet_parse_reply()
1552 lnet_setpayloadbuffer(msg); lnet_parse_reply()
1556 lnet_build_msg_event(msg, LNET_EVENT_REPLY); lnet_parse_reply()
1558 lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength); lnet_parse_reply()
1563 lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg) lnet_parse_ack() argument
1565 lnet_hdr_t *hdr = &msg->msg_hdr; lnet_parse_ack()
1574 hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits); lnet_parse_ack()
1575 hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength); lnet_parse_ack()
1577 cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie); lnet_parse_ack()
1581 md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd); lnet_parse_ack()
1588 hdr->msg.ack.dst_wmd.wh_interface_cookie, lnet_parse_ack()
1589 hdr->msg.ack.dst_wmd.wh_object_cookie); lnet_parse_ack()
1600 hdr->msg.ack.dst_wmd.wh_object_cookie); lnet_parse_ack()
1602 lnet_msg_attach_md(msg, md, 0, 0); lnet_parse_ack()
1606 lnet_build_msg_event(msg, LNET_EVENT_ACK); lnet_parse_ack()
1608 lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len); lnet_parse_ack()
1613 lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg) lnet_parse_forward_locked() argument
1617 if (msg->msg_rxpeer->lp_rtrcredits <= 0 || lnet_parse_forward_locked()
1618 lnet_msg2bufpool(msg)->rbp_credits <= 0) { lnet_parse_forward_locked()
1620 msg->msg_rx_ready_delay = 1; lnet_parse_forward_locked()
1622 lnet_net_unlock(msg->msg_rx_cpt); lnet_parse_forward_locked()
1623 rc = lnet_ni_eager_recv(ni, msg); lnet_parse_forward_locked()
1624 lnet_net_lock(msg->msg_rx_cpt); lnet_parse_forward_locked()
1629 rc = lnet_post_routed_recv_locked(msg, 0); lnet_parse_forward_locked()
1676 hdr->msg.put.ptl_index, lnet_print_hdr()
1677 hdr->msg.put.ack_wmd.wh_interface_cookie, lnet_print_hdr()
1678 hdr->msg.put.ack_wmd.wh_object_cookie, lnet_print_hdr()
1679 hdr->msg.put.match_bits); lnet_print_hdr()
1681 hdr->payload_length, hdr->msg.put.offset, lnet_print_hdr()
1682 hdr->msg.put.hdr_data); lnet_print_hdr()
1687 hdr->msg.get.ptl_index, lnet_print_hdr()
1688 hdr->msg.get.return_wmd.wh_interface_cookie, lnet_print_hdr()
1689 hdr->msg.get.return_wmd.wh_object_cookie, lnet_print_hdr()
1690 hdr->msg.get.match_bits); lnet_print_hdr()
1692 hdr->msg.get.sink_length, lnet_print_hdr()
1693 hdr->msg.get.src_offset); lnet_print_hdr()
1698 hdr->msg.ack.dst_wmd.wh_interface_cookie, lnet_print_hdr()
1699 hdr->msg.ack.dst_wmd.wh_object_cookie, lnet_print_hdr()
1700 hdr->msg.ack.mlength); lnet_print_hdr()
1705 hdr->msg.reply.dst_wmd.wh_interface_cookie, lnet_print_hdr()
1706 hdr->msg.reply.dst_wmd.wh_object_cookie, lnet_print_hdr()
1719 struct lnet_msg *msg; lnet_parse() local
1834 msg = lnet_msg_alloc(); lnet_parse()
1835 if (msg == NULL) { lnet_parse()
1842 /* msg zeroed in lnet_msg_alloc; lnet_parse()
1846 msg->msg_type = type; lnet_parse()
1847 msg->msg_private = private; lnet_parse()
1848 msg->msg_receiving = 1; lnet_parse()
1849 msg->msg_len = msg->msg_wanted = payload_length; lnet_parse()
1850 msg->msg_offset = 0; lnet_parse()
1851 msg->msg_hdr = *hdr; lnet_parse()
1853 msg->msg_from = from_nid; lnet_parse()
1855 msg->msg_target.pid = dest_pid; lnet_parse()
1856 msg->msg_target.nid = dest_nid; lnet_parse()
1857 msg->msg_routing = 1; lnet_parse()
1860 /* convert common msg->hdr fields to host byteorder */ lnet_parse()
1861 msg->msg_hdr.type = type; lnet_parse()
1862 msg->msg_hdr.src_nid = src_nid; lnet_parse()
1863 msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid); lnet_parse()
1864 msg->msg_hdr.dest_nid = dest_nid; lnet_parse()
1865 msg->msg_hdr.dest_pid = dest_pid; lnet_parse()
1866 msg->msg_hdr.payload_length = payload_length; lnet_parse()
1870 rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt); lnet_parse()
1876 lnet_msg_free(msg); lnet_parse()
1880 if (lnet_isrouter(msg->msg_rxpeer)) { lnet_parse()
1881 lnet_peer_set_alive(msg->msg_rxpeer); lnet_parse()
1888 lnet_router_ni_update_locked(msg->msg_rxpeer, lnet_parse()
1893 lnet_msg_commit(msg, cpt); lnet_parse()
1896 rc = lnet_parse_forward_locked(ni, msg); lnet_parse()
1902 lnet_ni_recv(ni, msg->msg_private, msg, 0, lnet_parse()
1912 rc = lnet_parse_ack(ni, msg); lnet_parse()
1915 rc = lnet_parse_put(ni, msg); lnet_parse()
1918 rc = lnet_parse_get(ni, msg, rdma_req); lnet_parse()
1921 rc = lnet_parse_reply(ni, msg); lnet_parse()
1935 LASSERT(msg->msg_md == NULL); lnet_parse()
1936 lnet_finalize(ni, msg, rc); lnet_parse()
1949 lnet_msg_t *msg; lnet_drop_delayed_msg_list() local
1951 msg = list_entry(head->next, lnet_msg_t, msg_list); lnet_drop_delayed_msg_list()
1952 list_del(&msg->msg_list); lnet_drop_delayed_msg_list()
1954 id.nid = msg->msg_hdr.src_nid; lnet_drop_delayed_msg_list()
1955 id.pid = msg->msg_hdr.src_pid; lnet_drop_delayed_msg_list()
1957 LASSERT(msg->msg_md == NULL); lnet_drop_delayed_msg_list()
1958 LASSERT(msg->msg_rx_delayed); lnet_drop_delayed_msg_list()
1959 LASSERT(msg->msg_rxpeer != NULL); lnet_drop_delayed_msg_list()
1960 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT); lnet_drop_delayed_msg_list()
1964 msg->msg_hdr.msg.put.ptl_index, lnet_drop_delayed_msg_list()
1965 msg->msg_hdr.msg.put.match_bits, lnet_drop_delayed_msg_list()
1966 msg->msg_hdr.msg.put.offset, lnet_drop_delayed_msg_list()
1967 msg->msg_hdr.payload_length, reason); lnet_drop_delayed_msg_list()
1969 /* NB I can't drop msg's ref on msg_rxpeer until after I've lnet_drop_delayed_msg_list()
1970 * called lnet_drop_message(), so I just hang onto msg as well lnet_drop_delayed_msg_list()
1973 lnet_drop_message(msg->msg_rxpeer->lp_ni, lnet_drop_delayed_msg_list()
1974 msg->msg_rxpeer->lp_cpt, lnet_drop_delayed_msg_list()
1975 msg->msg_private, msg->msg_len); lnet_drop_delayed_msg_list()
1981 lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT); lnet_drop_delayed_msg_list()
1989 lnet_msg_t *msg; lnet_recv_delayed_msg_list() local
1992 msg = list_entry(head->next, lnet_msg_t, msg_list); lnet_recv_delayed_msg_list()
1993 list_del(&msg->msg_list); lnet_recv_delayed_msg_list()
1995 /* md won't disappear under me, since each msg lnet_recv_delayed_msg_list()
1998 id.nid = msg->msg_hdr.src_nid; lnet_recv_delayed_msg_list()
1999 id.pid = msg->msg_hdr.src_pid; lnet_recv_delayed_msg_list()
2001 LASSERT(msg->msg_rx_delayed); lnet_recv_delayed_msg_list()
2002 LASSERT(msg->msg_md != NULL); lnet_recv_delayed_msg_list()
2003 LASSERT(msg->msg_rxpeer != NULL); lnet_recv_delayed_msg_list()
2004 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT); lnet_recv_delayed_msg_list()
2007 libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index, lnet_recv_delayed_msg_list()
2008 msg->msg_hdr.msg.put.match_bits, lnet_recv_delayed_msg_list()
2009 msg->msg_hdr.msg.put.offset, lnet_recv_delayed_msg_list()
2010 msg->msg_hdr.payload_length); lnet_recv_delayed_msg_list()
2012 lnet_recv_put(msg->msg_rxpeer->lp_ni, msg); lnet_recv_delayed_msg_list()
2066 struct lnet_msg *msg; LNetPut() local
2081 msg = lnet_msg_alloc(); LNetPut()
2082 if (msg == NULL) { LNetPut()
2087 msg->msg_vmflush = !!memory_pressure_get(); LNetPut()
2102 lnet_msg_free(msg); LNetPut()
2108 lnet_msg_attach_md(msg, md, 0, 0); LNetPut()
2110 lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length); LNetPut()
2112 msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits); LNetPut()
2113 msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal); LNetPut()
2114 msg->msg_hdr.msg.put.offset = cpu_to_le32(offset); LNetPut()
2115 msg->msg_hdr.msg.put.hdr_data = hdr_data; LNetPut()
2119 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie = LNetPut()
2121 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie = LNetPut()
2124 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie = LNetPut()
2126 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie = LNetPut()
2132 lnet_build_msg_event(msg, LNET_EVENT_SEND); LNetPut()
2134 rc = lnet_send(self, msg, LNET_NID_ANY); LNetPut()
2138 lnet_finalize(NULL, msg, rc); LNetPut()
2149 /* The LND can DMA direct to the GET md (i.e. no REPLY msg). This lnet_create_reply_msg()
2150 * returns a msg for the LND to pass to lnet_finalize() when the sink lnet_create_reply_msg()
2156 struct lnet_msg *msg = lnet_msg_alloc(); lnet_create_reply_msg() local
2169 if (msg == NULL) { lnet_create_reply_msg()
2170 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n", lnet_create_reply_msg()
2189 msg->msg_from = peer_id.nid; lnet_create_reply_msg()
2190 msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */ lnet_create_reply_msg()
2191 msg->msg_hdr.src_nid = peer_id.nid; lnet_create_reply_msg()
2192 msg->msg_hdr.payload_length = getmd->md_length; lnet_create_reply_msg()
2193 msg->msg_receiving = 1; /* required by lnet_msg_attach_md */ lnet_create_reply_msg()
2195 lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length); lnet_create_reply_msg()
2201 lnet_msg_commit(msg, cpt); lnet_create_reply_msg()
2204 lnet_build_msg_event(msg, LNET_EVENT_REPLY); lnet_create_reply_msg()
2206 return msg; lnet_create_reply_msg()
2216 if (msg != NULL) lnet_create_reply_msg()
2217 lnet_msg_free(msg); lnet_create_reply_msg()
2266 struct lnet_msg *msg; LNetGet() local
2281 msg = lnet_msg_alloc(); LNetGet()
2282 if (msg == NULL) { LNetGet()
2302 lnet_msg_free(msg); LNetGet()
2308 lnet_msg_attach_md(msg, md, 0, 0); LNetGet()
2310 lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0); LNetGet()
2312 msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits); LNetGet()
2313 msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal); LNetGet()
2314 msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset); LNetGet()
2315 msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length); LNetGet()
2318 msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie = LNetGet()
2320 msg->msg_hdr.msg.get.return_wmd.wh_object_cookie = LNetGet()
2325 lnet_build_msg_event(msg, LNET_EVENT_SEND); LNetGet()
2327 rc = lnet_send(self, msg, LNET_NID_ANY); LNetGet()
2331 lnet_finalize(NULL, msg, rc); LNetGet()
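The lib-move.c results above repeatedly convert PUT/GET/ACK header fields between little-endian wire order and host byte order with le64_to_cpu()/cpu_to_le64(). A minimal sketch of the receive-side step, using a reduced stand-in structure (demo_put_hdr and demo_swab_put_hdr are illustrative names, not LNet types):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Reduced stand-in for the wire PUT header fields seen above (illustrative). */
struct demo_put_hdr {
	__u64 match_bits;	/* little-endian on the wire */
	__u32 ptl_index;
	__u32 offset;
};

/* Mirrors the swab step in lnet_parse_put(): wire (LE) -> host order, in place. */
static void demo_swab_put_hdr(struct demo_put_hdr *p)
{
	p->match_bits = le64_to_cpu(p->match_bits);
	p->ptl_index  = le32_to_cpu(p->ptl_index);
	p->offset     = le32_to_cpu(p->offset);
}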
H A DMakefile3 lnet-y := api-ni.o config.o lib-me.o lib-msg.o lib-eq.o \
/linux-4.1.27/arch/score/include/asm/
H A Dbug.h12 #define die(msg, regs) \
13 __die(msg, regs, __FILE__ ":", __func__, __LINE__)
14 #define die_if_kernel(msg, regs) \
15 __die_if_kernel(msg, regs, __FILE__ ":", __func__, __LINE__)
/linux-4.1.27/arch/ia64/kernel/
H A Dmsi_ia64.c18 struct msi_msg msg; ia64_set_msi_irq_affinity() local
26 __get_cached_msi_msg(idata->msi_desc, &msg); ia64_set_msi_irq_affinity()
28 addr = msg.address_lo; ia64_set_msi_irq_affinity()
31 msg.address_lo = addr; ia64_set_msi_irq_affinity()
33 data = msg.data; ia64_set_msi_irq_affinity()
36 msg.data = data; ia64_set_msi_irq_affinity()
38 pci_write_msi_msg(irq, &msg); ia64_set_msi_irq_affinity()
47 struct msi_msg msg; ia64_setup_msi_irq() local
60 msg.address_hi = 0; ia64_setup_msi_irq()
61 msg.address_lo = ia64_setup_msi_irq()
67 msg.data = ia64_setup_msi_irq()
73 pci_write_msi_msg(irq, &msg); ia64_setup_msi_irq()
137 struct msi_msg msg; dmar_msi_set_affinity() local
143 dmar_msi_read(irq, &msg); dmar_msi_set_affinity()
145 msg.data &= ~MSI_DATA_VECTOR_MASK; dmar_msi_set_affinity()
146 msg.data |= MSI_DATA_VECTOR(cfg->vector); dmar_msi_set_affinity()
147 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; dmar_msi_set_affinity()
148 msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); dmar_msi_set_affinity()
150 dmar_msi_write(irq, &msg); dmar_msi_set_affinity()
169 msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) msi_compose_msg() argument
177 msg->address_hi = 0; msi_compose_msg()
178 msg->address_lo = msi_compose_msg()
184 msg->data = msi_compose_msg()
195 struct msi_msg msg; arch_setup_dmar_msi() local
197 ret = msi_compose_msg(NULL, irq, &msg); arch_setup_dmar_msi()
200 dmar_msi_write(irq, &msg); arch_setup_dmar_msi()
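The msi_ia64.c results show the usual read-modify-write pattern for retargeting an MSI: fetch the cached message, patch the vector and destination bits, then write it back. A hedged sketch of that pattern (demo_retarget_msi is illustrative; the MSI_DATA_*/MSI_ADDR_* helpers are assumed to be the arch-specific <asm/msidef.h> macros used in the listing):

#include <linux/irq.h>
#include <linux/msi.h>
#include <asm/msidef.h>		/* assumed home of the MSI_* field macros above */

/* Illustrative sketch of the read-modify-write step seen in the affinity paths. */
static void demo_retarget_msi(struct irq_data *idata, unsigned int irq,
			      u8 vector, u32 dest_id)
{
	struct msi_msg msg;

	__get_cached_msi_msg(idata->msi_desc, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(dest_id);

	pci_write_msi_msg(irq, &msg);
}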
/linux-4.1.27/sound/hda/
H A Dtrace.h21 TP_STRUCT__entry(__dynamic_array(char, msg, HDAC_MSG_MAX)),
23 snprintf(__get_str(msg), HDAC_MSG_MAX,
27 TP_printk("%s", __get_str(msg))
33 TP_STRUCT__entry(__dynamic_array(char, msg, HDAC_MSG_MAX)),
35 snprintf(__get_str(msg), HDAC_MSG_MAX,
39 TP_printk("%s", __get_str(msg))
45 TP_STRUCT__entry(__dynamic_array(char, msg, HDAC_MSG_MAX)),
47 snprintf(__get_str(msg), HDAC_MSG_MAX,
51 TP_printk("%s", __get_str(msg))
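The sound/hda trace.h results format a message into a __dynamic_array() and emit it with __get_str(). A minimal sketch of a trace event built the same way (TRACE_SYSTEM demo, the demo_msg event and DEMO_MSG_MAX are made-up names for illustration; the including .c file still needs the usual CREATE_TRACE_POINTS setup):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

#define DEMO_MSG_MAX 64

TRACE_EVENT(demo_msg,
	TP_PROTO(const char *text),
	TP_ARGS(text),
	TP_STRUCT__entry(__dynamic_array(char, msg, DEMO_MSG_MAX)),
	TP_fast_assign(
		snprintf(__get_str(msg), DEMO_MSG_MAX, "%s", text);
	),
	TP_printk("%s", __get_str(msg))
);

#endif /* _TRACE_DEMO_H */

/* This part must be outside the multi-read protection. */
#include <trace/define_trace.h>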
/linux-4.1.27/tools/perf/ui/gtk/
H A Dutil.c32 char *msg; perf_gtk__error() local
36 vasprintf(&msg, format, args) < 0) { perf_gtk__error()
47 "<b>Error</b>\n\n%s", msg); perf_gtk__error()
51 free(msg); perf_gtk__error()
58 char *msg; perf_gtk__warning_info_bar() local
61 vasprintf(&msg, format, args) < 0) { perf_gtk__warning_info_bar()
68 gtk_label_set_text(GTK_LABEL(pgctx->message_label), msg); perf_gtk__warning_info_bar()
73 free(msg); perf_gtk__warning_info_bar()
79 char *msg, *p; perf_gtk__warning_statusbar() local
82 vasprintf(&msg, format, args) < 0) { perf_gtk__warning_statusbar()
93 p = strchr(msg, '\n'); perf_gtk__warning_statusbar()
98 pgctx->statbar_ctx_id, msg); perf_gtk__warning_statusbar() local
100 free(msg); perf_gtk__warning_statusbar()
H A Dhelpline.c18 static void gtk_helpline_push(const char *msg) gtk_helpline_push() argument
24 pgctx->statbar_ctx_id, msg); gtk_helpline_push() local
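The perf GTK helpers above all build their message with vasprintf() and free it afterwards. A small userspace sketch of that pattern (demo_warn and the stderr sink are illustrative):

#define _GNU_SOURCE		/* for vasprintf() */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Build a message with vasprintf(), hand it to a sink, then free it. */
static int demo_warn(const char *format, ...)
{
	char *msg;
	va_list args;
	int ret;

	va_start(args, format);
	ret = vasprintf(&msg, format, args);
	va_end(args);

	if (ret < 0)
		return -1;	/* msg is undefined on failure; do not free it */

	fprintf(stderr, "warning: %s\n", msg);
	free(msg);
	return 0;
}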
/linux-4.1.27/sound/soc/intel/common/
H A Dsst-ipc.c44 struct ipc_message *msg = NULL; msg_get_empty() local
47 msg = list_first_entry(&ipc->empty_list, struct ipc_message, msg_get_empty()
49 list_del(&msg->list); msg_get_empty()
52 return msg; msg_get_empty()
56 struct ipc_message *msg, void *rx_data) tx_wait_done()
62 ret = wait_event_timeout(msg->waitq, msg->complete, tx_wait_done()
70 list_del(&msg->list); tx_wait_done()
75 if (msg->rx_size) tx_wait_done()
76 memcpy(rx_data, msg->rx_data, msg->rx_size); tx_wait_done()
77 ret = msg->errno; tx_wait_done()
80 list_add_tail(&msg->list, &ipc->empty_list); tx_wait_done()
89 struct ipc_message *msg; ipc_tx_message() local
94 msg = msg_get_empty(ipc); ipc_tx_message()
95 if (msg == NULL) { ipc_tx_message()
100 msg->header = header; ipc_tx_message()
101 msg->tx_size = tx_bytes; ipc_tx_message()
102 msg->rx_size = rx_bytes; ipc_tx_message()
103 msg->wait = wait; ipc_tx_message()
104 msg->errno = 0; ipc_tx_message()
105 msg->pending = false; ipc_tx_message()
106 msg->complete = false; ipc_tx_message()
109 ipc->ops.tx_data_copy(msg, tx_data, tx_bytes); ipc_tx_message()
111 list_add_tail(&msg->list, &ipc->tx_list); ipc_tx_message()
117 return tx_wait_done(ipc, msg, rx_data); ipc_tx_message()
126 ipc->msg = kzalloc(sizeof(struct ipc_message) * msg_empty_list_init()
128 if (ipc->msg == NULL) msg_empty_list_init()
132 init_waitqueue_head(&ipc->msg[i].waitq); msg_empty_list_init()
133 list_add(&ipc->msg[i].list, &ipc->empty_list); msg_empty_list_init()
143 struct ipc_message *msg; ipc_tx_msgs() local
162 msg = list_first_entry(&ipc->tx_list, struct ipc_message, list); ipc_tx_msgs()
163 list_move(&msg->list, &ipc->rx_list); ipc_tx_msgs()
166 ipc->ops.tx_msg(ipc, msg); ipc_tx_msgs()
190 struct ipc_message *msg; sst_ipc_reply_find_msg() local
202 list_for_each_entry(msg, &ipc->rx_list, list) { sst_ipc_reply_find_msg()
203 if ((msg->header & mask) == header) sst_ipc_reply_find_msg()
204 return msg; sst_ipc_reply_find_msg()
213 struct ipc_message *msg) sst_ipc_tx_msg_reply_complete()
215 msg->complete = true; sst_ipc_tx_msg_reply_complete()
217 if (!msg->wait) sst_ipc_tx_msg_reply_complete()
218 list_add_tail(&msg->list, &ipc->empty_list); sst_ipc_tx_msg_reply_complete()
220 wake_up(&msg->waitq); sst_ipc_tx_msg_reply_complete()
226 struct ipc_message *msg, *tmp; sst_ipc_drop_all() local
233 list_for_each_entry_safe(msg, tmp, &ipc->tx_list, list) { sst_ipc_drop_all()
234 list_move(&msg->list, &ipc->empty_list); sst_ipc_drop_all()
238 list_for_each_entry_safe(msg, tmp, &ipc->rx_list, list) { sst_ipc_drop_all()
239 list_move(&msg->list, &ipc->empty_list); sst_ipc_drop_all()
246 dev_err(ipc->dev, "dropped IPC msg RX=%d, TX=%d\n", sst_ipc_drop_all()
272 kfree(ipc->msg); sst_ipc_init()
286 if (ipc->msg) sst_ipc_fini()
287 kfree(ipc->msg); sst_ipc_fini()
55 tx_wait_done(struct sst_generic_ipc *ipc, struct ipc_message *msg, void *rx_data) tx_wait_done() argument
212 sst_ipc_tx_msg_reply_complete(struct sst_generic_ipc *ipc, struct ipc_message *msg) sst_ipc_tx_msg_reply_complete() argument
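sst-ipc.c keeps messages on empty/tx/rx lists and lets the submitter sleep in wait_event_timeout() until the reply handler sets msg->complete and wakes msg->waitq. A heavily reduced sketch of the wait side only (the demo_* types, the 1-second timeout and the single lock are simplifications, not the driver's actual layout):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Reduced stand-ins for the driver's ipc/message types (illustrative only). */
struct demo_msg {
	struct list_head	list;
	wait_queue_head_t	waitq;
	bool			complete;
	int			errno;
};

struct demo_ipc {
	spinlock_t		lock;
	struct list_head	empty_list;
};

/* Sleep until the reply path sets msg->complete, as tx_wait_done() does,
 * then recycle the message onto the empty list. */
static int demo_tx_wait_done(struct demo_ipc *ipc, struct demo_msg *msg)
{
	unsigned long flags;
	long left;

	left = wait_event_timeout(msg->waitq, msg->complete,
				  msecs_to_jiffies(1000));

	spin_lock_irqsave(&ipc->lock, flags);
	list_move_tail(&msg->list, &ipc->empty_list);	/* recycle the slot */
	spin_unlock_irqrestore(&ipc->lock, flags);

	return left == 0 ? -ETIMEDOUT : msg->errno;
}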
/linux-4.1.27/net/nfc/
H A Dnetlink.c67 static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, nfc_genl_send_target() argument
72 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, nfc_genl_send_target()
79 if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) || nfc_genl_send_target()
80 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) || nfc_genl_send_target()
81 nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) || nfc_genl_send_target()
82 nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res)) nfc_genl_send_target()
85 nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, nfc_genl_send_target()
89 nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len, nfc_genl_send_target()
93 nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len, nfc_genl_send_target()
98 if (nla_put_u8(msg, NFC_ATTR_TARGET_ISO15693_DSFID, nfc_genl_send_target()
100 nla_put(msg, NFC_ATTR_TARGET_ISO15693_UID, nfc_genl_send_target()
105 genlmsg_end(msg, hdr); nfc_genl_send_target()
109 genlmsg_cancel(msg, hdr); nfc_genl_send_target()
185 struct sk_buff *msg; nfc_genl_targets_found() local
190 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); nfc_genl_targets_found()
191 if (!msg) nfc_genl_targets_found()
194 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_targets_found()
199 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) nfc_genl_targets_found()
202 genlmsg_end(msg, hdr); nfc_genl_targets_found()
204 return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nfc_genl_targets_found()
207 genlmsg_cancel(msg, hdr); nfc_genl_targets_found()
209 nlmsg_free(msg); nfc_genl_targets_found()
215 struct sk_buff *msg; nfc_genl_target_lost() local
218 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_target_lost()
219 if (!msg) nfc_genl_target_lost()
222 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_target_lost()
227 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || nfc_genl_target_lost()
228 nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx)) nfc_genl_target_lost()
231 genlmsg_end(msg, hdr); nfc_genl_target_lost()
233 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); nfc_genl_target_lost()
238 genlmsg_cancel(msg, hdr); nfc_genl_target_lost()
240 nlmsg_free(msg); nfc_genl_target_lost()
246 struct sk_buff *msg; nfc_genl_tm_activated() local
249 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_tm_activated()
250 if (!msg) nfc_genl_tm_activated()
253 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_tm_activated()
258 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) nfc_genl_tm_activated()
260 if (nla_put_u32(msg, NFC_ATTR_TM_PROTOCOLS, protocol)) nfc_genl_tm_activated()
263 genlmsg_end(msg, hdr); nfc_genl_tm_activated()
265 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); nfc_genl_tm_activated()
270 genlmsg_cancel(msg, hdr); nfc_genl_tm_activated()
272 nlmsg_free(msg); nfc_genl_tm_activated()
278 struct sk_buff *msg; nfc_genl_tm_deactivated() local
281 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_tm_deactivated()
282 if (!msg) nfc_genl_tm_deactivated()
285 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_tm_deactivated()
290 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) nfc_genl_tm_deactivated()
293 genlmsg_end(msg, hdr); nfc_genl_tm_deactivated()
295 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); nfc_genl_tm_deactivated()
300 genlmsg_cancel(msg, hdr); nfc_genl_tm_deactivated()
302 nlmsg_free(msg); nfc_genl_tm_deactivated()
308 struct sk_buff *msg; nfc_genl_device_added() local
311 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_device_added()
312 if (!msg) nfc_genl_device_added()
315 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_device_added()
320 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || nfc_genl_device_added()
321 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nfc_genl_device_added()
322 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || nfc_genl_device_added()
323 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up)) nfc_genl_device_added()
326 genlmsg_end(msg, hdr); nfc_genl_device_added()
328 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); nfc_genl_device_added()
333 genlmsg_cancel(msg, hdr); nfc_genl_device_added()
335 nlmsg_free(msg); nfc_genl_device_added()
341 struct sk_buff *msg; nfc_genl_device_removed() local
344 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_device_removed()
345 if (!msg) nfc_genl_device_removed()
348 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_device_removed()
353 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) nfc_genl_device_removed()
356 genlmsg_end(msg, hdr); nfc_genl_device_removed()
358 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); nfc_genl_device_removed()
363 genlmsg_cancel(msg, hdr); nfc_genl_device_removed()
365 nlmsg_free(msg); nfc_genl_device_removed()
371 struct sk_buff *msg; nfc_genl_llc_send_sdres() local
379 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_llc_send_sdres()
380 if (!msg) nfc_genl_llc_send_sdres()
383 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_llc_send_sdres()
388 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) nfc_genl_llc_send_sdres()
391 sdp_attr = nla_nest_start(msg, NFC_ATTR_LLC_SDP); nfc_genl_llc_send_sdres()
401 uri_attr = nla_nest_start(msg, i++); hlist_for_each_entry_safe()
407 if (nla_put_u8(msg, NFC_SDP_ATTR_SAP, sdres->sap)) hlist_for_each_entry_safe()
410 if (nla_put_string(msg, NFC_SDP_ATTR_URI, sdres->uri)) hlist_for_each_entry_safe()
413 nla_nest_end(msg, uri_attr); hlist_for_each_entry_safe()
420 nla_nest_end(msg, sdp_attr);
422 genlmsg_end(msg, hdr);
424 return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
427 genlmsg_cancel(msg, hdr);
430 nlmsg_free(msg);
439 struct sk_buff *msg; nfc_genl_se_added() local
442 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_se_added()
443 if (!msg) nfc_genl_se_added()
446 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_se_added()
451 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nfc_genl_se_added()
452 nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || nfc_genl_se_added()
453 nla_put_u8(msg, NFC_ATTR_SE_TYPE, type)) nfc_genl_se_added()
456 genlmsg_end(msg, hdr); nfc_genl_se_added()
458 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); nfc_genl_se_added()
463 genlmsg_cancel(msg, hdr); nfc_genl_se_added()
465 nlmsg_free(msg); nfc_genl_se_added()
471 struct sk_buff *msg; nfc_genl_se_removed() local
474 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_se_removed()
475 if (!msg) nfc_genl_se_removed()
478 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_se_removed()
483 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nfc_genl_se_removed()
484 nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx)) nfc_genl_se_removed()
487 genlmsg_end(msg, hdr); nfc_genl_se_removed()
489 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); nfc_genl_se_removed()
494 genlmsg_cancel(msg, hdr); nfc_genl_se_removed()
496 nlmsg_free(msg); nfc_genl_se_removed()
504 struct sk_buff *msg; nfc_genl_se_transaction() local
507 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_se_transaction()
508 if (!msg) nfc_genl_se_transaction()
511 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_se_transaction()
520 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nfc_genl_se_transaction()
521 nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || nfc_genl_se_transaction()
522 nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type) || nfc_genl_se_transaction()
523 nla_put(msg, NFC_ATTR_SE_AID, evt_transaction->aid_len, nfc_genl_se_transaction()
525 nla_put(msg, NFC_ATTR_SE_PARAMS, evt_transaction->params_len, nfc_genl_se_transaction()
532 genlmsg_end(msg, hdr); nfc_genl_se_transaction()
534 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); nfc_genl_se_transaction()
539 genlmsg_cancel(msg, hdr); nfc_genl_se_transaction()
543 nlmsg_free(msg); nfc_genl_se_transaction()
547 static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, nfc_genl_send_device() argument
554 hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags, nfc_genl_send_device()
562 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || nfc_genl_send_device()
563 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nfc_genl_send_device()
564 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || nfc_genl_send_device()
565 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || nfc_genl_send_device()
566 nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) nfc_genl_send_device()
569 genlmsg_end(msg, hdr); nfc_genl_send_device()
573 genlmsg_cancel(msg, hdr); nfc_genl_send_device()
632 struct sk_buff *msg; nfc_genl_dep_link_up_event() local
637 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); nfc_genl_dep_link_up_event()
638 if (!msg) nfc_genl_dep_link_up_event()
641 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_DEP_LINK_UP); nfc_genl_dep_link_up_event()
645 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) nfc_genl_dep_link_up_event()
648 nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx)) nfc_genl_dep_link_up_event()
650 if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) || nfc_genl_dep_link_up_event()
651 nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode)) nfc_genl_dep_link_up_event()
654 genlmsg_end(msg, hdr); nfc_genl_dep_link_up_event()
658 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nfc_genl_dep_link_up_event()
663 genlmsg_cancel(msg, hdr); nfc_genl_dep_link_up_event()
665 nlmsg_free(msg); nfc_genl_dep_link_up_event()
671 struct sk_buff *msg; nfc_genl_dep_link_down_event() local
676 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); nfc_genl_dep_link_down_event()
677 if (!msg) nfc_genl_dep_link_down_event()
680 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_dep_link_down_event()
685 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) nfc_genl_dep_link_down_event()
688 genlmsg_end(msg, hdr); nfc_genl_dep_link_down_event()
690 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nfc_genl_dep_link_down_event()
695 genlmsg_cancel(msg, hdr); nfc_genl_dep_link_down_event()
697 nlmsg_free(msg); nfc_genl_dep_link_down_event()
703 struct sk_buff *msg; nfc_genl_get_device() local
717 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_get_device()
718 if (!msg) { nfc_genl_get_device()
723 rc = nfc_genl_send_device(msg, dev, info->snd_portid, info->snd_seq, nfc_genl_get_device()
730 return genlmsg_reply(msg, info); nfc_genl_get_device()
733 nlmsg_free(msg); nfc_genl_get_device()
943 static int nfc_genl_send_params(struct sk_buff *msg, nfc_genl_send_params() argument
949 hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, 0, nfc_genl_send_params()
954 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, local->dev->idx) || nfc_genl_send_params()
955 nla_put_u8(msg, NFC_ATTR_LLC_PARAM_LTO, local->lto) || nfc_genl_send_params()
956 nla_put_u8(msg, NFC_ATTR_LLC_PARAM_RW, local->rw) || nfc_genl_send_params()
957 nla_put_u16(msg, NFC_ATTR_LLC_PARAM_MIUX, be16_to_cpu(local->miux))) nfc_genl_send_params()
960 genlmsg_end(msg, hdr); nfc_genl_send_params()
965 genlmsg_cancel(msg, hdr); nfc_genl_send_params()
974 struct sk_buff *msg = NULL; nfc_genl_llc_get_params() local
994 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_llc_get_params()
995 if (!msg) { nfc_genl_llc_get_params()
1000 rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq); nfc_genl_llc_get_params()
1008 if (msg) nfc_genl_llc_get_params()
1009 nlmsg_free(msg); nfc_genl_llc_get_params()
1014 return genlmsg_reply(msg, info); nfc_genl_llc_get_params()
1202 struct sk_buff *msg; nfc_genl_fw_download_done() local
1205 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nfc_genl_fw_download_done()
1206 if (!msg) nfc_genl_fw_download_done()
1209 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, nfc_genl_fw_download_done()
1214 if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) || nfc_genl_fw_download_done()
1215 nla_put_u32(msg, NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS, result) || nfc_genl_fw_download_done()
1216 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) nfc_genl_fw_download_done()
1219 genlmsg_end(msg, hdr); nfc_genl_fw_download_done()
1221 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); nfc_genl_fw_download_done()
1226 genlmsg_cancel(msg, hdr); nfc_genl_fw_download_done()
1228 nlmsg_free(msg); nfc_genl_fw_download_done()
1278 static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev, nfc_genl_send_se() argument
1287 hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags, nfc_genl_send_se()
1295 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nfc_genl_send_se()
1296 nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) || nfc_genl_send_se()
1297 nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type)) nfc_genl_send_se()
1300 genlmsg_end(msg, hdr); nfc_genl_send_se()
1306 genlmsg_cancel(msg, hdr); nfc_genl_send_se()
1415 struct sk_buff *msg; se_io_cb() local
1418 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); se_io_cb()
1419 if (!msg) { se_io_cb()
1424 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, se_io_cb()
1429 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, ctx->dev_idx) || se_io_cb()
1430 nla_put_u32(msg, NFC_ATTR_SE_INDEX, ctx->se_idx) || se_io_cb()
1431 nla_put(msg, NFC_ATTR_SE_APDU, apdu_len, apdu)) se_io_cb()
1434 genlmsg_end(msg, hdr); se_io_cb()
1436 genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); se_io_cb()
1443 genlmsg_cancel(msg, hdr); se_io_cb()
1445 nlmsg_free(msg); se_io_cb()
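Almost every nfc_genl_* event above follows the same generic-netlink recipe: nlmsg_new(), genlmsg_put(), one or more nla_put_*() calls, genlmsg_end(), then genlmsg_multicast(), with genlmsg_cancel()/nlmsg_free() on the error paths. A hedged sketch of that recipe (demo_genl_family, DEMO_ATTR_INDEX and DEMO_CMD_EVENT are placeholders for a real family definition):

#include <linux/errno.h>
#include <net/genetlink.h>

/* Placeholders for a real family/attribute/command set. */
extern struct genl_family demo_genl_family;
#define DEMO_ATTR_INDEX	1
#define DEMO_CMD_EVENT	1

static int demo_genl_notify(u32 index)
{
	struct sk_buff *msg;
	void *hdr;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, 0, 0, &demo_genl_family, 0, DEMO_CMD_EVENT);
	if (!hdr)
		goto free_msg;

	if (nla_put_u32(msg, DEMO_ATTR_INDEX, index))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	/* multicast group 0 of the family; sleeping context assumed */
	return genlmsg_multicast(&demo_genl_family, msg, 0, 0, GFP_KERNEL);

nla_put_failure:
	genlmsg_cancel(msg, hdr);
free_msg:
	nlmsg_free(msg);
	return -EMSGSIZE;
}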
/linux-4.1.27/drivers/media/dvb-frontends/
H A Ddibx000_common.c27 memset(mst->msg, 0, sizeof(struct i2c_msg)); dibx000_write_word()
28 mst->msg[0].addr = mst->i2c_addr; dibx000_write_word()
29 mst->msg[0].flags = 0; dibx000_write_word()
30 mst->msg[0].buf = mst->i2c_write_buffer; dibx000_write_word()
31 mst->msg[0].len = 4; dibx000_write_word()
33 ret = i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0; dibx000_write_word()
51 memset(mst->msg, 0, 2 * sizeof(struct i2c_msg)); dibx000_read_word()
52 mst->msg[0].addr = mst->i2c_addr; dibx000_read_word()
53 mst->msg[0].flags = 0; dibx000_read_word()
54 mst->msg[0].buf = mst->i2c_write_buffer; dibx000_read_word()
55 mst->msg[0].len = 2; dibx000_read_word()
56 mst->msg[1].addr = mst->i2c_addr; dibx000_read_word()
57 mst->msg[1].flags = I2C_M_RD; dibx000_read_word()
58 mst->msg[1].buf = mst->i2c_read_buffer; dibx000_read_word()
59 mst->msg[1].len = 2; dibx000_read_word()
61 if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2) dibx000_read_word()
89 static int dibx000_master_i2c_write(struct dibx000_i2c_master *mst, struct i2c_msg *msg, u8 stop) dibx000_master_i2c_write() argument
94 u16 txlen = msg->len, len; dibx000_master_i2c_write()
95 const u8 *b = msg->buf; dibx000_master_i2c_write()
107 da = (((u8) (msg->addr)) << 9) | dibx000_master_i2c_write()
116 if (txlen == msg->len) dibx000_master_i2c_write()
132 static int dibx000_master_i2c_read(struct dibx000_i2c_master *mst, struct i2c_msg *msg) dibx000_master_i2c_read() argument
135 u8 *b = msg->buf; dibx000_master_i2c_read()
136 u16 rxlen = msg->len, len; dibx000_master_i2c_read()
140 da = (((u8) (msg->addr)) << 9) | dibx000_master_i2c_read()
149 if (rxlen == msg->len) dibx000_master_i2c_read()
202 static int dibx000_i2c_master_xfer_gpio12(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) dibx000_i2c_master_xfer_gpio12() argument
210 if (msg[msg_index].flags & I2C_M_RD) { dibx000_i2c_master_xfer_gpio12()
211 ret = dibx000_master_i2c_read(mst, &msg[msg_index]); dibx000_i2c_master_xfer_gpio12()
215 ret = dibx000_master_i2c_write(mst, &msg[msg_index], 1); dibx000_i2c_master_xfer_gpio12()
224 static int dibx000_i2c_master_xfer_gpio34(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) dibx000_i2c_master_xfer_gpio34() argument
232 if (msg[msg_index].flags & I2C_M_RD) { dibx000_i2c_master_xfer_gpio34()
233 ret = dibx000_master_i2c_read(mst, &msg[msg_index]); dibx000_i2c_master_xfer_gpio34()
237 ret = dibx000_master_i2c_write(mst, &msg[msg_index], 1); dibx000_i2c_master_xfer_gpio34()
279 struct i2c_msg msg[], int num) dibx000_i2c_gated_gpio67_xfer()
297 memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num)); dibx000_i2c_gated_gpio67_xfer()
300 dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1); dibx000_i2c_gated_gpio67_xfer()
301 mst->msg[0].addr = mst->i2c_addr; dibx000_i2c_gated_gpio67_xfer()
302 mst->msg[0].buf = &mst->i2c_write_buffer[0]; dibx000_i2c_gated_gpio67_xfer()
303 mst->msg[0].len = 4; dibx000_i2c_gated_gpio67_xfer()
305 memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num); dibx000_i2c_gated_gpio67_xfer()
309 mst->msg[num + 1].addr = mst->i2c_addr; dibx000_i2c_gated_gpio67_xfer()
310 mst->msg[num + 1].buf = &mst->i2c_write_buffer[4]; dibx000_i2c_gated_gpio67_xfer()
311 mst->msg[num + 1].len = 4; dibx000_i2c_gated_gpio67_xfer()
313 ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? dibx000_i2c_gated_gpio67_xfer()
326 struct i2c_msg msg[], int num) dibx000_i2c_gated_tuner_xfer()
343 memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num)); dibx000_i2c_gated_tuner_xfer()
346 dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1); dibx000_i2c_gated_tuner_xfer()
347 mst->msg[0].addr = mst->i2c_addr; dibx000_i2c_gated_tuner_xfer()
348 mst->msg[0].buf = &mst->i2c_write_buffer[0]; dibx000_i2c_gated_tuner_xfer()
349 mst->msg[0].len = 4; dibx000_i2c_gated_tuner_xfer()
351 memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num); dibx000_i2c_gated_tuner_xfer()
355 mst->msg[num + 1].addr = mst->i2c_addr; dibx000_i2c_gated_tuner_xfer()
356 mst->msg[num + 1].buf = &mst->i2c_write_buffer[4]; dibx000_i2c_gated_tuner_xfer()
357 mst->msg[num + 1].len = 4; dibx000_i2c_gated_tuner_xfer()
359 ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? dibx000_i2c_gated_tuner_xfer()
440 memset(mst->msg, 0, sizeof(struct i2c_msg)); dibx000_init_i2c_master()
441 mst->msg[0].addr = i2c_addr >> 1; dibx000_init_i2c_master()
442 mst->msg[0].flags = 0; dibx000_init_i2c_master()
443 mst->msg[0].buf = mst->i2c_write_buffer; dibx000_init_i2c_master()
444 mst->msg[0].len = 4; dibx000_init_i2c_master()
486 ret = (i2c_transfer(i2c_adap, mst->msg, 1) == 1); dibx000_init_i2c_master()
278 dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) dibx000_i2c_gated_gpio67_xfer() argument
325 dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) dibx000_i2c_gated_tuner_xfer() argument
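dibx000_read_word() above is the standard write-then-read I2C transaction: one i2c_msg writes the register address, a second flagged I2C_M_RD reads the result, and i2c_transfer() must report both messages as completed. A generic sketch of that pattern (demo_i2c_read_word, the 16-bit register width and big-endian byte order are illustrative choices):

#include <linux/errno.h>
#include <linux/i2c.h>

/* Write a 2-byte register address, then read back a 16-bit value. */
static int demo_i2c_read_word(struct i2c_adapter *adap, u8 addr,
			      u16 reg, u16 *val)
{
	u8 wbuf[2] = { reg >> 8, reg & 0xff };
	u8 rbuf[2];
	struct i2c_msg msgs[2] = {
		{ .addr = addr, .flags = 0,        .buf = wbuf, .len = 2 },
		{ .addr = addr, .flags = I2C_M_RD, .buf = rbuf, .len = 2 },
	};
	int ret;

	ret = i2c_transfer(adap, msgs, 2);
	if (ret != 2)
		return ret < 0 ? ret : -EREMOTEIO;

	*val = (rbuf[0] << 8) | rbuf[1];
	return 0;
}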
/linux-4.1.27/net/wimax/
H A DMakefile6 op-msg.o \
/linux-4.1.27/drivers/media/pci/saa7164/
H A Dsaa7164-bus.c112 dprintk(DBGLVL_BUS, "Dumping msg structure:\n"); saa7164_bus_dumpmsg()
134 int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, saa7164_bus_set() argument
143 if (!msg) { saa7164_bus_set()
144 printk(KERN_ERR "%s() !msg\n", __func__); saa7164_bus_set()
152 if (msg->size > dev->bus.m_wMaxReqSize) { saa7164_bus_set()
158 if ((msg->size > 0) && (buf == NULL)) { saa7164_bus_set()
166 bytes_to_write = sizeof(*msg) + msg->size; saa7164_bus_set()
189 /* Process the msg and write the content onto the bus */ saa7164_bus_set()
224 * Make a copy of msg->size before it is converted to le16 since it is saa7164_bus_set()
227 size = msg->size; saa7164_bus_set()
229 msg->size = (__force u16)cpu_to_le16(msg->size); saa7164_bus_set()
230 msg->command = (__force u32)cpu_to_le32(msg->command); saa7164_bus_set()
231 msg->controlselector = (__force u16)cpu_to_le16(msg->controlselector); saa7164_bus_set()
246 dprintk(DBGLVL_BUS, "%s() sizeof(*msg) = %d\n", __func__, saa7164_bus_set()
247 (u32)sizeof(*msg)); saa7164_bus_set()
249 if (space_rem < sizeof(*msg)) { saa7164_bus_set()
252 /* Split the msg into pieces as the ring wraps */ saa7164_bus_set()
253 memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, space_rem); saa7164_bus_set()
254 memcpy_toio(bus->m_pdwSetRing, (u8 *)msg + space_rem, saa7164_bus_set()
255 sizeof(*msg) - space_rem); saa7164_bus_set()
257 memcpy_toio(bus->m_pdwSetRing + sizeof(*msg) - space_rem, saa7164_bus_set()
260 } else if (space_rem == sizeof(*msg)) { saa7164_bus_set()
264 memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); saa7164_bus_set()
269 memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); saa7164_bus_set()
272 sizeof(*msg), buf, space_rem - saa7164_bus_set()
273 sizeof(*msg)); saa7164_bus_set()
275 space_rem - sizeof(*msg), saa7164_bus_set()
286 memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); saa7164_bus_set()
287 memcpy_toio(bus->m_pdwSetRing + curr_swp + sizeof(*msg), buf, saa7164_bus_set()
296 /* Convert back to cpu after writing the msg to the ringbuffer. */ saa7164_bus_set()
297 msg->size = le16_to_cpu((__force __le16)msg->size); saa7164_bus_set()
298 msg->command = le32_to_cpu((__force __le32)msg->command); saa7164_bus_set()
299 msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector); saa7164_bus_set()
319 int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, saa7164_bus_get() argument
330 if (msg == NULL) saa7164_bus_get()
333 if (msg->size > dev->bus.m_wMaxReqSize) { saa7164_bus_get()
339 if ((peekonly == 0) && (msg->size > 0) && (buf == NULL)) { saa7164_bus_get()
341 "%s() Missing msg buf, size should be %d bytes\n", saa7164_bus_get()
342 __func__, msg->size); saa7164_bus_get()
348 /* Peek the bus to see if a msg exists, if it's not what we're expecting saa7164_bus_get()
359 bytes_to_read = sizeof(*msg); saa7164_bus_get()
400 memcpy(msg, &msg_tmp, sizeof(*msg)); saa7164_bus_get()
405 if ((msg_tmp.id != msg->id) || (msg_tmp.command != msg->command) || saa7164_bus_get()
406 (msg_tmp.controlselector != msg->controlselector) || saa7164_bus_get()
407 (msg_tmp.seqno != msg->seqno) || (msg_tmp.size != msg->size)) { saa7164_bus_get()
409 printk(KERN_ERR "%s() Unexpected msg miss-match\n", __func__); saa7164_bus_get()
410 saa7164_bus_dumpmsg(dev, msg, buf); saa7164_bus_get()
417 buf_size = msg->size; saa7164_bus_get()
419 bytes_to_read = sizeof(*msg) + msg->size; saa7164_bus_get()
430 printk(KERN_ERR "%s() Invalid bus state, missing msg " saa7164_bus_get()
444 if (space_rem < sizeof(*msg)) { saa7164_bus_get()
445 /* msg wraps around the ring */ saa7164_bus_get()
446 memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem); saa7164_bus_get()
447 memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing, saa7164_bus_get()
448 sizeof(*msg) - space_rem); saa7164_bus_get()
450 memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) - saa7164_bus_get()
453 } else if (space_rem == sizeof(*msg)) { saa7164_bus_get()
454 memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); saa7164_bus_get()
459 memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); saa7164_bus_get()
462 sizeof(*msg), space_rem - sizeof(*msg)); saa7164_bus_get()
463 memcpy_fromio(buf + space_rem - sizeof(*msg), saa7164_bus_get()
472 memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); saa7164_bus_get()
474 memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), saa7164_bus_get()
478 msg->size = le16_to_cpu((__force __le16)msg->size); saa7164_bus_get()
479 msg->command = le32_to_cpu((__force __le32)msg->command); saa7164_bus_get()
480 msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector); saa7164_bus_get()
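saa7164_bus_set()/saa7164_bus_get() copy a command header plus payload into a ring in device memory, splitting the memcpy_toio()/memcpy_fromio() whenever the copy would run past the end of the ring. A simplified sketch of the wrap-aware write only (demo_ring_write and the flat u8 ring are illustrative, not the driver's bus layout):

#include <linux/io.h>
#include <linux/types.h>

/*
 * Copy 'len' bytes into a ring of 'ring_size' bytes starting at 'pos',
 * splitting the copy when it wraps, as saa7164_bus_set() does.
 * Returns the new write position.
 */
static u32 demo_ring_write(u8 __iomem *ring, u32 ring_size, u32 pos,
			   const void *src, u32 len)
{
	u32 space_rem = ring_size - pos;

	if (len <= space_rem) {
		memcpy_toio(ring + pos, src, len);
	} else {
		memcpy_toio(ring + pos, src, space_rem);
		memcpy_toio(ring, (const u8 *)src + space_rem,
			    len - space_rem);
	}

	return (pos + len) % ring_size;
}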
/linux-4.1.27/drivers/usb/serial/
H A Dkeyspan.c386 struct keyspan_usa26_portStatusMessage *msg; usa26_instat_callback() local
404 msg = (struct keyspan_usa26_portStatusMessage *)data; usa26_instat_callback()
407 if (msg->port >= serial->num_ports) { usa26_instat_callback()
408 dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n", __func__, msg->port); usa26_instat_callback()
411 port = serial->port[msg->port]; usa26_instat_callback()
418 p_priv->cts_state = ((msg->hskia_cts) ? 1 : 0); usa26_instat_callback()
419 p_priv->dsr_state = ((msg->dsr) ? 1 : 0); usa26_instat_callback()
420 p_priv->dcd_state = ((msg->gpia_dcd) ? 1 : 0); usa26_instat_callback()
421 p_priv->ri_state = ((msg->ri) ? 1 : 0); usa26_instat_callback()
504 struct keyspan_usa28_portStatusMessage *msg; usa28_instat_callback() local
523 msg = (struct keyspan_usa28_portStatusMessage *)data; usa28_instat_callback()
526 if (msg->port >= serial->num_ports) { usa28_instat_callback()
527 dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n", __func__, msg->port); usa28_instat_callback()
530 port = serial->port[msg->port]; usa28_instat_callback()
537 p_priv->cts_state = ((msg->cts) ? 1 : 0); usa28_instat_callback()
538 p_priv->dsr_state = ((msg->dsr) ? 1 : 0); usa28_instat_callback()
539 p_priv->dcd_state = ((msg->dcd) ? 1 : 0); usa28_instat_callback()
540 p_priv->ri_state = ((msg->ri) ? 1 : 0); usa28_instat_callback()
584 struct keyspan_usa49_portStatusMessage *msg; usa49_instat_callback() local
604 msg = (struct keyspan_usa49_portStatusMessage *)data; usa49_instat_callback()
607 if (msg->portNumber >= serial->num_ports) { usa49_instat_callback()
609 __func__, msg->portNumber); usa49_instat_callback()
612 port = serial->port[msg->portNumber]; usa49_instat_callback()
619 p_priv->cts_state = ((msg->cts) ? 1 : 0); usa49_instat_callback()
620 p_priv->dsr_state = ((msg->dsr) ? 1 : 0); usa49_instat_callback()
621 p_priv->dcd_state = ((msg->dcd) ? 1 : 0); usa49_instat_callback()
622 p_priv->ri_state = ((msg->ri) ? 1 : 0); usa49_instat_callback()
840 struct keyspan_usa90_portStatusMessage *msg; usa90_instat_callback() local
858 msg = (struct keyspan_usa90_portStatusMessage *)data; usa90_instat_callback()
869 p_priv->cts_state = ((msg->cts) ? 1 : 0); usa90_instat_callback()
870 p_priv->dsr_state = ((msg->dsr) ? 1 : 0); usa90_instat_callback()
871 p_priv->dcd_state = ((msg->dcd) ? 1 : 0); usa90_instat_callback()
872 p_priv->ri_state = ((msg->ri) ? 1 : 0); usa90_instat_callback()
905 struct keyspan_usa67_portStatusMessage *msg; usa67_instat_callback() local
927 msg = (struct keyspan_usa67_portStatusMessage *)data; usa67_instat_callback()
930 if (msg->port >= serial->num_ports) { usa67_instat_callback()
931 dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n", __func__, msg->port); usa67_instat_callback()
935 port = serial->port[msg->port]; usa67_instat_callback()
942 p_priv->cts_state = ((msg->hskia_cts) ? 1 : 0); usa67_instat_callback()
943 p_priv->dcd_state = ((msg->gpia_dcd) ? 1 : 0); usa67_instat_callback()
1553 struct keyspan_usa26_portControlMessage msg; keyspan_usa26_send_setup() local
1587 memset(&msg, 0, sizeof(struct keyspan_usa26_portControlMessage)); keyspan_usa26_send_setup()
1592 msg.setClocking = 0xff; keyspan_usa26_send_setup()
1594 &msg.baudHi, &msg.baudLo, &msg.prescaler, keyspan_usa26_send_setup()
1598 msg.baudLo = 0; keyspan_usa26_send_setup()
1599 msg.baudHi = 125; /* Values for 9600 baud */ keyspan_usa26_send_setup()
1600 msg.prescaler = 10; keyspan_usa26_send_setup()
1602 msg.setPrescaler = 0xff; keyspan_usa26_send_setup()
1605 msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1; keyspan_usa26_send_setup()
1608 msg.lcr |= USA_DATABITS_5; keyspan_usa26_send_setup()
1611 msg.lcr |= USA_DATABITS_6; keyspan_usa26_send_setup()
1614 msg.lcr |= USA_DATABITS_7; keyspan_usa26_send_setup()
1617 msg.lcr |= USA_DATABITS_8; keyspan_usa26_send_setup()
1622 msg.lcr |= (p_priv->cflag & PARODD) ? keyspan_usa26_send_setup()
1625 msg.setLcr = 0xff; keyspan_usa26_send_setup()
1627 msg.ctsFlowControl = (p_priv->flow_control == flow_cts); keyspan_usa26_send_setup()
1628 msg.xonFlowControl = 0; keyspan_usa26_send_setup()
1629 msg.setFlowControl = 0xff; keyspan_usa26_send_setup()
1630 msg.forwardingLength = 16; keyspan_usa26_send_setup()
1631 msg.xonChar = 17; keyspan_usa26_send_setup()
1632 msg.xoffChar = 19; keyspan_usa26_send_setup()
1636 msg._txOn = 1; keyspan_usa26_send_setup()
1637 msg._txOff = 0; keyspan_usa26_send_setup()
1638 msg.txFlush = 0; keyspan_usa26_send_setup()
1639 msg.txBreak = 0; keyspan_usa26_send_setup()
1640 msg.rxOn = 1; keyspan_usa26_send_setup()
1641 msg.rxOff = 0; keyspan_usa26_send_setup()
1642 msg.rxFlush = 1; keyspan_usa26_send_setup()
1643 msg.rxForward = 0; keyspan_usa26_send_setup()
1644 msg.returnStatus = 0; keyspan_usa26_send_setup()
1645 msg.resetDataToggle = 0xff; keyspan_usa26_send_setup()
1650 msg._txOn = 0; keyspan_usa26_send_setup()
1651 msg._txOff = 1; keyspan_usa26_send_setup()
1652 msg.txFlush = 0; keyspan_usa26_send_setup()
1653 msg.txBreak = 0; keyspan_usa26_send_setup()
1654 msg.rxOn = 0; keyspan_usa26_send_setup()
1655 msg.rxOff = 1; keyspan_usa26_send_setup()
1656 msg.rxFlush = 1; keyspan_usa26_send_setup()
1657 msg.rxForward = 0; keyspan_usa26_send_setup()
1658 msg.returnStatus = 0; keyspan_usa26_send_setup()
1659 msg.resetDataToggle = 0; keyspan_usa26_send_setup()
1664 msg._txOn = (!p_priv->break_on); keyspan_usa26_send_setup()
1665 msg._txOff = 0; keyspan_usa26_send_setup()
1666 msg.txFlush = 0; keyspan_usa26_send_setup()
1667 msg.txBreak = (p_priv->break_on); keyspan_usa26_send_setup()
1668 msg.rxOn = 0; keyspan_usa26_send_setup()
1669 msg.rxOff = 0; keyspan_usa26_send_setup()
1670 msg.rxFlush = 0; keyspan_usa26_send_setup()
1671 msg.rxForward = 0; keyspan_usa26_send_setup()
1672 msg.returnStatus = 0; keyspan_usa26_send_setup()
1673 msg.resetDataToggle = 0x0; keyspan_usa26_send_setup()
1677 msg.setTxTriState_setRts = 0xff; keyspan_usa26_send_setup()
1678 msg.txTriState_rts = p_priv->rts_state; keyspan_usa26_send_setup()
1680 msg.setHskoa_setDtr = 0xff; keyspan_usa26_send_setup()
1681 msg.hskoa_dtr = p_priv->dtr_state; keyspan_usa26_send_setup()
1684 memcpy(this_urb->transfer_buffer, &msg, sizeof(msg)); keyspan_usa26_send_setup()
1687 this_urb->transfer_buffer_length = sizeof(msg); keyspan_usa26_send_setup()
1699 struct keyspan_usa28_portControlMessage msg; keyspan_usa28_send_setup() local
1728 memset(&msg, 0, sizeof(struct keyspan_usa28_portControlMessage)); keyspan_usa28_send_setup()
1730 msg.setBaudRate = 1; keyspan_usa28_send_setup()
1732 &msg.baudHi, &msg.baudLo, NULL, keyspan_usa28_send_setup()
1736 msg.baudLo = 0xff; keyspan_usa28_send_setup()
1737 msg.baudHi = 0xb2; /* Values for 9600 baud */ keyspan_usa28_send_setup()
1741 msg.parity = 0; /* XXX for now */ keyspan_usa28_send_setup()
1743 msg.ctsFlowControl = (p_priv->flow_control == flow_cts); keyspan_usa28_send_setup()
1744 msg.xonFlowControl = 0; keyspan_usa28_send_setup()
1747 msg.rts = p_priv->rts_state; keyspan_usa28_send_setup()
1748 msg.dtr = p_priv->dtr_state; keyspan_usa28_send_setup()
1750 msg.forwardingLength = 16; keyspan_usa28_send_setup()
1751 msg.forwardMs = 10; keyspan_usa28_send_setup()
1752 msg.breakThreshold = 45; keyspan_usa28_send_setup()
1753 msg.xonChar = 17; keyspan_usa28_send_setup()
1754 msg.xoffChar = 19; keyspan_usa28_send_setup()
1756 /*msg.returnStatus = 1; keyspan_usa28_send_setup()
1757 msg.resetDataToggle = 0xff;*/ keyspan_usa28_send_setup()
1760 msg._txOn = 1; keyspan_usa28_send_setup()
1761 msg._txOff = 0; keyspan_usa28_send_setup()
1762 msg.txFlush = 0; keyspan_usa28_send_setup()
1763 msg.txForceXoff = 0; keyspan_usa28_send_setup()
1764 msg.txBreak = 0; keyspan_usa28_send_setup()
1765 msg.rxOn = 1; keyspan_usa28_send_setup()
1766 msg.rxOff = 0; keyspan_usa28_send_setup()
1767 msg.rxFlush = 1; keyspan_usa28_send_setup()
1768 msg.rxForward = 0; keyspan_usa28_send_setup()
1769 msg.returnStatus = 0; keyspan_usa28_send_setup()
1770 msg.resetDataToggle = 0xff; keyspan_usa28_send_setup()
1774 msg._txOn = 0; keyspan_usa28_send_setup()
1775 msg._txOff = 1; keyspan_usa28_send_setup()
1776 msg.txFlush = 0; keyspan_usa28_send_setup()
1777 msg.txForceXoff = 0; keyspan_usa28_send_setup()
1778 msg.txBreak = 0; keyspan_usa28_send_setup()
1779 msg.rxOn = 0; keyspan_usa28_send_setup()
1780 msg.rxOff = 1; keyspan_usa28_send_setup()
1781 msg.rxFlush = 1; keyspan_usa28_send_setup()
1782 msg.rxForward = 0; keyspan_usa28_send_setup()
1783 msg.returnStatus = 0; keyspan_usa28_send_setup()
1784 msg.resetDataToggle = 0; keyspan_usa28_send_setup()
1788 msg._txOn = (!p_priv->break_on); keyspan_usa28_send_setup()
1789 msg._txOff = 0; keyspan_usa28_send_setup()
1790 msg.txFlush = 0; keyspan_usa28_send_setup()
1791 msg.txForceXoff = 0; keyspan_usa28_send_setup()
1792 msg.txBreak = (p_priv->break_on); keyspan_usa28_send_setup()
1793 msg.rxOn = 0; keyspan_usa28_send_setup()
1794 msg.rxOff = 0; keyspan_usa28_send_setup()
1795 msg.rxFlush = 0; keyspan_usa28_send_setup()
1796 msg.rxForward = 0; keyspan_usa28_send_setup()
1797 msg.returnStatus = 0; keyspan_usa28_send_setup()
1798 msg.resetDataToggle = 0x0; keyspan_usa28_send_setup()
1802 memcpy(this_urb->transfer_buffer, &msg, sizeof(msg)); keyspan_usa28_send_setup()
1805 this_urb->transfer_buffer_length = sizeof(msg); keyspan_usa28_send_setup()
1818 struct keyspan_usa49_portControlMessage msg; keyspan_usa49_send_setup() local
1855 memset(&msg, 0, sizeof(struct keyspan_usa49_portControlMessage)); keyspan_usa49_send_setup()
1857 msg.portNumber = device_port; keyspan_usa49_send_setup()
1862 msg.setClocking = 0xff; keyspan_usa49_send_setup()
1864 &msg.baudHi, &msg.baudLo, &msg.prescaler, keyspan_usa49_send_setup()
1868 msg.baudLo = 0; keyspan_usa49_send_setup()
1869 msg.baudHi = 125; /* Values for 9600 baud */ keyspan_usa49_send_setup()
1870 msg.prescaler = 10; keyspan_usa49_send_setup()
1872 /* msg.setPrescaler = 0xff; */ keyspan_usa49_send_setup()
1875 msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1; keyspan_usa49_send_setup()
1878 msg.lcr |= USA_DATABITS_5; keyspan_usa49_send_setup()
1881 msg.lcr |= USA_DATABITS_6; keyspan_usa49_send_setup()
1884 msg.lcr |= USA_DATABITS_7; keyspan_usa49_send_setup()
1887 msg.lcr |= USA_DATABITS_8; keyspan_usa49_send_setup()
1892 msg.lcr |= (p_priv->cflag & PARODD) ? keyspan_usa49_send_setup()
1895 msg.setLcr = 0xff; keyspan_usa49_send_setup()
1897 msg.ctsFlowControl = (p_priv->flow_control == flow_cts); keyspan_usa49_send_setup()
1898 msg.xonFlowControl = 0; keyspan_usa49_send_setup()
1899 msg.setFlowControl = 0xff; keyspan_usa49_send_setup()
1901 msg.forwardingLength = 16; keyspan_usa49_send_setup()
1902 msg.xonChar = 17; keyspan_usa49_send_setup()
1903 msg.xoffChar = 19; keyspan_usa49_send_setup()
1907 msg._txOn = 1; keyspan_usa49_send_setup()
1908 msg._txOff = 0; keyspan_usa49_send_setup()
1909 msg.txFlush = 0; keyspan_usa49_send_setup()
1910 msg.txBreak = 0; keyspan_usa49_send_setup()
1911 msg.rxOn = 1; keyspan_usa49_send_setup()
1912 msg.rxOff = 0; keyspan_usa49_send_setup()
1913 msg.rxFlush = 1; keyspan_usa49_send_setup()
1914 msg.rxForward = 0; keyspan_usa49_send_setup()
1915 msg.returnStatus = 0; keyspan_usa49_send_setup()
1916 msg.resetDataToggle = 0xff; keyspan_usa49_send_setup()
1917 msg.enablePort = 1; keyspan_usa49_send_setup()
1918 msg.disablePort = 0; keyspan_usa49_send_setup()
1922 msg._txOn = 0; keyspan_usa49_send_setup()
1923 msg._txOff = 1; keyspan_usa49_send_setup()
1924 msg.txFlush = 0; keyspan_usa49_send_setup()
1925 msg.txBreak = 0; keyspan_usa49_send_setup()
1926 msg.rxOn = 0; keyspan_usa49_send_setup()
1927 msg.rxOff = 1; keyspan_usa49_send_setup()
1928 msg.rxFlush = 1; keyspan_usa49_send_setup()
1929 msg.rxForward = 0; keyspan_usa49_send_setup()
1930 msg.returnStatus = 0; keyspan_usa49_send_setup()
1931 msg.resetDataToggle = 0; keyspan_usa49_send_setup()
1932 msg.enablePort = 0; keyspan_usa49_send_setup()
1933 msg.disablePort = 1; keyspan_usa49_send_setup()
1937 msg._txOn = (!p_priv->break_on); keyspan_usa49_send_setup()
1938 msg._txOff = 0; keyspan_usa49_send_setup()
1939 msg.txFlush = 0; keyspan_usa49_send_setup()
1940 msg.txBreak = (p_priv->break_on); keyspan_usa49_send_setup()
1941 msg.rxOn = 0; keyspan_usa49_send_setup()
1942 msg.rxOff = 0; keyspan_usa49_send_setup()
1943 msg.rxFlush = 0; keyspan_usa49_send_setup()
1944 msg.rxForward = 0; keyspan_usa49_send_setup()
1945 msg.returnStatus = 0; keyspan_usa49_send_setup()
1946 msg.resetDataToggle = 0x0; keyspan_usa49_send_setup()
1947 msg.enablePort = 0; keyspan_usa49_send_setup()
1948 msg.disablePort = 0; keyspan_usa49_send_setup()
1952 msg.setRts = 0xff; keyspan_usa49_send_setup()
1953 msg.rts = p_priv->rts_state; keyspan_usa49_send_setup()
1955 msg.setDtr = 0xff; keyspan_usa49_send_setup()
1956 msg.dtr = p_priv->dtr_state; keyspan_usa49_send_setup()
1969 dr->wLength = cpu_to_le16(sizeof(msg)); keyspan_usa49_send_setup()
1971 memcpy(s_priv->glocont_buf, &msg, sizeof(msg)); keyspan_usa49_send_setup()
1976 sizeof(msg), usa49_glocont_callback, serial); keyspan_usa49_send_setup()
1979 memcpy(this_urb->transfer_buffer, &msg, sizeof(msg)); keyspan_usa49_send_setup()
1982 this_urb->transfer_buffer_length = sizeof(msg); keyspan_usa49_send_setup()
1995 struct keyspan_usa90_portControlMessage msg; keyspan_usa90_send_setup() local
2024 memset(&msg, 0, sizeof(struct keyspan_usa90_portControlMessage)); keyspan_usa90_send_setup()
2029 msg.setClocking = 0x01; keyspan_usa90_send_setup()
2031 &msg.baudHi, &msg.baudLo, &prescaler, 0) == KEYSPAN_INVALID_BAUD_RATE) { keyspan_usa90_send_setup()
2036 &msg.baudHi, &msg.baudLo, &prescaler, 0); keyspan_usa90_send_setup()
2038 msg.setRxMode = 1; keyspan_usa90_send_setup()
2039 msg.setTxMode = 1; keyspan_usa90_send_setup()
2044 msg.rxMode = RXMODE_DMA; keyspan_usa90_send_setup()
2045 msg.txMode = TXMODE_DMA; keyspan_usa90_send_setup()
2047 msg.rxMode = RXMODE_BYHAND; keyspan_usa90_send_setup()
2048 msg.txMode = TXMODE_BYHAND; keyspan_usa90_send_setup()
2051 msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1; keyspan_usa90_send_setup()
2054 msg.lcr |= USA_DATABITS_5; keyspan_usa90_send_setup()
2057 msg.lcr |= USA_DATABITS_6; keyspan_usa90_send_setup()
2060 msg.lcr |= USA_DATABITS_7; keyspan_usa90_send_setup()
2063 msg.lcr |= USA_DATABITS_8; keyspan_usa90_send_setup()
2068 msg.lcr |= (p_priv->cflag & PARODD) ? keyspan_usa90_send_setup()
2073 msg.setLcr = 0x01; keyspan_usa90_send_setup()
2077 msg.txFlowControl = TXFLOW_CTS; keyspan_usa90_send_setup()
2078 msg.setTxFlowControl = 0x01; keyspan_usa90_send_setup()
2079 msg.setRxFlowControl = 0x01; keyspan_usa90_send_setup()
2081 msg.rxForwardingLength = 16; keyspan_usa90_send_setup()
2082 msg.rxForwardingTimeout = 16; keyspan_usa90_send_setup()
2083 msg.txAckSetting = 0; keyspan_usa90_send_setup()
2084 msg.xonChar = 17; keyspan_usa90_send_setup()
2085 msg.xoffChar = 19; keyspan_usa90_send_setup()
2089 msg.portEnabled = 1; keyspan_usa90_send_setup()
2090 msg.rxFlush = 1; keyspan_usa90_send_setup()
2091 msg.txBreak = (p_priv->break_on); keyspan_usa90_send_setup()
2095 msg.portEnabled = 0; keyspan_usa90_send_setup()
2098 msg.portEnabled = 1; keyspan_usa90_send_setup()
2099 msg.txBreak = (p_priv->break_on); keyspan_usa90_send_setup()
2103 msg.setRts = 0x01; keyspan_usa90_send_setup()
2104 msg.rts = p_priv->rts_state; keyspan_usa90_send_setup()
2106 msg.setDtr = 0x01; keyspan_usa90_send_setup()
2107 msg.dtr = p_priv->dtr_state; keyspan_usa90_send_setup()
2110 memcpy(this_urb->transfer_buffer, &msg, sizeof(msg)); keyspan_usa90_send_setup()
2113 this_urb->transfer_buffer_length = sizeof(msg); keyspan_usa90_send_setup()
2125 struct keyspan_usa67_portControlMessage msg; keyspan_usa67_send_setup() local
2157 memset(&msg, 0, sizeof(struct keyspan_usa67_portControlMessage)); keyspan_usa67_send_setup()
2159 msg.port = device_port; keyspan_usa67_send_setup()
2164 msg.setClocking = 0xff; keyspan_usa67_send_setup()
2166 &msg.baudHi, &msg.baudLo, &msg.prescaler, keyspan_usa67_send_setup()
2170 msg.baudLo = 0; keyspan_usa67_send_setup()
2171 msg.baudHi = 125; /* Values for 9600 baud */ keyspan_usa67_send_setup()
2172 msg.prescaler = 10; keyspan_usa67_send_setup()
2174 msg.setPrescaler = 0xff; keyspan_usa67_send_setup()
2177 msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1; keyspan_usa67_send_setup()
2180 msg.lcr |= USA_DATABITS_5; keyspan_usa67_send_setup()
2183 msg.lcr |= USA_DATABITS_6; keyspan_usa67_send_setup()
2186 msg.lcr |= USA_DATABITS_7; keyspan_usa67_send_setup()
2189 msg.lcr |= USA_DATABITS_8; keyspan_usa67_send_setup()
2194 msg.lcr |= (p_priv->cflag & PARODD) ? keyspan_usa67_send_setup()
2197 msg.setLcr = 0xff; keyspan_usa67_send_setup()
2199 msg.ctsFlowControl = (p_priv->flow_control == flow_cts); keyspan_usa67_send_setup()
2200 msg.xonFlowControl = 0; keyspan_usa67_send_setup()
2201 msg.setFlowControl = 0xff; keyspan_usa67_send_setup()
2202 msg.forwardingLength = 16; keyspan_usa67_send_setup()
2203 msg.xonChar = 17; keyspan_usa67_send_setup()
2204 msg.xoffChar = 19; keyspan_usa67_send_setup()
2208 msg._txOn = 1; keyspan_usa67_send_setup()
2209 msg._txOff = 0; keyspan_usa67_send_setup()
2210 msg.txFlush = 0; keyspan_usa67_send_setup()
2211 msg.txBreak = 0; keyspan_usa67_send_setup()
2212 msg.rxOn = 1; keyspan_usa67_send_setup()
2213 msg.rxOff = 0; keyspan_usa67_send_setup()
2214 msg.rxFlush = 1; keyspan_usa67_send_setup()
2215 msg.rxForward = 0; keyspan_usa67_send_setup()
2216 msg.returnStatus = 0; keyspan_usa67_send_setup()
2217 msg.resetDataToggle = 0xff; keyspan_usa67_send_setup()
2220 msg._txOn = 0; keyspan_usa67_send_setup()
2221 msg._txOff = 1; keyspan_usa67_send_setup()
2222 msg.txFlush = 0; keyspan_usa67_send_setup()
2223 msg.txBreak = 0; keyspan_usa67_send_setup()
2224 msg.rxOn = 0; keyspan_usa67_send_setup()
2225 msg.rxOff = 1; keyspan_usa67_send_setup()
2226 msg.rxFlush = 1; keyspan_usa67_send_setup()
2227 msg.rxForward = 0; keyspan_usa67_send_setup()
2228 msg.returnStatus = 0; keyspan_usa67_send_setup()
2229 msg.resetDataToggle = 0; keyspan_usa67_send_setup()
2232 msg._txOn = (!p_priv->break_on); keyspan_usa67_send_setup()
2233 msg._txOff = 0; keyspan_usa67_send_setup()
2234 msg.txFlush = 0; keyspan_usa67_send_setup()
2235 msg.txBreak = (p_priv->break_on); keyspan_usa67_send_setup()
2236 msg.rxOn = 0; keyspan_usa67_send_setup()
2237 msg.rxOff = 0; keyspan_usa67_send_setup()
2238 msg.rxFlush = 0; keyspan_usa67_send_setup()
2239 msg.rxForward = 0; keyspan_usa67_send_setup()
2240 msg.returnStatus = 0; keyspan_usa67_send_setup()
2241 msg.resetDataToggle = 0x0; keyspan_usa67_send_setup()
2245 msg.setTxTriState_setRts = 0xff; keyspan_usa67_send_setup()
2246 msg.txTriState_rts = p_priv->rts_state; keyspan_usa67_send_setup()
2248 msg.setHskoa_setDtr = 0xff; keyspan_usa67_send_setup()
2249 msg.hskoa_dtr = p_priv->dtr_state; keyspan_usa67_send_setup()
2253 memcpy(this_urb->transfer_buffer, &msg, sizeof(msg)); keyspan_usa67_send_setup()
2256 this_urb->transfer_buffer_length = sizeof(msg); keyspan_usa67_send_setup()
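The keyspan_usa49/usa90/usa67 hits above all follow one shape: memset() a port-control structure, fill in clocking, LCR, flow-control and RTS/DTR fields from the port's cached state, then memcpy() the structure into the URB transfer buffer and set transfer_buffer_length to sizeof(msg). Below is a minimal user-space sketch of that fill-and-copy step; struct port_control_msg and its fields are illustrative stand-ins, not the real keyspan_usa90_portControlMessage layout.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for a Keyspan port-control message. */
    struct port_control_msg {
        uint8_t setClocking, baudHi, baudLo;
        uint8_t setLcr, lcr;
        uint8_t setRts, rts, setDtr, dtr;
    };

    int main(void)
    {
        struct port_control_msg msg;
        uint8_t transfer_buffer[64];    /* stands in for this_urb->transfer_buffer */

        memset(&msg, 0, sizeof(msg));   /* start from all-zero, as the driver does */
        msg.setClocking = 0x01;
        msg.baudHi = 125;               /* illustrative values only */
        msg.baudLo = 0;
        msg.setLcr = 0x01;
        msg.lcr = 0x03;                 /* e.g. 8 data bits, 1 stop bit */
        msg.setRts = 0x01;
        msg.rts = 1;
        msg.setDtr = 0x01;
        msg.dtr = 1;

        memcpy(transfer_buffer, &msg, sizeof(msg));
        printf("control message is %zu bytes\n", sizeof(msg));
        return 0;
    }

The paired set* bytes appear to tell the firmware which fields to apply, which is why every value above travels with a companion flag.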
/linux-4.1.27/drivers/media/common/siano/
H A Dsmsendian.c31 struct sms_msg_data *msg = (struct sms_msg_data *)buffer; smsendian_handle_tx_message() local
35 switch (msg->x_msg_header.msg_type) { smsendian_handle_tx_message()
38 msg->msg_data[0] = le32_to_cpu(msg->msg_data[0]); smsendian_handle_tx_message()
43 msg_words = (msg->x_msg_header.msg_length - smsendian_handle_tx_message()
47 msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); smsendian_handle_tx_message()
58 struct sms_msg_data *msg = (struct sms_msg_data *)buffer; smsendian_handle_rx_message() local
62 switch (msg->x_msg_header.msg_type) { smsendian_handle_rx_message()
66 (struct sms_version_res *) msg; smsendian_handle_rx_message()
80 msg_words = (msg->x_msg_header.msg_length - smsendian_handle_rx_message()
84 msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); smsendian_handle_rx_message()
93 void smsendian_handle_message_header(void *msg) smsendian_handle_message_header() argument
96 struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg; smsendian_handle_message_header()
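smsendian_handle_tx_message() and smsendian_handle_rx_message() above walk the 32-bit payload of a sms_msg_data and byte-swap each word with le32_to_cpu(), using (msg_length - header size) / 4 as the word count. Here is a self-contained sketch of that word-wise fixup; demo_msg, demo_msg_hdr and le32_to_cpu_demo are hypothetical user-space stand-ins for the kernel types and helper.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* User-space stand-in for the kernel's le32_to_cpu(). */
    static uint32_t le32_to_cpu_demo(uint32_t v)
    {
        const uint8_t *b = (const uint8_t *)&v;

        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }

    /* Hypothetical, simplified layout: msg_length covers header plus payload. */
    struct demo_msg_hdr { uint16_t msg_type; uint16_t msg_length; };
    struct demo_msg { struct demo_msg_hdr hdr; uint32_t msg_data[8]; };

    int main(void)
    {
        struct demo_msg m = { { 1, sizeof(struct demo_msg_hdr) + 2 * 4 },
                              { 0x01000000u, 0x02000000u } };
        size_t i, words = (m.hdr.msg_length - sizeof(m.hdr)) / 4;

        for (i = 0; i < words; i++)     /* same loop shape as the handlers above */
            m.msg_data[i] = le32_to_cpu_demo(m.msg_data[i]);

        printf("word 0 = 0x%x, word 1 = 0x%x\n",
               (unsigned)m.msg_data[0], (unsigned)m.msg_data[1]);
        return 0;
    }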
H A Dsmsdvb-main.c712 struct sms_msg_hdr msg; smsdvb_send_statistics_request() local
720 msg.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; smsdvb_send_statistics_request()
721 msg.msg_dst_id = HIF_TASK; smsdvb_send_statistics_request()
722 msg.msg_flags = 0; smsdvb_send_statistics_request()
723 msg.msg_length = sizeof(msg); smsdvb_send_statistics_request()
732 msg.msg_type = MSG_SMS_GET_STATISTICS_EX_REQ; smsdvb_send_statistics_request()
734 msg.msg_type = MSG_SMS_GET_STATISTICS_REQ; smsdvb_send_statistics_request()
737 msg.msg_type = MSG_SMS_GET_STATISTICS_REQ; smsdvb_send_statistics_request()
740 rc = smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), smsdvb_send_statistics_request()
863 struct sms_msg_hdr msg; smsdvb_dvbt_set_frontend() member in struct:__anon5510
865 } msg; smsdvb_dvbt_set_frontend() local
874 msg.msg.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; smsdvb_dvbt_set_frontend()
875 msg.msg.msg_dst_id = HIF_TASK; smsdvb_dvbt_set_frontend()
876 msg.msg.msg_flags = 0; smsdvb_dvbt_set_frontend()
877 msg.msg.msg_type = MSG_SMS_RF_TUNE_REQ; smsdvb_dvbt_set_frontend()
878 msg.msg.msg_length = sizeof(msg); smsdvb_dvbt_set_frontend()
879 msg.Data[0] = c->frequency; smsdvb_dvbt_set_frontend()
880 msg.Data[2] = 12000000; smsdvb_dvbt_set_frontend()
887 msg.Data[1] = BW_8_MHZ; smsdvb_dvbt_set_frontend()
890 msg.Data[1] = BW_7_MHZ; smsdvb_dvbt_set_frontend()
893 msg.Data[1] = BW_6_MHZ; smsdvb_dvbt_set_frontend()
906 ret = smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), smsdvb_dvbt_set_frontend()
918 return smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), smsdvb_dvbt_set_frontend()
933 struct sms_msg_hdr msg; smsdvb_isdbt_set_frontend() member in struct:__anon5511
935 } msg; smsdvb_isdbt_set_frontend() local
939 msg.msg.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; smsdvb_isdbt_set_frontend()
940 msg.msg.msg_dst_id = HIF_TASK; smsdvb_isdbt_set_frontend()
941 msg.msg.msg_flags = 0; smsdvb_isdbt_set_frontend()
942 msg.msg.msg_type = MSG_SMS_ISDBT_TUNE_REQ; smsdvb_isdbt_set_frontend()
943 msg.msg.msg_length = sizeof(msg); smsdvb_isdbt_set_frontend()
951 msg.Data[0] = c->frequency; smsdvb_isdbt_set_frontend()
952 msg.Data[1] = BW_ISDBT_1SEG; smsdvb_isdbt_set_frontend()
953 msg.Data[2] = 12000000; smsdvb_isdbt_set_frontend()
954 msg.Data[3] = c->isdbt_sb_segment_idx; smsdvb_isdbt_set_frontend()
959 msg.Data[1] = BW_ISDBT_13SEG; smsdvb_isdbt_set_frontend()
961 msg.Data[1] = BW_ISDBT_3SEG; smsdvb_isdbt_set_frontend()
963 msg.Data[1] = BW_ISDBT_13SEG; smsdvb_isdbt_set_frontend()
977 ret = smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), smsdvb_isdbt_set_frontend()
988 return smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), smsdvb_isdbt_set_frontend()
/linux-4.1.27/arch/x86/kernel/apic/
H A Dhtirq.c26 struct ht_irq_msg msg; target_ht_irq() local
28 fetch_ht_irq_msg(irq, &msg); target_ht_irq()
30 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); target_ht_irq()
31 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); target_ht_irq()
33 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); target_ht_irq()
34 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); target_ht_irq()
36 write_ht_irq_msg(irq, &msg); target_ht_irq()
67 struct ht_irq_msg msg; arch_setup_ht_irq() local
84 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); arch_setup_ht_irq()
86 msg.address_lo = arch_setup_ht_irq()
99 write_ht_irq_msg(irq, &msg); arch_setup_ht_irq()
H A Dmsi.c25 struct msi_msg *msg, u8 hpet_id) native_compose_msi_msg()
29 msg->address_hi = MSI_ADDR_BASE_HI; native_compose_msi_msg()
32 msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest); native_compose_msi_msg()
34 msg->address_lo = native_compose_msi_msg()
44 msg->data = native_compose_msi_msg()
54 struct msi_msg *msg, u8 hpet_id) msi_compose_msg()
73 x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id); msi_compose_msg()
82 struct msi_msg msg; msi_set_affinity() local
90 __get_cached_msi_msg(data->msi_desc, &msg); msi_set_affinity()
92 msg.data &= ~MSI_DATA_VECTOR_MASK; msi_set_affinity()
93 msg.data |= MSI_DATA_VECTOR(cfg->vector); msi_set_affinity()
94 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; msi_set_affinity()
95 msg.address_lo |= MSI_ADDR_DEST_ID(dest); msi_set_affinity()
97 __pci_write_msi_msg(data->msi_desc, &msg); msi_set_affinity()
120 struct msi_msg msg; setup_msi_irq() local
124 ret = msi_compose_msg(dev, irq, &msg, -1); setup_msi_irq()
135 pci_write_msi_msg(irq, &msg); setup_msi_irq()
185 struct msi_msg msg; dmar_msi_set_affinity() local
192 dmar_msi_read(irq, &msg); dmar_msi_set_affinity()
194 msg.data &= ~MSI_DATA_VECTOR_MASK; dmar_msi_set_affinity()
195 msg.data |= MSI_DATA_VECTOR(cfg->vector); dmar_msi_set_affinity()
196 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; dmar_msi_set_affinity()
197 msg.address_lo |= MSI_ADDR_DEST_ID(dest); dmar_msi_set_affinity()
198 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); dmar_msi_set_affinity()
200 dmar_msi_write(irq, &msg); dmar_msi_set_affinity()
218 struct msi_msg msg; arch_setup_dmar_msi() local
220 ret = msi_compose_msg(NULL, irq, &msg, -1); arch_setup_dmar_msi()
223 dmar_msi_write(irq, &msg); arch_setup_dmar_msi()
239 struct msi_msg msg; hpet_msi_set_affinity() local
247 hpet_msi_read(data->handler_data, &msg); hpet_msi_set_affinity()
249 msg.data &= ~MSI_DATA_VECTOR_MASK; hpet_msi_set_affinity()
250 msg.data |= MSI_DATA_VECTOR(cfg->vector); hpet_msi_set_affinity()
251 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; hpet_msi_set_affinity()
252 msg.address_lo |= MSI_ADDR_DEST_ID(dest); hpet_msi_set_affinity()
254 hpet_msi_write(data->handler_data, &msg); hpet_msi_set_affinity()
272 struct msi_msg msg; default_setup_hpet_msi() local
275 ret = msi_compose_msg(NULL, irq, &msg, id); default_setup_hpet_msi()
279 hpet_msi_write(irq_get_handler_data(irq), &msg); default_setup_hpet_msi()
23 native_compose_msi_msg(struct pci_dev *pdev, unsigned int irq, unsigned int dest, struct msi_msg *msg, u8 hpet_id) native_compose_msi_msg() argument
53 msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg, u8 hpet_id) msi_compose_msg() argument
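msi_set_affinity(), dmar_msi_set_affinity() and hpet_msi_set_affinity() above retarget an interrupt with the same read-modify-write: fetch the current msi_msg, clear the vector bits in data and the destination-ID bits in address_lo, OR in the new vector and destination, then write the message back. The sketch below shows just that masking step; the DEMO_* constants and struct demo_msi_msg are illustrative, not the real x86 MSI definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative masks; the real ones live in the x86 MSI headers. */
    #define DEMO_DATA_VECTOR_MASK   0x000000ffu
    #define DEMO_ADDR_DEST_ID_MASK  0x000ff000u
    #define DEMO_ADDR_DEST_ID(d)    (((uint32_t)(d) << 12) & DEMO_ADDR_DEST_ID_MASK)

    struct demo_msi_msg { uint32_t address_hi, address_lo, data; };

    /* Same shape as msi_set_affinity(): patch vector and destination, keep the rest. */
    static void retarget(struct demo_msi_msg *msg, uint8_t vector, uint8_t dest)
    {
        msg->data &= ~DEMO_DATA_VECTOR_MASK;
        msg->data |= vector;
        msg->address_lo &= ~DEMO_ADDR_DEST_ID_MASK;
        msg->address_lo |= DEMO_ADDR_DEST_ID(dest);
    }

    int main(void)
    {
        struct demo_msi_msg msg = { 0, 0xfee00000u, 0x000040a1u };

        retarget(&msg, 0xb2, 3);
        printf("address_lo=0x%08x data=0x%08x\n",
               (unsigned)msg.address_lo, (unsigned)msg.data);
        return 0;
    }

target_ht_irq() in htirq.c above applies the same pattern to the HyperTransport address words.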
/linux-4.1.27/drivers/iio/common/ssp_sensors/
H A Dssp_spi.c88 struct ssp_msg *msg; ssp_create_msg() local
90 msg = kzalloc(sizeof(*msg), GFP_KERNEL); ssp_create_msg()
91 if (!msg) ssp_create_msg()
99 msg->buffer = kzalloc(SSP_HEADER_SIZE_ALIGNED + len, ssp_create_msg()
101 if (!msg->buffer) { ssp_create_msg()
102 kfree(msg); ssp_create_msg()
106 msg->length = len; ssp_create_msg()
107 msg->options = opt; ssp_create_msg()
109 memcpy(msg->buffer, &h, SSP_HEADER_SIZE); ssp_create_msg()
111 return msg; ssp_create_msg()
187 static int ssp_do_transfer(struct ssp_data *data, struct ssp_msg *msg, ssp_do_transfer() argument
195 const bool use_no_irq = msg->length == 0; ssp_do_transfer()
200 msg->done = done; ssp_do_transfer()
208 status = spi_write(data->spi, msg->buffer, SSP_HEADER_SIZE); ssp_do_transfer()
217 list_add_tail(&msg->list, &data->pending_list); ssp_do_transfer()
225 list_del(&msg->list); ssp_do_transfer()
238 list_del(&msg->list); ssp_do_transfer()
254 struct ssp_msg *msg) ssp_spi_sync_command()
256 return ssp_do_transfer(data, msg, NULL, 0); ssp_spi_sync_command()
259 static int ssp_spi_sync(struct ssp_data *data, struct ssp_msg *msg, ssp_spi_sync() argument
264 if (WARN_ON(!msg->length)) ssp_spi_sync()
267 return ssp_do_transfer(data, msg, &done, timeout); ssp_spi_sync()
345 struct ssp_msg *msg, *n; ssp_irq_msg() local
371 list_for_each_entry_safe(msg, n, &data->pending_list, list) { ssp_irq_msg()
372 if (msg->options == msg_options) { ssp_irq_msg()
373 list_del(&msg->list); ssp_irq_msg()
406 &msg->buffer[SSP_HEADER_SIZE_ALIGNED], ssp_irq_msg()
407 msg->length); ssp_irq_msg()
411 &msg->buffer[SSP_HEADER_SIZE_ALIGNED], ssp_irq_msg()
412 msg->length); ssp_irq_msg()
414 msg->options = ssp_irq_msg()
416 msg->length = 1; ssp_irq_msg()
418 list_add_tail(&msg->list, &data->pending_list); ssp_irq_msg()
423 if (msg->done) ssp_irq_msg()
424 if (!completion_done(msg->done)) ssp_irq_msg()
425 complete(msg->done); ssp_irq_msg()
447 dev_err(SSP_DEV, "unknown msg type\n"); ssp_irq_msg()
456 struct ssp_msg *msg, *n; ssp_clean_pending_list() local
459 list_for_each_entry_safe(msg, n, &data->pending_list, list) { ssp_clean_pending_list()
460 list_del(&msg->list); ssp_clean_pending_list()
462 if (msg->done) ssp_clean_pending_list()
463 if (!completion_done(msg->done)) ssp_clean_pending_list()
464 complete(msg->done); ssp_clean_pending_list()
472 struct ssp_msg *msg; ssp_command() local
474 msg = ssp_create_msg(command, 0, SSP_AP2HUB_WRITE, arg); ssp_command()
475 if (!msg) ssp_command()
480 ret = ssp_spi_sync_command(data, msg); ssp_command()
481 ssp_clean_msg(msg); ssp_command()
490 struct ssp_msg *msg; ssp_send_instruction() local
503 msg = ssp_create_msg(inst, length + 2, SSP_AP2HUB_WRITE, 0); ssp_send_instruction()
504 if (!msg) ssp_send_instruction()
507 ssp_fill_buffer(msg, 0, &sensor_type, 1); ssp_send_instruction()
508 ssp_fill_buffer(msg, 1, send_buf, length); ssp_send_instruction()
513 ret = ssp_spi_sync(data, msg, 1000); ssp_send_instruction()
514 ssp_clean_msg(msg); ssp_send_instruction()
523 struct ssp_msg *msg; ssp_get_chipid() local
525 msg = ssp_create_msg(SSP_MSG2SSP_AP_WHOAMI, 1, SSP_AP2HUB_READ, 0); ssp_get_chipid()
526 if (!msg) ssp_get_chipid()
529 ret = ssp_spi_sync(data, msg, 1000); ssp_get_chipid()
531 buffer = SSP_GET_BUFFER_AT_INDEX(msg, 0); ssp_get_chipid()
533 ssp_clean_msg(msg); ssp_get_chipid()
541 struct ssp_msg *msg; ssp_set_magnetic_matrix() local
543 msg = ssp_create_msg(SSP_MSG2SSP_AP_SET_MAGNETIC_STATIC_MATRIX, ssp_set_magnetic_matrix()
546 if (!msg) ssp_set_magnetic_matrix()
549 ssp_fill_buffer(msg, 0, data->sensorhub_info->mag_table, ssp_set_magnetic_matrix()
552 ret = ssp_spi_sync(data, msg, 1000); ssp_set_magnetic_matrix()
553 ssp_clean_msg(msg); ssp_set_magnetic_matrix()
564 struct ssp_msg *msg = ssp_create_msg(SSP_MSG2SSP_AP_SENSOR_SCANNING, 4, ssp_get_sensor_scanning_info() local
566 if (!msg) ssp_get_sensor_scanning_info()
569 ret = ssp_spi_sync(data, msg, 1000); ssp_get_sensor_scanning_info()
575 ssp_get_buffer(msg, 0, &result, 4); ssp_get_sensor_scanning_info()
581 ssp_clean_msg(msg); ssp_get_sensor_scanning_info()
590 struct ssp_msg *msg = ssp_create_msg(SSP_MSG2SSP_AP_FIRMWARE_REV, 4, ssp_get_firmware_rev() local
592 if (!msg) ssp_get_firmware_rev()
595 ret = ssp_spi_sync(data, msg, 1000); ssp_get_firmware_rev()
602 ssp_get_buffer(msg, 0, &result, 4); ssp_get_firmware_rev()
606 ssp_clean_msg(msg); ssp_get_firmware_rev()
253 ssp_spi_sync_command(struct ssp_data *data, struct ssp_msg *msg) ssp_spi_sync_command() argument
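The ssp_spi.c hits trace the full life cycle of an ssp_msg: ssp_create_msg() allocates the struct plus an aligned header-and-payload buffer, callers fill the payload with ssp_fill_buffer(), ssp_spi_sync()/ssp_spi_sync_command() transfer it (queueing it on pending_list and waiting on a completion), and ssp_clean_msg() releases it. A condensed allocate/fill/free sketch follows; the demo_* names are hypothetical and the SPI transfer and completion handling are deliberately omitted.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define DEMO_HEADER_SIZE 8

    struct demo_ssp_msg {
        uint16_t length;
        uint16_t options;
        uint8_t *buffer;                /* header followed by payload */
    };

    static struct demo_ssp_msg *demo_create_msg(uint16_t cmd, uint16_t len, uint16_t opt)
    {
        struct demo_ssp_msg *msg = calloc(1, sizeof(*msg));

        if (!msg)
            return NULL;
        msg->buffer = calloc(1, DEMO_HEADER_SIZE + len);
        if (!msg->buffer) {             /* free the envelope if the buffer fails */
            free(msg);
            return NULL;
        }
        msg->length = len;
        msg->options = opt;
        memcpy(msg->buffer, &cmd, sizeof(cmd));   /* header carries the command */
        return msg;
    }

    static void demo_clean_msg(struct demo_ssp_msg *msg)
    {
        free(msg->buffer);
        free(msg);
    }

    int main(void)
    {
        struct demo_ssp_msg *msg = demo_create_msg(0x55, 4, 1);

        if (!msg)
            return 1;
        memcpy(msg->buffer + DEMO_HEADER_SIZE, "ping", 4);  /* fill the payload */
        /* a real driver would now transfer msg->buffer over SPI and wait */
        printf("sending %u payload bytes\n", (unsigned)msg->length);
        demo_clean_msg(msg);
        return 0;
    }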
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dpack_generic.c95 static inline int lustre_msg_check_version_v2(struct lustre_msg_v2 *msg, lustre_msg_check_version_v2() argument
98 __u32 ver = lustre_msg_get_version(msg); lustre_msg_check_version_v2()
102 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version) lustre_msg_check_version() argument
104 switch (msg->lm_magic) { lustre_msg_check_version()
106 CERROR("msg v1 not supported - please upgrade you system\n"); lustre_msg_check_version()
109 return lustre_msg_check_version_v2(msg, version); lustre_msg_check_version()
111 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_check_version()
179 int lustre_packed_msg_size(struct lustre_msg *msg) lustre_packed_msg_size() argument
181 switch (msg->lm_magic) { lustre_packed_msg_size()
183 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens); lustre_packed_msg_size()
185 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_packed_msg_size()
191 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens, lustre_init_msg_v2() argument
197 msg->lm_bufcount = count; lustre_init_msg_v2()
199 msg->lm_magic = LUSTRE_MSG_MAGIC_V2; lustre_init_msg_v2()
202 msg->lm_buflens[i] = lens[i]; lustre_init_msg_v2()
207 ptr = (char *)msg + lustre_msg_hdr_size_v2(count); lustre_init_msg_v2()
408 CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n", lustre_msg_buf_v2()
415 CERROR("msg %p buffer[%d] size %d too small (required %d, opc=%d)\n", lustre_msg_buf_v2()
434 LASSERTF(0, "incorrect message magic: %08x(msg:%p)\n", m->lm_magic, m); lustre_msg_buf()
440 int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, int segment, lustre_shrink_msg_v2() argument
446 LASSERT(msg); lustre_shrink_msg_v2()
447 LASSERT(msg->lm_bufcount > segment); lustre_shrink_msg_v2()
448 LASSERT(msg->lm_buflens[segment] >= newlen); lustre_shrink_msg_v2()
450 if (msg->lm_buflens[segment] == newlen) lustre_shrink_msg_v2()
453 if (move_data && msg->lm_bufcount > segment + 1) { lustre_shrink_msg_v2()
454 tail = lustre_msg_buf_v2(msg, segment + 1, 0); lustre_shrink_msg_v2()
455 for (n = segment + 1; n < msg->lm_bufcount; n++) lustre_shrink_msg_v2()
456 tail_len += cfs_size_round(msg->lm_buflens[n]); lustre_shrink_msg_v2()
459 msg->lm_buflens[segment] = newlen; lustre_shrink_msg_v2()
462 newpos = lustre_msg_buf_v2(msg, segment + 1, 0); lustre_shrink_msg_v2()
468 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens); lustre_shrink_msg_v2()
472 * for @msg, shrink @segment to size @newlen. if @move_data is non-zero,
479 * return new msg size after shrinking.
484 * + caller should NOT keep pointers to msg buffers which higher than @segment
487 int lustre_shrink_msg(struct lustre_msg *msg, int segment, lustre_shrink_msg() argument
490 switch (msg->lm_magic) { lustre_shrink_msg()
492 return lustre_shrink_msg_v2(msg, segment, newlen, move_data); lustre_shrink_msg()
494 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_shrink_msg()
648 CERROR("bad lustre msg magic: %08x\n", lustre_unpack_req_ptlrpc_body()
660 CERROR("bad lustre msg magic: %08x\n", lustre_unpack_rep_ptlrpc_body()
745 CERROR("can't unpack string in msg %p buffer[%d]\n", m, index); lustre_msg_string()
752 CERROR("can't unpack non-NULL terminated string in msg %p buffer[%d] len %d\n", lustre_msg_string()
759 CERROR("can't unpack short string in msg %p buffer[%d] len %d: strlen %d\n", lustre_msg_string()
764 CERROR("can't unpack oversized string in msg %p buffer[%d] len %d strlen %d: max %d expected\n", lustre_msg_string()
774 static inline void *__lustre_swab_buf(struct lustre_msg *msg, int index, __lustre_swab_buf() argument
779 LASSERT(msg != NULL); __lustre_swab_buf()
780 switch (msg->lm_magic) { __lustre_swab_buf()
782 ptr = lustre_msg_buf_v2(msg, index, min_size); __lustre_swab_buf()
785 CERROR("incorrect message magic: %08x\n", msg->lm_magic); __lustre_swab_buf()
794 static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg) lustre_msg_ptlrpc_body() argument
796 return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF, lustre_msg_ptlrpc_body()
800 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg) lustre_msghdr_get_flags() argument
802 switch (msg->lm_magic) { lustre_msghdr_get_flags()
808 return msg->lm_flags; lustre_msghdr_get_flags()
810 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msghdr_get_flags()
816 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags) lustre_msghdr_set_flags() argument
818 switch (msg->lm_magic) { lustre_msghdr_set_flags()
822 msg->lm_flags = flags; lustre_msghdr_set_flags()
825 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msghdr_set_flags()
829 __u32 lustre_msg_get_flags(struct lustre_msg *msg) lustre_msg_get_flags() argument
831 switch (msg->lm_magic) { lustre_msg_get_flags()
833 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_flags()
835 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_flags()
848 void lustre_msg_add_flags(struct lustre_msg *msg, int flags) lustre_msg_add_flags() argument
850 switch (msg->lm_magic) { lustre_msg_add_flags()
852 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_add_flags()
853 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_add_flags()
858 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_add_flags()
863 void lustre_msg_set_flags(struct lustre_msg *msg, int flags) lustre_msg_set_flags() argument
865 switch (msg->lm_magic) { lustre_msg_set_flags()
867 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_flags()
868 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_flags()
873 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_flags()
878 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags) lustre_msg_clear_flags() argument
880 switch (msg->lm_magic) { lustre_msg_clear_flags()
882 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_clear_flags()
883 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_clear_flags()
888 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_clear_flags()
893 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg) lustre_msg_get_op_flags() argument
895 switch (msg->lm_magic) { lustre_msg_get_op_flags()
897 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_op_flags()
899 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_op_flags()
910 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags) lustre_msg_add_op_flags() argument
912 switch (msg->lm_magic) { lustre_msg_add_op_flags()
914 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_add_op_flags()
915 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_add_op_flags()
920 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_add_op_flags()
925 void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags) lustre_msg_set_op_flags() argument
927 switch (msg->lm_magic) { lustre_msg_set_op_flags()
929 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_op_flags()
930 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_op_flags()
935 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_op_flags()
940 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg) lustre_msg_get_handle() argument
942 switch (msg->lm_magic) { lustre_msg_get_handle()
944 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_handle()
946 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_handle()
952 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_handle()
958 __u32 lustre_msg_get_type(struct lustre_msg *msg) lustre_msg_get_type() argument
960 switch (msg->lm_magic) { lustre_msg_get_type()
962 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_type()
964 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_type()
970 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_type()
976 __u32 lustre_msg_get_version(struct lustre_msg *msg) lustre_msg_get_version() argument
978 switch (msg->lm_magic) { lustre_msg_get_version()
980 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_version()
982 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_version()
988 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_version()
994 void lustre_msg_add_version(struct lustre_msg *msg, int version) lustre_msg_add_version() argument
996 switch (msg->lm_magic) { lustre_msg_add_version()
998 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_add_version()
999 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_add_version()
1004 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_add_version()
1009 __u32 lustre_msg_get_opc(struct lustre_msg *msg) lustre_msg_get_opc() argument
1011 switch (msg->lm_magic) { lustre_msg_get_opc()
1013 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_opc()
1015 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_opc()
1021 CERROR("incorrect message magic: %08x(msg:%p)\n", msg->lm_magic, msg); lustre_msg_get_opc()
1028 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg) lustre_msg_get_last_xid() argument
1030 switch (msg->lm_magic) { lustre_msg_get_last_xid()
1032 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_last_xid()
1034 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_last_xid()
1040 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_last_xid()
1046 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg) lustre_msg_get_last_committed() argument
1048 switch (msg->lm_magic) { lustre_msg_get_last_committed()
1050 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_last_committed()
1052 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_last_committed()
1058 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_last_committed()
1064 __u64 *lustre_msg_get_versions(struct lustre_msg *msg) lustre_msg_get_versions() argument
1066 switch (msg->lm_magic) { lustre_msg_get_versions()
1070 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_versions()
1072 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_versions()
1078 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_versions()
1084 __u64 lustre_msg_get_transno(struct lustre_msg *msg) lustre_msg_get_transno() argument
1086 switch (msg->lm_magic) { lustre_msg_get_transno()
1088 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_transno()
1090 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_transno()
1096 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_transno()
1102 int lustre_msg_get_status(struct lustre_msg *msg) lustre_msg_get_status() argument
1104 switch (msg->lm_magic) { lustre_msg_get_status()
1106 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_status()
1108 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_status()
1121 __u64 lustre_msg_get_slv(struct lustre_msg *msg) lustre_msg_get_slv() argument
1123 switch (msg->lm_magic) { lustre_msg_get_slv()
1125 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_slv()
1127 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_slv()
1133 CERROR("invalid msg magic %08x\n", msg->lm_magic); lustre_msg_get_slv()
1140 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv) lustre_msg_set_slv() argument
1142 switch (msg->lm_magic) { lustre_msg_set_slv()
1144 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_slv()
1146 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_slv()
1153 CERROR("invalid msg magic %x\n", msg->lm_magic); lustre_msg_set_slv()
1159 __u32 lustre_msg_get_limit(struct lustre_msg *msg) lustre_msg_get_limit() argument
1161 switch (msg->lm_magic) { lustre_msg_get_limit()
1163 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_limit()
1165 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_limit()
1171 CERROR("invalid msg magic %x\n", msg->lm_magic); lustre_msg_get_limit()
1178 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit) lustre_msg_set_limit() argument
1180 switch (msg->lm_magic) { lustre_msg_set_limit()
1182 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_limit()
1184 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_limit()
1191 CERROR("invalid msg magic %08x\n", msg->lm_magic); lustre_msg_set_limit()
1197 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg) lustre_msg_get_conn_cnt() argument
1199 switch (msg->lm_magic) { lustre_msg_get_conn_cnt()
1201 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_conn_cnt()
1203 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_conn_cnt()
1209 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_conn_cnt()
1215 int lustre_msg_is_v1(struct lustre_msg *msg) lustre_msg_is_v1() argument
1217 switch (msg->lm_magic) { lustre_msg_is_v1()
1227 __u32 lustre_msg_get_magic(struct lustre_msg *msg) lustre_msg_get_magic() argument
1229 switch (msg->lm_magic) { lustre_msg_get_magic()
1231 return msg->lm_magic; lustre_msg_get_magic()
1233 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_magic()
1239 __u32 lustre_msg_get_timeout(struct lustre_msg *msg) lustre_msg_get_timeout() argument
1241 switch (msg->lm_magic) { lustre_msg_get_timeout()
1246 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_timeout()
1248 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_timeout()
1255 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_timeout()
1260 __u32 lustre_msg_get_service_time(struct lustre_msg *msg) lustre_msg_get_service_time() argument
1262 switch (msg->lm_magic) { lustre_msg_get_service_time()
1267 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_get_service_time()
1269 CERROR("invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_get_service_time()
1276 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_service_time()
1281 char *lustre_msg_get_jobid(struct lustre_msg *msg) lustre_msg_get_jobid() argument
1283 switch (msg->lm_magic) { lustre_msg_get_jobid()
1289 lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF, lustre_msg_get_jobid()
1297 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_jobid()
1303 __u32 lustre_msg_get_cksum(struct lustre_msg *msg) lustre_msg_get_cksum() argument
1305 switch (msg->lm_magic) { lustre_msg_get_cksum()
1307 return msg->lm_cksum; lustre_msg_get_cksum()
1309 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_get_cksum()
1314 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg) lustre_msg_calc_cksum() argument
1316 switch (msg->lm_magic) { lustre_msg_calc_cksum()
1318 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_calc_cksum()
1322 lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF), lustre_msg_calc_cksum()
1327 CERROR("incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_calc_cksum()
1332 void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle) lustre_msg_set_handle() argument
1334 switch (msg->lm_magic) { lustre_msg_set_handle()
1336 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_handle()
1337 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_handle()
1342 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_handle()
1347 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type) lustre_msg_set_type() argument
1349 switch (msg->lm_magic) { lustre_msg_set_type()
1351 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_type()
1352 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_type()
1357 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_type()
1362 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc) lustre_msg_set_opc() argument
1364 switch (msg->lm_magic) { lustre_msg_set_opc()
1366 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_opc()
1367 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_opc()
1372 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_opc()
1377 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid) lustre_msg_set_last_xid() argument
1379 switch (msg->lm_magic) { lustre_msg_set_last_xid()
1381 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_last_xid()
1382 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_last_xid()
1387 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_last_xid()
1392 void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed) lustre_msg_set_last_committed() argument
1394 switch (msg->lm_magic) { lustre_msg_set_last_committed()
1396 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_last_committed()
1397 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_last_committed()
1402 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_last_committed()
1407 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions) lustre_msg_set_versions() argument
1409 switch (msg->lm_magic) { lustre_msg_set_versions()
1413 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_versions()
1414 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_versions()
1422 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_versions()
1427 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno) lustre_msg_set_transno() argument
1429 switch (msg->lm_magic) { lustre_msg_set_transno()
1431 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_transno()
1432 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_transno()
1437 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_transno()
1442 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status) lustre_msg_set_status() argument
1444 switch (msg->lm_magic) { lustre_msg_set_status()
1446 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_status()
1447 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_status()
1452 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_status()
1457 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt) lustre_msg_set_conn_cnt() argument
1459 switch (msg->lm_magic) { lustre_msg_set_conn_cnt()
1461 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_conn_cnt()
1462 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_conn_cnt()
1467 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_conn_cnt()
1472 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout) lustre_msg_set_timeout() argument
1474 switch (msg->lm_magic) { lustre_msg_set_timeout()
1478 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_timeout()
1479 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_timeout()
1484 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_timeout()
1488 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time) lustre_msg_set_service_time() argument
1490 switch (msg->lm_magic) { lustre_msg_set_service_time()
1494 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); lustre_msg_set_service_time()
1495 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_service_time()
1500 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_service_time()
1504 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid) lustre_msg_set_jobid() argument
1506 switch (msg->lm_magic) { lustre_msg_set_jobid()
1510 __u32 opc = lustre_msg_get_opc(msg); lustre_msg_set_jobid()
1519 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF, lustre_msg_set_jobid()
1521 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); lustre_msg_set_jobid()
1530 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_jobid()
1535 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum) lustre_msg_set_cksum() argument
1537 switch (msg->lm_magic) { lustre_msg_set_cksum()
1541 msg->lm_cksum = cksum; lustre_msg_set_cksum()
1544 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); lustre_msg_set_cksum()
2355 CERROR("bad lustre msg magic: %#08X\n", req_ptlrpc_body_swabbed()
H A Dsec_plain.c99 static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed) plain_unpack_bsd() argument
103 if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed)) plain_unpack_bsd()
106 bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE); plain_unpack_bsd()
109 lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF)); plain_unpack_bsd()
193 struct lustre_msg *msg = req->rq_reqbuf; plain_ctx_sign() local
196 msg->lm_secflvr = req->rq_flvr.sf_rpc; plain_ctx_sign()
198 phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0); plain_ctx_sign()
209 req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount, plain_ctx_sign()
210 msg->lm_buflens); plain_ctx_sign()
217 struct lustre_msg *msg = req->rq_repdata; plain_ctx_verify() local
222 if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) { plain_ctx_verify()
223 CERROR("unexpected reply buf count %u\n", msg->lm_bufcount); plain_ctx_verify()
229 phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); plain_ctx_verify()
256 lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0), plain_ctx_verify()
257 lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF), plain_ctx_verify()
259 if (cksum != msg->lm_cksum) { plain_ctx_verify()
262 cpu_to_le32(cksum), msg->lm_cksum); plain_ctx_verify()
277 if (plain_unpack_bsd(msg, swabbed)) plain_ctx_verify()
282 req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0); plain_ctx_verify()
283 req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF); plain_ctx_verify()
648 /* compute new embedded msg size. */ plain_enlarge_reqbuf()
655 /* compute new wrapper msg size. */ plain_enlarge_reqbuf()
713 struct lustre_msg *msg = req->rq_reqbuf; plain_accept() local
728 if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) { plain_accept()
729 CERROR("unexpected request buf count %u\n", msg->lm_bufcount); plain_accept()
735 phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); plain_accept()
755 if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF, plain_accept()
762 req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0); plain_accept()
766 if (plain_unpack_bsd(msg, swabbed)) plain_accept()
772 req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0); plain_accept()
773 req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF]; plain_accept()
837 struct lustre_msg_v2 *msg = rs->rs_repbuf; plain_authorize() local
842 LASSERT(msg); plain_authorize()
844 if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF]) plain_authorize()
845 len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF, plain_authorize()
848 len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens); plain_authorize()
850 msg->lm_secflvr = req->rq_flvr.sf_rpc; plain_authorize()
852 phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0); plain_authorize()
871 lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0), plain_authorize()
872 lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF), plain_authorize()
873 NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize); plain_authorize()
/linux-4.1.27/drivers/platform/chrome/
H A Dcros_ec_sysfs.c69 struct cros_ec_command msg = { 0 }; store_ec_reboot() local
71 (struct ec_params_reboot_ec *)msg.outdata; store_ec_reboot()
106 msg.command = EC_CMD_REBOOT_EC; store_ec_reboot()
107 msg.outsize = sizeof(param); store_ec_reboot()
108 ret = cros_ec_cmd_xfer(ec, &msg); store_ec_reboot()
111 if (msg.result != EC_RES_SUCCESS) { store_ec_reboot()
112 dev_dbg(ec->dev, "EC result %d\n", msg.result); store_ec_reboot()
126 struct cros_ec_command msg = { 0 }; show_ec_version() local
132 msg.command = EC_CMD_GET_VERSION; show_ec_version()
133 msg.insize = sizeof(*r_ver); show_ec_version()
134 ret = cros_ec_cmd_xfer(ec, &msg); show_ec_version()
137 if (msg.result != EC_RES_SUCCESS) show_ec_version()
139 "ERROR: EC returned %d\n", msg.result); show_ec_version()
141 r_ver = (struct ec_response_get_version *)msg.indata; show_ec_version()
155 msg.command = EC_CMD_GET_BUILD_INFO; show_ec_version()
156 msg.insize = sizeof(msg.indata); show_ec_version()
157 ret = cros_ec_cmd_xfer(ec, &msg); show_ec_version()
161 else if (msg.result != EC_RES_SUCCESS) show_ec_version()
163 "Build info: EC error %d\n", msg.result); show_ec_version()
165 msg.indata[sizeof(msg.indata) - 1] = '\0'; show_ec_version()
167 "Build info: %s\n", msg.indata); show_ec_version()
171 msg.command = EC_CMD_GET_CHIP_INFO; show_ec_version()
172 msg.insize = sizeof(*r_chip); show_ec_version()
173 ret = cros_ec_cmd_xfer(ec, &msg); show_ec_version()
177 else if (msg.result != EC_RES_SUCCESS) show_ec_version()
179 "Chip info: EC error %d\n", msg.result); show_ec_version()
181 r_chip = (struct ec_response_get_chip_info *)msg.indata; show_ec_version()
195 msg.command = EC_CMD_GET_BOARD_VERSION; show_ec_version()
196 msg.insize = sizeof(*r_board); show_ec_version()
197 ret = cros_ec_cmd_xfer(ec, &msg); show_ec_version()
201 else if (msg.result != EC_RES_SUCCESS) show_ec_version()
203 "Board version: EC error %d\n", msg.result); show_ec_version()
205 r_board = (struct ec_response_board_version *)msg.indata; show_ec_version()
219 struct cros_ec_command msg = { 0 }; show_ec_flashinfo() local
224 msg.command = EC_CMD_FLASH_INFO; show_ec_flashinfo()
225 msg.insize = sizeof(*resp); show_ec_flashinfo()
226 ret = cros_ec_cmd_xfer(ec, &msg); show_ec_flashinfo()
229 if (msg.result != EC_RES_SUCCESS) show_ec_flashinfo()
231 "ERROR: EC returned %d\n", msg.result); show_ec_flashinfo()
233 resp = (struct ec_response_flash_info *)msg.indata; show_ec_flashinfo()
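The cros_ec_sysfs.c hits share one calling convention: zero a cros_ec_command, set command, outsize and insize, place parameters in outdata, call cros_ec_cmd_xfer(), then check both the return value and msg.result before casting indata to the response structure. A small sketch of that round trip follows; demo_cmd_xfer() is a stub standing in for cros_ec_cmd_xfer(), and the demo_* structures are trimmed-down illustrations.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DEMO_RES_SUCCESS 0

    /* Hypothetical, trimmed-down command buffer. */
    struct demo_ec_command {
        uint16_t command;
        uint16_t outsize, insize;
        uint32_t result;
        uint8_t outdata[32];
        uint8_t indata[32];
    };

    struct demo_resp_version { char version_string[16]; };

    /* Stand-in for cros_ec_cmd_xfer(): pretend the EC answered. */
    static int demo_cmd_xfer(struct demo_ec_command *msg)
    {
        struct demo_resp_version r = { "demo-ec 1.0" };

        msg->result = DEMO_RES_SUCCESS;
        memcpy(msg->indata, &r, sizeof(r));
        return (int)sizeof(r);
    }

    int main(void)
    {
        struct demo_ec_command msg;
        struct demo_resp_version *r_ver;
        int ret;

        memset(&msg, 0, sizeof(msg));
        msg.command = 0x02;             /* illustrative command code */
        msg.insize = sizeof(*r_ver);
        ret = demo_cmd_xfer(&msg);
        if (ret < 0 || msg.result != DEMO_RES_SUCCESS)
            return 1;                   /* check both, as the driver does */
        r_ver = (struct demo_resp_version *)msg.indata;
        printf("EC version: %s\n", r_ver->version_string);
        return 0;
    }

cros_ec_lightbar.c below wraps the same setup in its INIT_MSG() helper.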
H A Dcros_ec_lightbar.c105 struct cros_ec_command msg = INIT_MSG(param, resp); get_lightbar_version() local
108 param = (struct ec_params_lightbar *)msg.outdata; get_lightbar_version()
110 ret = cros_ec_cmd_xfer(ec, &msg); get_lightbar_version()
114 switch (msg.result) { get_lightbar_version()
124 resp = (struct ec_response_lightbar *)msg.indata; get_lightbar_version()
162 struct cros_ec_command msg = INIT_MSG(param, resp); brightness_store() local
170 param = (struct ec_params_lightbar *)msg.outdata; brightness_store()
177 ret = cros_ec_cmd_xfer(ec, &msg); brightness_store()
181 if (msg.result != EC_RES_SUCCESS) brightness_store()
200 struct cros_ec_command msg = INIT_MSG(param, resp); led_rgb_store() local
218 param = (struct ec_params_lightbar *)msg.outdata; led_rgb_store()
234 ret = cros_ec_cmd_xfer(ec, &msg); led_rgb_store()
238 if (msg.result != EC_RES_SUCCESS) led_rgb_store()
264 struct cros_ec_command msg = INIT_MSG(param, resp); sequence_show() local
268 param = (struct ec_params_lightbar *)msg.outdata; sequence_show()
274 ret = cros_ec_cmd_xfer(ec, &msg); sequence_show()
278 if (msg.result != EC_RES_SUCCESS) sequence_show()
280 "ERROR: EC returned %d\n", msg.result); sequence_show()
282 resp = (struct ec_response_lightbar *)msg.indata; sequence_show()
295 struct cros_ec_command msg = INIT_MSG(param, resp); sequence_store() local
314 param = (struct ec_params_lightbar *)msg.outdata; sequence_store()
321 ret = cros_ec_cmd_xfer(ec, &msg); sequence_store()
325 if (msg.result != EC_RES_SUCCESS) sequence_store()
H A Dcros_ec_lpc.c50 struct cros_ec_command *msg) cros_ec_cmd_xfer_lpc()
57 if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE || cros_ec_cmd_xfer_lpc()
58 msg->insize > EC_PROTO2_MAX_PARAM_SIZE) { cros_ec_cmd_xfer_lpc()
61 msg->outsize, msg->insize); cros_ec_cmd_xfer_lpc()
67 args.command_version = msg->version; cros_ec_cmd_xfer_lpc()
68 args.data_size = msg->outsize; cros_ec_cmd_xfer_lpc()
71 csum = msg->command + args.flags + cros_ec_cmd_xfer_lpc()
75 for (i = 0; i < msg->outsize; i++) { cros_ec_cmd_xfer_lpc()
76 outb(msg->outdata[i], EC_LPC_ADDR_HOST_PARAM + i); cros_ec_cmd_xfer_lpc()
77 csum += msg->outdata[i]; cros_ec_cmd_xfer_lpc()
88 outb(msg->command, EC_LPC_ADDR_HOST_CMD); cros_ec_cmd_xfer_lpc()
97 msg->result = inb(EC_LPC_ADDR_HOST_DATA); cros_ec_cmd_xfer_lpc()
99 switch (msg->result) { cros_ec_cmd_xfer_lpc()
105 msg->command); cros_ec_cmd_xfer_lpc()
109 msg->command, msg->result); cros_ec_cmd_xfer_lpc()
118 if (args.data_size > msg->insize) { cros_ec_cmd_xfer_lpc()
121 args.data_size, msg->insize); cros_ec_cmd_xfer_lpc()
127 csum = msg->command + args.flags + cros_ec_cmd_xfer_lpc()
132 msg->indata[i] = inb(EC_LPC_ADDR_HOST_PARAM + i); cros_ec_cmd_xfer_lpc()
133 csum += msg->indata[i]; cros_ec_cmd_xfer_lpc()
49 cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec, struct cros_ec_command *msg) cros_ec_cmd_xfer_lpc() argument
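cros_ec_cmd_xfer_lpc() above moves the command over LPC I/O ports and keeps a running 8-bit checksum in both directions: command plus flags (plus version and size in the real code) plus every parameter byte going out, and the same accumulation over indata coming back. The helper below shows only that additive checksum; the port I/O is replaced by a plain buffer and the names are illustrative.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* 8-bit additive checksum over command, flags and payload, as in the LPC path. */
    static uint8_t demo_lpc_csum(uint8_t command, uint8_t flags,
                                 const uint8_t *payload, size_t len)
    {
        uint8_t csum = command + flags;
        size_t i;

        for (i = 0; i < len; i++)
            csum += payload[i];
        return csum;
    }

    int main(void)
    {
        uint8_t out[4] = { 0x01, 0x02, 0x03, 0x04 };

        printf("checksum = 0x%02x\n",
               (unsigned)demo_lpc_csum(0x4d /* command */, 0x00, out, sizeof(out)));
        return 0;
    }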
/linux-4.1.27/net/tipc/
H A Dnetlink_compat.c66 int (*format)(struct tipc_nl_compat_msg *msg, struct nlattr **attrs);
71 int (*transcode)(struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
168 struct tipc_nl_compat_msg *msg, __tipc_nl_compat_dumpit()
185 buf->sk = msg->dst_sk; __tipc_nl_compat_dumpit()
199 err = (*cmd->format)(msg, attrs); nlmsg_for_each_msg()
203 if (tipc_skb_tailroom(msg->rep) <= 1) { nlmsg_for_each_msg()
223 if ((TIPC_SKB_MAX - msg->rep->len) <= 1) {
224 char *tail = skb_tail_pointer(msg->rep);
238 struct tipc_nl_compat_msg *msg) tipc_nl_compat_dumpit()
243 if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type)) tipc_nl_compat_dumpit()
246 msg->rep = tipc_tlv_alloc(msg->rep_size); tipc_nl_compat_dumpit()
247 if (!msg->rep) tipc_nl_compat_dumpit()
250 if (msg->rep_type) tipc_nl_compat_dumpit()
251 tipc_tlv_init(msg->rep, msg->rep_type); tipc_nl_compat_dumpit()
254 (*cmd->header)(msg); tipc_nl_compat_dumpit()
258 kfree_skb(msg->rep); tipc_nl_compat_dumpit()
262 err = __tipc_nl_compat_dumpit(cmd, msg, arg); tipc_nl_compat_dumpit()
264 kfree_skb(msg->rep); tipc_nl_compat_dumpit()
272 struct tipc_nl_compat_msg *msg) __tipc_nl_compat_doit()
284 err = (*cmd->transcode)(trans_buf, msg); __tipc_nl_compat_doit()
307 doit_buf->sk = msg->dst_sk; __tipc_nl_compat_doit()
324 struct tipc_nl_compat_msg *msg) tipc_nl_compat_doit()
328 if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type)) tipc_nl_compat_doit()
331 err = __tipc_nl_compat_doit(cmd, msg); tipc_nl_compat_doit()
336 msg->rep = tipc_tlv_alloc(0); tipc_nl_compat_doit()
337 if (!msg->rep) tipc_nl_compat_doit()
343 static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg, tipc_nl_compat_bearer_dump() argument
351 return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, tipc_nl_compat_bearer_dump()
357 struct tipc_nl_compat_msg *msg) tipc_nl_compat_bearer_enable()
363 b = (struct tipc_bearer_config *)TLV_DATA(msg->req); tipc_nl_compat_bearer_enable()
389 struct tipc_nl_compat_msg *msg) tipc_nl_compat_bearer_disable()
394 name = (char *)TLV_DATA(msg->req); tipc_nl_compat_bearer_disable()
413 static void __fill_bc_link_stat(struct tipc_nl_compat_msg *msg, __fill_bc_link_stat() argument
416 tipc_tlv_sprintf(msg->rep, " Window:%u packets\n", __fill_bc_link_stat()
419 tipc_tlv_sprintf(msg->rep, __fill_bc_link_stat()
427 tipc_tlv_sprintf(msg->rep, __fill_bc_link_stat()
435 tipc_tlv_sprintf(msg->rep, " RX naks:%u defs:%u dups:%u\n", __fill_bc_link_stat()
440 tipc_tlv_sprintf(msg->rep, " TX naks:%u acks:%u dups:%u\n", __fill_bc_link_stat()
445 tipc_tlv_sprintf(msg->rep, __fill_bc_link_stat()
452 static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, tipc_nl_compat_link_stat_dump() argument
468 name = (char *)TLV_DATA(msg->req); tipc_nl_compat_link_stat_dump()
472 tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n", tipc_nl_compat_link_stat_dump()
476 __fill_bc_link_stat(msg, prop, stats); tipc_nl_compat_link_stat_dump()
481 tipc_tlv_sprintf(msg->rep, " ACTIVE"); tipc_nl_compat_link_stat_dump()
483 tipc_tlv_sprintf(msg->rep, " STANDBY"); tipc_nl_compat_link_stat_dump()
485 tipc_tlv_sprintf(msg->rep, " DEFUNCT"); tipc_nl_compat_link_stat_dump()
487 tipc_tlv_sprintf(msg->rep, " MTU:%u Priority:%u", tipc_nl_compat_link_stat_dump()
491 tipc_tlv_sprintf(msg->rep, " Tolerance:%u ms Window:%u packets\n", tipc_nl_compat_link_stat_dump()
495 tipc_tlv_sprintf(msg->rep, tipc_nl_compat_link_stat_dump()
504 tipc_tlv_sprintf(msg->rep, tipc_nl_compat_link_stat_dump()
513 tipc_tlv_sprintf(msg->rep, tipc_nl_compat_link_stat_dump()
519 tipc_tlv_sprintf(msg->rep, tipc_nl_compat_link_stat_dump()
530 tipc_tlv_sprintf(msg->rep, "-16384:%u%% -32768:%u%% -66000:%u%%\n", tipc_nl_compat_link_stat_dump()
538 tipc_tlv_sprintf(msg->rep, tipc_nl_compat_link_stat_dump()
546 tipc_tlv_sprintf(msg->rep, tipc_nl_compat_link_stat_dump()
554 tipc_tlv_sprintf(msg->rep, tipc_nl_compat_link_stat_dump()
563 static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, tipc_nl_compat_link_dump() argument
575 return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, tipc_nl_compat_link_dump()
580 struct tipc_nl_compat_msg *msg) tipc_nl_compat_link_set()
586 lc = (struct tipc_link_config *)TLV_DATA(msg->req); tipc_nl_compat_link_set()
599 if (msg->cmd == TIPC_CMD_SET_LINK_PRI) { tipc_nl_compat_link_set()
602 } else if (msg->cmd == TIPC_CMD_SET_LINK_TOL) { tipc_nl_compat_link_set()
605 } else if (msg->cmd == TIPC_CMD_SET_LINK_WINDOW) { tipc_nl_compat_link_set()
617 struct tipc_nl_compat_msg *msg) tipc_nl_compat_link_reset_stats()
622 name = (char *)TLV_DATA(msg->req); tipc_nl_compat_link_reset_stats()
636 static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg) tipc_nl_compat_name_table_dump_header() argument
648 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); tipc_nl_compat_name_table_dump_header()
655 tipc_tlv_sprintf(msg->rep, header[i]); tipc_nl_compat_name_table_dump_header()
656 tipc_tlv_sprintf(msg->rep, "\n"); tipc_nl_compat_name_table_dump_header()
661 static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, tipc_nl_compat_name_table_dump() argument
678 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); tipc_nl_compat_name_table_dump()
693 tipc_tlv_sprintf(msg->rep, "%-10u ", tipc_nl_compat_name_table_dump()
699 tipc_tlv_sprintf(msg->rep, "%-10u %-10u ", tipc_nl_compat_name_table_dump()
709 tipc_tlv_sprintf(msg->rep, "%-26s ", port_str); tipc_nl_compat_name_table_dump()
714 tipc_tlv_sprintf(msg->rep, "%-10u %s", tipc_nl_compat_name_table_dump()
718 tipc_tlv_sprintf(msg->rep, "\n"); tipc_nl_compat_name_table_dump()
723 static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, __tipc_nl_compat_publ_dump() argument
736 tipc_tlv_sprintf(msg->rep, " {%u,%u}", type, lower); __tipc_nl_compat_publ_dump()
738 tipc_tlv_sprintf(msg->rep, " {%u,%u,%u}", type, lower, upper); __tipc_nl_compat_publ_dump()
743 static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock) tipc_nl_compat_publ_dump() argument
775 err = __tipc_nl_compat_dumpit(&dump, msg, args); tipc_nl_compat_publ_dump()
782 static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg, tipc_nl_compat_sk_dump() argument
792 tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); tipc_nl_compat_sk_dump()
802 tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>", tipc_nl_compat_sk_dump()
809 tipc_tlv_sprintf(msg->rep, " via {%u,%u}\n", tipc_nl_compat_sk_dump()
813 tipc_tlv_sprintf(msg->rep, "\n"); tipc_nl_compat_sk_dump()
815 tipc_tlv_sprintf(msg->rep, " bound to"); tipc_nl_compat_sk_dump()
817 err = tipc_nl_compat_publ_dump(msg, sock_ref); tipc_nl_compat_sk_dump()
821 tipc_tlv_sprintf(msg->rep, "\n"); tipc_nl_compat_sk_dump()
826 static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg, tipc_nl_compat_media_dump() argument
834 return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, tipc_nl_compat_media_dump()
839 static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg, tipc_nl_compat_node_dump() argument
850 return tipc_add_tlv(msg->rep, TIPC_TLV_NODE_INFO, &node_info, tipc_nl_compat_node_dump()
855 struct tipc_nl_compat_msg *msg) tipc_nl_compat_net_set()
860 val = ntohl(*(__be32 *)TLV_DATA(msg->req)); tipc_nl_compat_net_set()
866 if (msg->cmd == TIPC_CMD_SET_NODE_ADDR) { tipc_nl_compat_net_set()
869 } else if (msg->cmd == TIPC_CMD_SET_NETID) { tipc_nl_compat_net_set()
878 static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg, tipc_nl_compat_net_dump() argument
887 return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); tipc_nl_compat_net_dump()
890 static int tipc_cmd_show_stats_compat(struct tipc_nl_compat_msg *msg) tipc_cmd_show_stats_compat() argument
892 msg->rep = tipc_tlv_alloc(ULTRA_STRING_MAX_LEN); tipc_cmd_show_stats_compat()
893 if (!msg->rep) tipc_cmd_show_stats_compat()
896 tipc_tlv_init(msg->rep, TIPC_TLV_ULTRA_STRING); tipc_cmd_show_stats_compat()
897 tipc_tlv_sprintf(msg->rep, "TIPC version " TIPC_MOD_VER "\n"); tipc_cmd_show_stats_compat()
902 static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg) tipc_nl_compat_handle() argument
910 switch (msg->cmd) { tipc_nl_compat_handle()
912 msg->rep = tipc_tlv_alloc(0); tipc_nl_compat_handle()
913 if (!msg->rep) tipc_nl_compat_handle()
917 msg->rep_size = MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME); tipc_nl_compat_handle()
920 return tipc_nl_compat_dumpit(&dump, msg); tipc_nl_compat_handle()
922 msg->req_type = TIPC_TLV_BEARER_CONFIG; tipc_nl_compat_handle()
925 return tipc_nl_compat_doit(&doit, msg); tipc_nl_compat_handle()
927 msg->req_type = TIPC_TLV_BEARER_NAME; tipc_nl_compat_handle()
930 return tipc_nl_compat_doit(&doit, msg); tipc_nl_compat_handle()
932 msg->req_type = TIPC_TLV_LINK_NAME; tipc_nl_compat_handle()
933 msg->rep_size = ULTRA_STRING_MAX_LEN; tipc_nl_compat_handle()
934 msg->rep_type = TIPC_TLV_ULTRA_STRING; tipc_nl_compat_handle()
937 return tipc_nl_compat_dumpit(&dump, msg); tipc_nl_compat_handle()
939 msg->req_type = TIPC_TLV_NET_ADDR; tipc_nl_compat_handle()
940 msg->rep_size = ULTRA_STRING_MAX_LEN; tipc_nl_compat_handle()
943 return tipc_nl_compat_dumpit(&dump, msg); tipc_nl_compat_handle()
947 msg->req_type = TIPC_TLV_LINK_CONFIG; tipc_nl_compat_handle()
950 return tipc_nl_compat_doit(&doit, msg); tipc_nl_compat_handle()
952 msg->req_type = TIPC_TLV_LINK_NAME; tipc_nl_compat_handle()
955 return tipc_nl_compat_doit(&doit, msg); tipc_nl_compat_handle()
957 msg->req_type = TIPC_TLV_NAME_TBL_QUERY; tipc_nl_compat_handle()
958 msg->rep_size = ULTRA_STRING_MAX_LEN; tipc_nl_compat_handle()
959 msg->rep_type = TIPC_TLV_ULTRA_STRING; tipc_nl_compat_handle()
963 return tipc_nl_compat_dumpit(&dump, msg); tipc_nl_compat_handle()
965 msg->rep_size = ULTRA_STRING_MAX_LEN; tipc_nl_compat_handle()
966 msg->rep_type = TIPC_TLV_ULTRA_STRING; tipc_nl_compat_handle()
969 return tipc_nl_compat_dumpit(&dump, msg); tipc_nl_compat_handle()
971 msg->rep_size = MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME); tipc_nl_compat_handle()
974 return tipc_nl_compat_dumpit(&dump, msg); tipc_nl_compat_handle()
976 msg->rep_size = ULTRA_STRING_MAX_LEN; tipc_nl_compat_handle()
979 return tipc_nl_compat_dumpit(&dump, msg); tipc_nl_compat_handle()
981 msg->req_type = TIPC_TLV_NET_ADDR; tipc_nl_compat_handle()
984 return tipc_nl_compat_doit(&doit, msg); tipc_nl_compat_handle()
986 msg->req_type = TIPC_TLV_UNSIGNED; tipc_nl_compat_handle()
989 return tipc_nl_compat_doit(&doit, msg); tipc_nl_compat_handle()
991 msg->rep_size = sizeof(u32); tipc_nl_compat_handle()
994 return tipc_nl_compat_dumpit(&dump, msg); tipc_nl_compat_handle()
996 return tipc_cmd_show_stats_compat(msg); tipc_nl_compat_handle()
1006 struct tipc_nl_compat_msg msg; tipc_nl_compat_recv() local
1012 memset(&msg, 0, sizeof(msg)); tipc_nl_compat_recv()
1015 msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN; tipc_nl_compat_recv()
1016 msg.cmd = req_userhdr->cmd; tipc_nl_compat_recv()
1017 msg.dst_sk = info->dst_sk; tipc_nl_compat_recv()
1019 if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) { tipc_nl_compat_recv()
1020 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN); tipc_nl_compat_recv()
1026 if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) { tipc_nl_compat_recv()
1027 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); tipc_nl_compat_recv()
1032 err = tipc_nl_compat_handle(&msg); tipc_nl_compat_recv()
1034 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); tipc_nl_compat_recv()
1036 msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR); tipc_nl_compat_recv()
1038 if (!msg.rep) tipc_nl_compat_recv()
1042 skb_push(msg.rep, len); tipc_nl_compat_recv()
1043 rep_nlh = nlmsg_hdr(msg.rep); tipc_nl_compat_recv()
1045 rep_nlh->nlmsg_len = msg.rep->len; tipc_nl_compat_recv()
1046 genlmsg_unicast(net, msg.rep, NETLINK_CB(skb).portid); tipc_nl_compat_recv()
167 __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, struct tipc_nl_compat_msg *msg, struct sk_buff *arg) __tipc_nl_compat_dumpit() argument
237 tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, struct tipc_nl_compat_msg *msg) tipc_nl_compat_dumpit() argument
271 __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, struct tipc_nl_compat_msg *msg) __tipc_nl_compat_doit() argument
323 tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, struct tipc_nl_compat_msg *msg) tipc_nl_compat_doit() argument
356 tipc_nl_compat_bearer_enable(struct sk_buff *skb, struct tipc_nl_compat_msg *msg) tipc_nl_compat_bearer_enable() argument
388 tipc_nl_compat_bearer_disable(struct sk_buff *skb, struct tipc_nl_compat_msg *msg) tipc_nl_compat_bearer_disable() argument
579 tipc_nl_compat_link_set(struct sk_buff *skb, struct tipc_nl_compat_msg *msg) tipc_nl_compat_link_set() argument
616 tipc_nl_compat_link_reset_stats(struct sk_buff *skb, struct tipc_nl_compat_msg *msg) tipc_nl_compat_link_reset_stats() argument
854 tipc_nl_compat_net_set(struct sk_buff *skb, struct tipc_nl_compat_msg *msg) tipc_nl_compat_net_set() argument
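tipc_nl_compat_handle() above dispatches every legacy command the same way: pick the expected request TLV type and reply size for msg, point a dump or doit descriptor at the right helpers, then call tipc_nl_compat_dumpit() or tipc_nl_compat_doit(). The sketch below mirrors only that per-command setup and dispatch; all DEMO_*/demo_* names are hypothetical and the TLV plumbing is left out.

    #include <stdio.h>

    /* Illustrative legacy command numbers and TLV types. */
    enum { DEMO_CMD_GET_BEARER_NAMES = 1, DEMO_CMD_ENABLE_BEARER = 2 };
    enum { DEMO_TLV_NONE = 0, DEMO_TLV_BEARER_CONFIG = 7 };

    struct demo_compat_msg { int cmd; int req_type; int rep_size; };

    static int demo_bearer_dump(struct demo_compat_msg *msg) { (void)msg; return 0; }
    static int demo_bearer_enable(struct demo_compat_msg *msg) { (void)msg; return 0; }

    /* Same shape as tipc_nl_compat_handle(): per-command setup, then dispatch. */
    static int demo_handle(struct demo_compat_msg *msg)
    {
        int (*handler)(struct demo_compat_msg *);

        switch (msg->cmd) {
        case DEMO_CMD_GET_BEARER_NAMES:     /* read-only query: dump-style path */
            msg->rep_size = 1024;
            handler = demo_bearer_dump;
            break;
        case DEMO_CMD_ENABLE_BEARER:        /* state change: doit-style path */
            msg->req_type = DEMO_TLV_BEARER_CONFIG;
            handler = demo_bearer_enable;
            break;
        default:
            return -1;                      /* unknown legacy command */
        }
        return handler(msg);
    }

    int main(void)
    {
        struct demo_compat_msg msg = { DEMO_CMD_GET_BEARER_NAMES, DEMO_TLV_NONE, 0 };

        printf("handle() -> %d\n", demo_handle(&msg));
        return 0;
    }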
H A Dmsg.c2 * net/tipc/msg.c: TIPC message header routines
39 #include "msg.h"
93 struct tipc_msg *msg; tipc_msg_create() local
100 msg = buf_msg(buf); tipc_msg_create()
101 tipc_msg_init(onode, msg, user, type, hdr_sz, dnode); tipc_msg_create()
102 msg_set_size(msg, hdr_sz + data_sz); tipc_msg_create()
103 msg_set_origport(msg, oport); tipc_msg_create()
104 msg_set_destport(msg, dport); tipc_msg_create()
105 msg_set_errcode(msg, errcode); tipc_msg_create()
107 msg_set_orignode(msg, onode); tipc_msg_create()
108 msg_set_destnode(msg, dnode); tipc_msg_create()
125 struct tipc_msg *msg; tipc_buf_append() local
133 msg = buf_msg(frag); tipc_buf_append()
134 fragid = msg_type(msg); tipc_buf_append()
136 skb_pull(frag, msg_hdr_sz(msg)); tipc_buf_append()
205 struct tipc_msg *msg; tipc_msg_validate() local
219 msg = buf_msg(skb); tipc_msg_validate()
220 if (unlikely(msg_version(msg) != TIPC_VERSION)) tipc_msg_validate()
223 msz = msg_size(msg); tipc_msg_validate()
349 struct tipc_msg *msg = buf_msg(skb); tipc_msg_bundle() local
351 unsigned int msz = msg_size(msg); tipc_msg_bundle()
355 if (likely(msg_user(msg) == MSG_FRAGMENTER)) tipc_msg_bundle()
364 if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL)) tipc_msg_bundle()
366 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) tipc_msg_bundle()
387 * @pos: position in outer message of msg to be extracted.
388 * Returns position of next msg
394 struct tipc_msg *msg; tipc_msg_extract() local
401 msg = buf_msg(skb); tipc_msg_extract()
402 offset = msg_hdr_sz(msg) + *pos; tipc_msg_extract()
403 if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE))) tipc_msg_extract()
436 struct tipc_msg *msg = buf_msg(*skb); tipc_msg_make_bundle() local
437 u32 msz = msg_size(msg); tipc_msg_make_bundle()
440 if (msg_user(msg) == MSG_FRAGMENTER) tipc_msg_make_bundle()
442 if (msg_user(msg) == TUNNEL_PROTOCOL) tipc_msg_make_bundle()
444 if (msg_user(msg) == BCAST_PROTOCOL) tipc_msg_make_bundle()
455 tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0, tipc_msg_make_bundle()
457 msg_set_seqno(bmsg, msg_seqno(msg)); tipc_msg_make_bundle()
458 msg_set_ack(bmsg, msg_ack(msg)); tipc_msg_make_bundle()
459 msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); tipc_msg_make_bundle()
476 struct tipc_msg *msg = buf_msg(buf); tipc_msg_reverse() local
478 uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE); tipc_msg_reverse()
482 msg = buf_msg(buf); tipc_msg_reverse()
483 if (msg_dest_droppable(msg)) tipc_msg_reverse()
485 if (msg_errcode(msg)) tipc_msg_reverse()
487 memcpy(&ohdr, msg, msg_hdr_sz(msg)); tipc_msg_reverse()
488 msg_set_errcode(msg, err); tipc_msg_reverse()
489 msg_set_origport(msg, msg_destport(&ohdr)); tipc_msg_reverse()
490 msg_set_destport(msg, msg_origport(&ohdr)); tipc_msg_reverse()
491 msg_set_prevnode(msg, own_addr); tipc_msg_reverse()
492 if (!msg_short(msg)) { tipc_msg_reverse()
493 msg_set_orignode(msg, msg_destnode(&ohdr)); tipc_msg_reverse()
494 msg_set_destnode(msg, msg_orignode(&ohdr)); tipc_msg_reverse()
496 msg_set_size(msg, msg_hdr_sz(msg) + rdsz); tipc_msg_reverse()
497 skb_trim(buf, msg_size(msg)); tipc_msg_reverse()
518 struct tipc_msg *msg = buf_msg(skb); tipc_msg_lookup_dest() local
522 if (!msg_isdata(msg)) tipc_msg_lookup_dest()
524 if (!msg_named(msg)) tipc_msg_lookup_dest()
526 if (msg_errcode(msg)) tipc_msg_lookup_dest()
531 if (msg_reroute_cnt(msg)) tipc_msg_lookup_dest()
533 *dnode = addr_domain(net, msg_lookup_scope(msg)); tipc_msg_lookup_dest()
534 dport = tipc_nametbl_translate(net, msg_nametype(msg), tipc_msg_lookup_dest()
535 msg_nameinst(msg), dnode); tipc_msg_lookup_dest()
538 msg_incr_reroute_cnt(msg); tipc_msg_lookup_dest()
540 msg_set_prevnode(msg, own_addr); tipc_msg_lookup_dest()
541 msg_set_destnode(msg, *dnode); tipc_msg_lookup_dest()
542 msg_set_destport(msg, dport); tipc_msg_lookup_dest()
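
Note: the msg.c hits above fill a header field by field on creation and, in tipc_msg_reverse(), copy the original header and swap its origin and destination fields before bouncing the message back with an error code. The following stand-alone sketch shows only that swap-and-return pattern; the header layout is made up for the example and is not the on-wire TIPC header.

    /* Illustrative sketch: bounce a message to its sender by copying the
     * original header, swapping origin/destination, and stamping an error. */
    #include <stdint.h>
    #include <stdio.h>

    struct hdr {
        uint32_t orig_node, dest_node;
        uint32_t orig_port, dest_port;
        uint32_t errcode;
    };

    static void msg_reverse(struct hdr *h, uint32_t err)
    {
        struct hdr old = *h;            /* keep the original for the swap   */

        h->errcode   = err;
        h->orig_node = old.dest_node;   /* we become the origin ...         */
        h->orig_port = old.dest_port;
        h->dest_node = old.orig_node;   /* ... and the sender the new target */
        h->dest_port = old.orig_port;
    }

    int main(void)
    {
        struct hdr h = { .orig_node = 1, .dest_node = 2,
                         .orig_port = 10, .dest_port = 20 };

        msg_reverse(&h, 5);
        printf("now %u:%u -> %u:%u err=%u\n",
               (unsigned)h.orig_node, (unsigned)h.orig_port,
               (unsigned)h.dest_node, (unsigned)h.dest_port,
               (unsigned)h.errcode);
        return 0;
    }
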
H A Dlink.c172 struct tipc_msg *msg = buf_msg(skb); link_timeout() local
173 u32 length = msg_size(msg); link_timeout()
175 if ((msg_user(msg) == MSG_FRAGMENTER) && link_timeout()
176 (msg_type(msg) == FIRST_FRAGMENT)) { link_timeout()
177 length = msg_size(msg_get_wrapped(msg)); link_timeout()
229 struct tipc_msg *msg; tipc_link_create() local
271 msg = l_ptr->pmsg; tipc_link_create()
272 tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, tipc_link_create()
274 msg_set_size(msg, sizeof(l_ptr->proto_msg)); tipc_link_create()
275 msg_set_session(msg, (tn->random & 0xffff)); tipc_link_create()
276 msg_set_bearer_id(msg, b_ptr->identity); tipc_link_create()
277 strcpy((char *)msg_data(msg), if_name); tipc_link_create()
336 * Create pseudo msg to send back to user when congestion abates
341 struct tipc_msg *msg = buf_msg(skb_peek(list)); link_schedule_user() local
342 int imp = msg_importance(msg); link_schedule_user()
343 u32 oport = msg_origport(msg); link_schedule_user()
702 struct tipc_msg *msg = buf_msg(skb_peek(list)); __tipc_link_xmit() local
704 unsigned int imp = msg_importance(msg); __tipc_link_xmit()
714 /* Match backlog limit against msg importance: */ __tipc_link_xmit()
718 if (unlikely(msg_size(msg) > mtu)) { __tipc_link_xmit()
725 msg = buf_msg(skb); skb_queue_walk_safe()
726 msg_set_seqno(msg, seqno); skb_queue_walk_safe()
727 msg_set_ack(msg, ack); skb_queue_walk_safe()
728 msg_set_bcast_ack(msg, bc_last_in); skb_queue_walk_safe()
838 struct tipc_msg *msg; tipc_link_sync_xmit() local
844 msg = buf_msg(skb); tipc_link_sync_xmit()
845 tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG, tipc_link_sync_xmit() local
847 msg_set_last_bcast(msg, link->owner->bclink.acked); tipc_link_sync_xmit()
861 struct tipc_msg *msg = buf_msg(buf); tipc_link_sync_rcv() local
863 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg); tipc_link_sync_rcv()
879 struct tipc_msg *msg; tipc_link_push_packets() local
886 msg = buf_msg(skb); tipc_link_push_packets()
887 link->backlog[msg_importance(msg)].len--; tipc_link_push_packets()
888 msg_set_ack(msg, ack); tipc_link_push_packets()
889 msg_set_bcast_ack(msg, link->owner->bclink.last_in); tipc_link_push_packets()
920 struct tipc_msg *msg = buf_msg(buf); link_retransmit_failure() local
935 pr_info("Msg seq number: %u, ", msg_seqno(msg)); link_retransmit_failure()
959 struct tipc_msg *msg; tipc_link_retransmit() local
964 msg = buf_msg(skb); tipc_link_retransmit()
967 if (l_ptr->last_retransmitted == msg_seqno(msg)) { tipc_link_retransmit()
973 l_ptr->last_retransmitted = msg_seqno(msg); tipc_link_retransmit()
980 msg = buf_msg(skb); tipc_link_retransmit()
981 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); tipc_link_retransmit()
982 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); tipc_link_retransmit()
1045 struct tipc_msg *msg; tipc_rcv() local
1058 msg = buf_msg(skb); tipc_rcv()
1059 if (unlikely(msg_non_seq(msg))) { tipc_rcv()
1060 if (msg_user(msg) == LINK_CONFIG) tipc_rcv()
1068 if (unlikely(!msg_short(msg) && tipc_rcv()
1069 (msg_destnode(msg) != tn->own_addr))) tipc_rcv()
1073 n_ptr = tipc_node_find(net, msg_prevnode(msg)); tipc_rcv()
1085 msg_user(msg) == LINK_PROTOCOL && tipc_rcv()
1086 (msg_type(msg) == RESET_MSG || tipc_rcv()
1087 msg_type(msg) == ACTIVATE_MSG) && tipc_rcv()
1088 !msg_redundant_link(msg)) tipc_rcv()
1095 seq_no = msg_seqno(msg); tipc_rcv()
1096 ackd = msg_ack(msg); tipc_rcv()
1099 if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg))) tipc_rcv()
1100 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); tipc_rcv()
1120 if (msg_user(msg) == LINK_PROTOCOL) { tipc_rcv()
1147 if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) { tipc_rcv()
1177 struct tipc_msg *msg = buf_msg(skb); tipc_data_input() local
1178 u32 dport = msg_destport(msg); tipc_data_input()
1180 switch (msg_user(msg)) { tipc_data_input()
1204 pr_warn("Dropping received illegal msg type\n"); tipc_data_input()
1218 struct tipc_msg *msg = buf_msg(skb); tipc_link_input() local
1225 switch (msg_user(msg)) { tipc_link_input()
1227 if (msg_dup(msg)) { tipc_link_input()
1229 link->synch_point = msg_seqno(msg_get_wrapped(msg)); tipc_link_input()
1241 link->stats.recv_bundled += msg_msgcnt(msg); tipc_link_input()
1344 struct tipc_msg *msg = l_ptr->pmsg; tipc_link_proto_xmit() local
1357 msg_set_type(msg, msg_typ); tipc_link_proto_xmit()
1358 msg_set_net_plane(msg, l_ptr->net_plane); tipc_link_proto_xmit()
1359 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); tipc_link_proto_xmit()
1360 msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net)); tipc_link_proto_xmit()
1369 msg_set_next_sent(msg, next_sent); tipc_link_proto_xmit()
1374 msg_set_seq_gap(msg, gap); tipc_link_proto_xmit()
1377 msg_set_link_tolerance(msg, tolerance); tipc_link_proto_xmit()
1378 msg_set_linkprio(msg, priority); tipc_link_proto_xmit()
1379 msg_set_max_pkt(msg, l_ptr->mtu); tipc_link_proto_xmit()
1380 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); tipc_link_proto_xmit()
1381 msg_set_probe(msg, probe_msg != 0); tipc_link_proto_xmit()
1386 msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1)); tipc_link_proto_xmit()
1387 msg_set_seq_gap(msg, 0); tipc_link_proto_xmit()
1388 msg_set_next_sent(msg, 1); tipc_link_proto_xmit()
1389 msg_set_probe(msg, 0); tipc_link_proto_xmit()
1390 msg_set_link_tolerance(msg, l_ptr->tolerance); tipc_link_proto_xmit()
1391 msg_set_linkprio(msg, l_ptr->priority); tipc_link_proto_xmit()
1392 msg_set_max_pkt(msg, l_ptr->advertised_mtu); tipc_link_proto_xmit()
1396 msg_set_redundant_link(msg, r_flag); tipc_link_proto_xmit()
1397 msg_set_linkprio(msg, l_ptr->priority); tipc_link_proto_xmit()
1398 msg_set_size(msg, msg_size); tipc_link_proto_xmit()
1400 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2))); tipc_link_proto_xmit()
1406 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); tipc_link_proto_xmit()
1424 struct tipc_msg *msg = buf_msg(buf); tipc_link_proto_rcv() local
1429 if (l_ptr->net_plane != msg_net_plane(msg)) tipc_link_proto_rcv()
1430 if (link_own_addr(l_ptr) > msg_prevnode(msg)) tipc_link_proto_rcv()
1431 l_ptr->net_plane = msg_net_plane(msg); tipc_link_proto_rcv()
1433 switch (msg_type(msg)) { tipc_link_proto_rcv()
1438 if (less_eq(msg_session(msg), l_ptr->peer_session)) tipc_link_proto_rcv()
1442 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) || tipc_link_proto_rcv()
1456 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg)); tipc_link_proto_rcv()
1458 msg_tol = msg_link_tolerance(msg); tipc_link_proto_rcv()
1462 if (msg_linkprio(msg) > l_ptr->priority) tipc_link_proto_rcv()
1463 l_ptr->priority = msg_linkprio(msg); tipc_link_proto_rcv()
1465 if (l_ptr->mtu > msg_max_pkt(msg)) tipc_link_proto_rcv()
1466 l_ptr->mtu = msg_max_pkt(msg); tipc_link_proto_rcv()
1472 msg_last_bcast(msg); tipc_link_proto_rcv()
1476 l_ptr->peer_session = msg_session(msg); tipc_link_proto_rcv()
1477 l_ptr->peer_bearer_id = msg_bearer_id(msg); tipc_link_proto_rcv()
1479 if (msg_type(msg) == ACTIVATE_MSG) tipc_link_proto_rcv()
1484 msg_tol = msg_link_tolerance(msg); tipc_link_proto_rcv()
1488 if (msg_linkprio(msg) && tipc_link_proto_rcv()
1489 (msg_linkprio(msg) != l_ptr->priority)) { tipc_link_proto_rcv()
1492 l_ptr->priority, msg_linkprio(msg)); tipc_link_proto_rcv()
1493 l_ptr->priority = msg_linkprio(msg); tipc_link_proto_rcv()
1506 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) { tipc_link_proto_rcv()
1507 rec_gap = mod(msg_next_sent(msg) - tipc_link_proto_rcv()
1511 if (msg_probe(msg)) tipc_link_proto_rcv()
1517 msg_last_bcast(msg)); tipc_link_proto_rcv()
1519 if (rec_gap || (msg_probe(msg))) { tipc_link_proto_rcv()
1523 if (msg_seq_gap(msg)) { tipc_link_proto_rcv()
1526 msg_seq_gap(msg)); tipc_link_proto_rcv()
1540 struct tipc_msg *msg, tipc_link_tunnel_xmit()
1545 u32 length = msg_size(msg); tipc_link_tunnel_xmit()
1555 pr_warn("%sunable to send tunnel msg\n", link_co_err); tipc_link_tunnel_xmit()
1559 skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length); tipc_link_tunnel_xmit()
1596 pr_warn("%sunable to send changeover msg\n", tipc_link_failover_send_queue()
1606 struct tipc_msg *msg = buf_msg(skb); tipc_link_failover_send_queue() local
1608 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { tipc_link_failover_send_queue()
1609 struct tipc_msg *m = msg_get_wrapped(msg); tipc_link_failover_send_queue()
1612 msgcount = msg_msgcnt(msg); tipc_link_failover_send_queue()
1614 msg_set_seqno(m, msg_seqno(msg)); tipc_link_failover_send_queue()
1621 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg, tipc_link_failover_send_queue()
1622 msg_link_selector(msg)); tipc_link_failover_send_queue()
1653 struct tipc_msg *msg = buf_msg(skb); skb_queue_walk() local
1654 u32 len = msg_size(msg); skb_queue_walk()
1656 msg_set_ack(msg, mod(link->next_in_no - 1)); skb_queue_walk()
1657 msg_set_bcast_ack(msg, link->owner->bclink.last_in); skb_queue_walk()
1661 pr_warn("%sunable to send duplicate msg\n", skb_queue_walk()
1684 struct tipc_msg *msg = buf_msg(*skb); tipc_link_failover_rcv() local
1687 int bearer_id = msg_bearer_id(msg); tipc_link_failover_rcv()
1690 if (msg_type(msg) != FAILOVER_MSG) { tipc_link_failover_rcv()
1705 link->failover_pkts = msg_msgcnt(msg); tipc_link_failover_rcv()
2012 static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, __tipc_nl_add_link() argument
2021 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, __tipc_nl_add_link()
2026 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); __tipc_nl_add_link()
2030 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) __tipc_nl_add_link()
2032 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, __tipc_nl_add_link()
2035 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) __tipc_nl_add_link()
2037 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) __tipc_nl_add_link()
2039 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no)) __tipc_nl_add_link()
2043 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) __tipc_nl_add_link()
2046 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE)) __tipc_nl_add_link()
2049 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); __tipc_nl_add_link()
2052 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) __tipc_nl_add_link()
2054 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) __tipc_nl_add_link()
2056 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, __tipc_nl_add_link()
2059 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) __tipc_nl_add_link()
2061 nla_nest_end(msg->skb, prop); __tipc_nl_add_link()
2063 err = __tipc_nl_add_stats(msg->skb, &link->stats); __tipc_nl_add_link()
2067 nla_nest_end(msg->skb, attrs); __tipc_nl_add_link()
2068 genlmsg_end(msg->skb, hdr); __tipc_nl_add_link()
2073 nla_nest_cancel(msg->skb, prop); __tipc_nl_add_link()
2075 nla_nest_cancel(msg->skb, attrs); __tipc_nl_add_link()
2077 genlmsg_cancel(msg->skb, hdr); __tipc_nl_add_link()
2083 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, __tipc_nl_add_node_links() argument
2095 err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI); __tipc_nl_add_node_links()
2109 struct tipc_nl_msg msg; tipc_nl_link_dump() local
2118 msg.skb = skb; tipc_nl_link_dump()
2119 msg.portid = NETLINK_CB(cb->skb).portid; tipc_nl_link_dump()
2120 msg.seq = cb->nlh->nlmsg_seq; tipc_nl_link_dump()
2140 err = __tipc_nl_add_node_links(net, &msg, node, tipc_nl_link_dump()
2149 err = tipc_nl_add_bc_link(net, &msg); tipc_nl_link_dump()
2155 err = __tipc_nl_add_node_links(net, &msg, node, tipc_nl_link_dump()
2179 struct tipc_nl_msg msg; tipc_nl_link_get() local
2198 msg.skb = ans_skb; tipc_nl_link_get()
2199 msg.portid = info->snd_portid; tipc_nl_link_get()
2200 msg.seq = info->snd_seq; tipc_nl_link_get()
2209 err = __tipc_nl_add_link(net, &msg, link, 0); tipc_nl_link_get()
1538 tipc_link_tunnel_xmit(struct tipc_link *l_ptr, struct tipc_msg *tunnel_hdr, struct tipc_msg *msg, u32 selector) tipc_link_tunnel_xmit() argument
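
Note: the link.c hits above compare and advance 16-bit sequence numbers through mod()/less_eq()-style helpers (for example the gap calculation against msg_next_sent()). The sketch below shows the generic serial-number arithmetic such helpers usually implement, written as self-contained user-space C; it is the general technique, not a copy of the kernel helpers.

    /* Illustrative sketch: window arithmetic over 16-bit sequence numbers.
     * Comparisons are done modulo 2^16 so the counters may wrap freely. */
    #include <assert.h>
    #include <stdint.h>

    static uint16_t seq_mod(uint32_t v)            { return (uint16_t)v; }
    static int seq_less_eq(uint16_t a, uint16_t b) { return (uint16_t)(b - a) < 0x8000; }
    static int seq_less(uint16_t a, uint16_t b)    { return a != b && seq_less_eq(a, b); }

    /* gap between what we expect next and what the peer says it sent next */
    static uint16_t seq_gap(uint16_t next_in, uint16_t peer_next_sent)
    {
        return seq_less_eq(next_in, peer_next_sent)
                   ? (uint16_t)(peer_next_sent - next_in)
                   : 0;
    }

    int main(void)
    {
        assert(seq_less(0xfffe, 0x0001));        /* comparison works across the wrap  */
        assert(seq_gap(0xfffe, 0x0002) == 4);    /* 0xfffe..0x0001 still outstanding  */
        assert(seq_gap(seq_mod(5), seq_mod(5)) == 0);
        return 0;
    }
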
H A Deth_media.c52 static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr) tipc_eth_addr2msg() argument
54 memset(msg, 0, TIPC_MEDIA_INFO_SIZE); tipc_eth_addr2msg()
55 msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; tipc_eth_addr2msg()
56 memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, addr->value, ETH_ALEN); tipc_eth_addr2msg()
63 char *msg) tipc_eth_raw2addr()
68 ether_addr_copy(addr->value, msg); tipc_eth_raw2addr()
74 /* Convert discovery msg addr format to Ethernet media addr format */ tipc_eth_msg2addr()
77 char *msg) tipc_eth_msg2addr()
80 msg += TIPC_MEDIA_ADDR_OFFSET; tipc_eth_msg2addr()
81 return tipc_eth_raw2addr(b, addr, msg); tipc_eth_msg2addr()
61 tipc_eth_raw2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, char *msg) tipc_eth_raw2addr() argument
75 tipc_eth_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, char *msg) tipc_eth_msg2addr() argument
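
Note: the eth_media.c hits above convert between a media address structure and a fixed-size discovery-message field: zero the field, store a type byte at one offset, copy the MAC bytes at another, and reverse the copy on receive. Here is a stand-alone round-trip sketch of that packing idea; the offsets and sizes are invented for the example rather than taken from the TIPC wire format.

    /* Illustrative sketch: pack a MAC address into a fixed on-wire media-info
     * field and unpack it again.  Offsets and sizes are example values only. */
    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define MEDIA_INFO_SIZE   32
    #define MEDIA_TYPE_OFFSET  0
    #define MEDIA_ADDR_OFFSET  4
    #define MEDIA_TYPE_ETH     1
    #define MAC_LEN            6

    struct media_addr { uint8_t value[MAC_LEN]; };

    static void eth_addr2msg(uint8_t *msg, const struct media_addr *a)
    {
        memset(msg, 0, MEDIA_INFO_SIZE);           /* unused bytes must be zero */
        msg[MEDIA_TYPE_OFFSET] = MEDIA_TYPE_ETH;   /* tag the media type        */
        memcpy(msg + MEDIA_ADDR_OFFSET, a->value, MAC_LEN);
    }

    static void eth_msg2addr(struct media_addr *a, const uint8_t *msg)
    {
        memcpy(a->value, msg + MEDIA_ADDR_OFFSET, MAC_LEN);
    }

    int main(void)
    {
        uint8_t wire[MEDIA_INFO_SIZE];
        struct media_addr in = { { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 } }, out;

        eth_addr2msg(wire, &in);
        eth_msg2addr(&out, wire);
        assert(memcmp(in.value, out.value, MAC_LEN) == 0);
        return 0;
    }
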
H A Dib_media.c58 static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr) tipc_ib_addr2msg() argument
60 memset(msg, 0, TIPC_MEDIA_INFO_SIZE); tipc_ib_addr2msg()
61 memcpy(msg, addr->value, INFINIBAND_ALEN); tipc_ib_addr2msg()
68 char *msg) tipc_ib_raw2addr()
71 memcpy(addr->value, msg, INFINIBAND_ALEN); tipc_ib_raw2addr()
73 addr->broadcast = !memcmp(msg, b->bcast_addr.value, tipc_ib_raw2addr()
78 /* Convert discovery msg addr format to InfiniBand media addr format */ tipc_ib_msg2addr()
81 char *msg) tipc_ib_msg2addr()
83 return tipc_ib_raw2addr(b, addr, msg); tipc_ib_msg2addr()
66 tipc_ib_raw2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, char *msg) tipc_ib_raw2addr() argument
79 tipc_ib_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, char *msg) tipc_ib_msg2addr() argument
/linux-4.1.27/net/atm/
H A Dsignaling.c36 static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg) modify_qos() argument
43 msg->type = as_error; modify_qos()
45 msg->reply = -EOPNOTSUPP; modify_qos()
48 msg->reply = vcc->dev->ops->change_qos(vcc, &msg->qos, modify_qos()
49 msg->reply); modify_qos()
50 if (!msg->reply) modify_qos()
51 msg->type = as_okay; modify_qos()
59 *(struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)) = *msg; modify_qos()
65 struct atmsvc_msg *msg; sigd_send() local
69 msg = (struct atmsvc_msg *) skb->data; sigd_send()
71 vcc = *(struct atm_vcc **) &msg->vcc; sigd_send()
72 pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc); sigd_send()
75 switch (msg->type) { sigd_send()
77 sk->sk_err = -msg->reply; sigd_send()
82 msg->local.sas_addr.prv, ATM_ESA_LEN); sigd_send()
84 msg->local.sas_addr.pub, ATM_E164_LEN + 1); sigd_send()
89 session_vcc->itf = msg->pvc.sap_addr.itf; sigd_send()
90 session_vcc->vpi = msg->pvc.sap_addr.vpi; sigd_send()
91 session_vcc->vci = msg->pvc.sap_addr.vci; sigd_send()
93 session_vcc->qos = msg->qos; sigd_send()
98 sk->sk_err = -msg->reply; sigd_send()
102 vcc = *(struct atm_vcc **)&msg->listen_vcc; sigd_send()
120 vcc_release_async(vcc, msg->reply); sigd_send()
123 modify_qos(vcc, msg); sigd_send()
127 sk->sk_err_soft = msg->reply; sigd_send()
132 pr_alert("bad message type %d\n", (int)msg->type); sigd_send()
147 struct atmsvc_msg *msg; sigd_enq2() local
153 msg = (struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)); sigd_enq2()
154 memset(msg, 0, sizeof(*msg)); sigd_enq2()
155 msg->type = type; sigd_enq2()
156 *(struct atm_vcc **) &msg->vcc = vcc; sigd_enq2()
157 *(struct atm_vcc **) &msg->listen_vcc = listen_vcc; sigd_enq2()
158 msg->reply = reply; sigd_enq2()
160 msg->qos = *qos; sigd_enq2()
162 msg->sap = vcc->sap; sigd_enq2()
164 msg->svc = *svc; sigd_enq2()
166 msg->local = vcc->local; sigd_enq2()
168 msg->pvc = *pvc; sigd_enq2()
171 msg->session = ++session; sigd_enq2()
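
Note: sigd_enq2() above zeroes a freshly reserved message, sets its type, and copies each optional part (qos, sap, svc, local, pvc) only when the caller supplied a pointer for it. The compact sketch below shows that "optional fields guarded by NULL" builder pattern with invented structure names; it is not the ATM signalling message layout.

    /* Illustrative sketch: build a control message, copying optional parts
     * only when the caller passed them.  Types are invented for the example. */
    #include <stdio.h>
    #include <string.h>

    struct qos  { int tx_rate, rx_rate; };
    struct addr { char text[16]; };

    struct ctrl_msg {
        int         type;
        int         reply;
        struct qos  qos;      /* meaningful only if the caller passed one */
        struct addr local;
    };

    static void ctrl_enqueue(struct ctrl_msg *m, int type, int reply,
                             const struct qos *qos, const struct addr *local)
    {
        memset(m, 0, sizeof(*m));        /* unset optional fields stay zeroed */
        m->type  = type;
        m->reply = reply;
        if (qos)
            m->qos = *qos;
        if (local)
            m->local = *local;
    }

    int main(void)
    {
        struct ctrl_msg m;
        struct qos q = { .tx_rate = 1000, .rx_rate = 1000 };

        ctrl_enqueue(&m, /*type=*/3, /*reply=*/0, &q, NULL);
        printf("type=%d tx=%d local='%s'\n", m.type, m.qos.tx_rate, m.local.text);
        return 0;
    }
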
H A Dmpoa_caches.c135 struct k_message msg; cache_hit() local
143 msg.type = SND_MPOA_RES_RQST; cache_hit()
144 msg.content.in_info = entry->ctrl_info; cache_hit()
145 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN); cache_hit()
148 msg.qos = qos->qos; cache_hit()
149 msg_to_mpoad(&msg, mpc); cache_hit()
166 msg.type = SND_MPOA_RES_RQST; cache_hit()
167 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN); cache_hit()
168 msg.content.in_info = entry->ctrl_info; cache_hit()
171 msg.qos = qos->qos; cache_hit()
172 msg_to_mpoad(&msg, mpc); cache_hit()
194 struct k_message msg; in_cache_remove_entry() local
208 msg.type = STOP_KEEP_ALIVE_SM; in_cache_remove_entry()
209 msg_to_mpoad(&msg, client); in_cache_remove_entry()
256 struct k_message msg; check_resolving_entries() local
284 msg.type = SND_MPOA_RES_RTRY; check_resolving_entries()
285 memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN); check_resolving_entries()
286 msg.content.in_info = entry->ctrl_info; check_resolving_entries()
289 msg.qos = qos->qos; check_resolving_entries()
290 msg_to_mpoad(&msg, client); check_resolving_entries()
431 struct k_message msg; eg_cache_remove_entry() local
443 msg.type = STOP_KEEP_ALIVE_SM; eg_cache_remove_entry()
444 msg_to_mpoad(&msg, client); eg_cache_remove_entry()
458 static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, eg_cache_add_entry() argument
469 &msg->content.eg_info.eg_dst_ip); eg_cache_add_entry()
481 entry->ctrl_info = msg->content.eg_info; eg_cache_add_entry()
506 struct k_message msg; clear_expired() local
516 msg.type = SND_EGRESS_PURGE; clear_expired()
517 msg.content.eg_info = entry->ctrl_info; clear_expired()
520 msg_to_mpoad(&msg, client); clear_expired()
/linux-4.1.27/net/sctp/
H A Dchunk.c50 static void sctp_datamsg_init(struct sctp_datamsg *msg) sctp_datamsg_init() argument
52 atomic_set(&msg->refcnt, 1); sctp_datamsg_init()
53 msg->send_failed = 0; sctp_datamsg_init()
54 msg->send_error = 0; sctp_datamsg_init()
55 msg->can_abandon = 0; sctp_datamsg_init()
56 msg->can_delay = 1; sctp_datamsg_init()
57 msg->expires_at = 0; sctp_datamsg_init()
58 INIT_LIST_HEAD(&msg->chunks); sctp_datamsg_init()
64 struct sctp_datamsg *msg; sctp_datamsg_new() local
65 msg = kmalloc(sizeof(struct sctp_datamsg), gfp); sctp_datamsg_new()
66 if (msg) { sctp_datamsg_new()
67 sctp_datamsg_init(msg); sctp_datamsg_new()
70 return msg; sctp_datamsg_new()
73 void sctp_datamsg_free(struct sctp_datamsg *msg) sctp_datamsg_free() argument
80 list_for_each_entry(chunk, &msg->chunks, frag_list) sctp_datamsg_free()
83 sctp_datamsg_put(msg); sctp_datamsg_free()
87 static void sctp_datamsg_destroy(struct sctp_datamsg *msg) sctp_datamsg_destroy() argument
97 notify = msg->send_failed ? -1 : 0; sctp_datamsg_destroy()
100 list_for_each_safe(pos, temp, &msg->chunks) { sctp_datamsg_destroy()
106 if (msg->send_error) sctp_datamsg_destroy()
107 error = msg->send_error; sctp_datamsg_destroy()
134 kfree(msg); sctp_datamsg_destroy()
138 static void sctp_datamsg_hold(struct sctp_datamsg *msg) sctp_datamsg_hold() argument
140 atomic_inc(&msg->refcnt); sctp_datamsg_hold()
144 void sctp_datamsg_put(struct sctp_datamsg *msg) sctp_datamsg_put() argument
146 if (atomic_dec_and_test(&msg->refcnt)) sctp_datamsg_put()
147 sctp_datamsg_destroy(msg); sctp_datamsg_put()
151 static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk) sctp_datamsg_assign() argument
153 sctp_datamsg_hold(msg); sctp_datamsg_assign()
154 chunk->msg = msg; sctp_datamsg_assign()
173 struct sctp_datamsg *msg; sctp_datamsg_from_user() local
178 msg = sctp_datamsg_new(GFP_KERNEL); sctp_datamsg_from_user()
179 if (!msg) sctp_datamsg_from_user()
187 msg->expires_at = jiffies + sctp_datamsg_from_user()
189 msg->can_abandon = 1; sctp_datamsg_from_user()
191 pr_debug("%s: msg:%p expires_at:%ld jiffies:%ld\n", __func__, sctp_datamsg_from_user()
192 msg, msg->expires_at, jiffies); sctp_datamsg_from_user()
246 msg->can_delay = 0; sctp_datamsg_from_user()
291 sctp_datamsg_assign(msg, chunk); sctp_datamsg_from_user()
292 list_add_tail(&chunk->frag_list, &msg->chunks); sctp_datamsg_from_user()
327 sctp_datamsg_assign(msg, chunk); sctp_datamsg_from_user()
328 list_add_tail(&chunk->frag_list, &msg->chunks); sctp_datamsg_from_user()
331 return msg; sctp_datamsg_from_user()
337 list_for_each_safe(pos, temp, &msg->chunks) { sctp_datamsg_from_user()
342 sctp_datamsg_put(msg); sctp_datamsg_from_user()
349 struct sctp_datamsg *msg = chunk->msg; sctp_chunk_abandoned() local
351 if (!msg->can_abandon) sctp_chunk_abandoned()
354 if (time_after(jiffies, msg->expires_at)) sctp_chunk_abandoned()
363 chunk->msg->send_failed = 1; sctp_chunk_fail()
364 chunk->msg->send_error = error; sctp_chunk_fail()
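
Note: chunk.c above creates a message with a reference count of one, takes an extra reference for every chunk assigned to it, and destroys the message when the last put drops the count to zero. The following stand-alone sketch shows that lifetime rule with C11 atomics; the structure is hypothetical and is not the SCTP data message.

    /* Illustrative sketch: reference-counted message container.  The object
     * starts with one reference and is freed when the last one is dropped. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct datamsg {
        atomic_int refcnt;
        int        send_failed;
    };

    static struct datamsg *datamsg_new(void)
    {
        struct datamsg *m = calloc(1, sizeof(*m));
        if (m)
            atomic_init(&m->refcnt, 1);            /* creator holds one reference */
        return m;
    }

    static void datamsg_hold(struct datamsg *m)
    {
        atomic_fetch_add(&m->refcnt, 1);           /* one extra ref per user      */
    }

    static void datamsg_put(struct datamsg *m)
    {
        if (atomic_fetch_sub(&m->refcnt, 1) == 1) {  /* we dropped the last ref  */
            printf("destroying message (failed=%d)\n", m->send_failed);
            free(m);
        }
    }

    int main(void)
    {
        struct datamsg *m = datamsg_new();
        if (!m)
            return 1;
        datamsg_hold(m);   /* e.g. a chunk now points at the message */
        datamsg_put(m);    /* chunk released                         */
        datamsg_put(m);    /* creator's reference: frees the object  */
        return 0;
    }
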
/linux-4.1.27/net/ieee802154/
H A Dnetlink.c43 struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); ieee802154_nl_create() local
46 if (!msg) ieee802154_nl_create()
50 hdr = genlmsg_put(msg, 0, ieee802154_seq_num++, ieee802154_nl_create()
54 nlmsg_free(msg); ieee802154_nl_create()
58 return msg; ieee802154_nl_create()
61 int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group) ieee802154_nl_mcast() argument
63 struct nlmsghdr *nlh = nlmsg_hdr(msg); ieee802154_nl_mcast()
66 genlmsg_end(msg, hdr); ieee802154_nl_mcast()
68 return genlmsg_multicast(&nl802154_family, msg, 0, group, GFP_ATOMIC); ieee802154_nl_mcast()
75 struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); ieee802154_nl_new_reply() local
77 if (!msg) ieee802154_nl_new_reply()
80 hdr = genlmsg_put_reply(msg, info, ieee802154_nl_new_reply()
83 nlmsg_free(msg); ieee802154_nl_new_reply()
87 return msg; ieee802154_nl_new_reply()
90 int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info) ieee802154_nl_reply() argument
92 struct nlmsghdr *nlh = nlmsg_hdr(msg); ieee802154_nl_reply()
95 genlmsg_end(msg, hdr); ieee802154_nl_reply()
97 return genlmsg_reply(msg, info); ieee802154_nl_reply()
H A Dnl-phy.c36 static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid, ieee802154_nl_fill_phy() argument
48 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags, ieee802154_nl_fill_phy()
54 if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || ieee802154_nl_fill_phy()
55 nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) || ieee802154_nl_fill_phy()
56 nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel)) ieee802154_nl_fill_phy()
63 nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, ieee802154_nl_fill_phy()
68 genlmsg_end(msg, hdr); ieee802154_nl_fill_phy()
73 genlmsg_cancel(msg, hdr); ieee802154_nl_fill_phy()
84 struct sk_buff *msg; ieee802154_list_phy() local
102 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); ieee802154_list_phy()
103 if (!msg) ieee802154_list_phy()
106 rc = ieee802154_nl_fill_phy(msg, info->snd_portid, info->snd_seq, ieee802154_list_phy()
113 return genlmsg_reply(msg, info); ieee802154_list_phy()
115 nlmsg_free(msg); ieee802154_list_phy()
171 struct sk_buff *msg; ieee802154_add_iface() local
207 msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE); ieee802154_add_iface()
208 if (!msg) ieee802154_add_iface()
251 if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || ieee802154_add_iface()
252 nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) ieee802154_add_iface()
258 return ieee802154_nl_reply(msg, info); ieee802154_add_iface()
266 nlmsg_free(msg); ieee802154_add_iface()
274 struct sk_buff *msg; ieee802154_del_iface() local
321 msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE); ieee802154_del_iface()
322 if (!msg) ieee802154_del_iface()
334 if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || ieee802154_del_iface()
335 nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name)) ieee802154_del_iface()
339 return ieee802154_nl_reply(msg, info); ieee802154_del_iface()
342 nlmsg_free(msg); ieee802154_del_iface()
H A Dnl-mac.c37 static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr) nla_put_hwaddr() argument
39 return nla_put_u64(msg, type, swab64((__force u64)hwaddr)); nla_put_hwaddr()
47 static int nla_put_shortaddr(struct sk_buff *msg, int type, __le16 addr) nla_put_shortaddr() argument
49 return nla_put_u16(msg, type, le16_to_cpu(addr)); nla_put_shortaddr()
59 struct sk_buff *msg; ieee802154_nl_start_confirm() local
63 msg = ieee802154_nl_create(0, IEEE802154_START_CONF); ieee802154_nl_start_confirm()
64 if (!msg) ieee802154_nl_start_confirm()
67 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || ieee802154_nl_start_confirm()
68 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || ieee802154_nl_start_confirm()
69 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, ieee802154_nl_start_confirm()
71 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) ieee802154_nl_start_confirm()
73 return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); ieee802154_nl_start_confirm()
76 nlmsg_free(msg); ieee802154_nl_start_confirm()
80 static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, ieee802154_nl_fill_iface() argument
90 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags, ieee802154_nl_fill_iface()
103 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || ieee802154_nl_fill_iface()
104 nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || ieee802154_nl_fill_iface()
105 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || ieee802154_nl_fill_iface()
106 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, ieee802154_nl_fill_iface()
108 nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) || ieee802154_nl_fill_iface()
109 nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, pan_id)) ieee802154_nl_fill_iface()
119 if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER, ieee802154_nl_fill_iface()
121 nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) || ieee802154_nl_fill_iface()
122 nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE, ieee802154_nl_fill_iface()
124 nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL, ieee802154_nl_fill_iface()
126 nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES, ieee802154_nl_fill_iface()
128 nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE, ieee802154_nl_fill_iface()
130 nla_put_u8(msg, IEEE802154_ATTR_CSMA_MAX_BE, ieee802154_nl_fill_iface()
132 nla_put_s8(msg, IEEE802154_ATTR_FRAME_RETRIES, ieee802154_nl_fill_iface()
138 genlmsg_end(msg, hdr); ieee802154_nl_fill_iface()
143 genlmsg_cancel(msg, hdr); ieee802154_nl_fill_iface()
412 struct sk_buff *msg; ieee802154_list_iface() local
422 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); ieee802154_list_iface()
423 if (!msg) ieee802154_list_iface()
426 rc = ieee802154_nl_fill_iface(msg, info->snd_portid, info->snd_seq, ieee802154_list_iface()
433 return genlmsg_reply(msg, info); ieee802154_list_iface()
435 nlmsg_free(msg); ieee802154_list_iface()
609 ieee802154_llsec_fill_key_id(struct sk_buff *msg, ieee802154_llsec_fill_key_id() argument
612 if (nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_MODE, desc->mode)) ieee802154_llsec_fill_key_id()
616 if (nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, ieee802154_llsec_fill_key_id()
621 nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, ieee802154_llsec_fill_key_id()
626 nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, ieee802154_llsec_fill_key_id()
632 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_ID, desc->id)) ieee802154_llsec_fill_key_id()
636 nla_put_u32(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT, ieee802154_llsec_fill_key_id()
641 nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED, ieee802154_llsec_fill_key_id()
650 struct sk_buff *msg; ieee802154_llsec_getparams() local
669 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); ieee802154_llsec_getparams()
670 if (!msg) ieee802154_llsec_getparams()
673 hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0, ieee802154_llsec_getparams()
682 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || ieee802154_llsec_getparams()
683 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || ieee802154_llsec_getparams()
684 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_ENABLED, params.enabled) || ieee802154_llsec_getparams()
685 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) || ieee802154_llsec_getparams()
686 nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, ieee802154_llsec_getparams()
688 ieee802154_llsec_fill_key_id(msg, &params.out_key)) ieee802154_llsec_getparams()
693 return ieee802154_nl_reply(msg, info); ieee802154_llsec_getparams()
695 nlmsg_free(msg); ieee802154_llsec_getparams()
918 ieee802154_nl_fill_key(struct sk_buff *msg, u32 portid, u32 seq, ieee802154_nl_fill_key() argument
925 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI, ieee802154_nl_fill_key()
930 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || ieee802154_nl_fill_key()
931 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || ieee802154_nl_fill_key()
932 ieee802154_llsec_fill_key_id(msg, &key->id) || ieee802154_nl_fill_key()
933 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES, ieee802154_nl_fill_key()
940 if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS, ieee802154_nl_fill_key()
945 if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_BYTES, ieee802154_nl_fill_key()
949 genlmsg_end(msg, hdr); ieee802154_nl_fill_key()
953 genlmsg_cancel(msg, hdr); ieee802154_nl_fill_key()
1055 ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq, ieee802154_nl_fill_dev() argument
1061 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI, ieee802154_nl_fill_dev()
1066 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || ieee802154_nl_fill_dev()
1067 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || ieee802154_nl_fill_dev()
1068 nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) || ieee802154_nl_fill_dev()
1069 nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, ieee802154_nl_fill_dev()
1071 nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) || ieee802154_nl_fill_dev()
1072 nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, ieee802154_nl_fill_dev()
1074 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, ieee802154_nl_fill_dev()
1076 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, desc->key_mode)) ieee802154_nl_fill_dev()
1079 genlmsg_end(msg, hdr); ieee802154_nl_fill_dev()
1083 genlmsg_cancel(msg, hdr); ieee802154_nl_fill_dev()
1161 ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq, ieee802154_nl_fill_devkey() argument
1168 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI, ieee802154_nl_fill_devkey()
1173 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || ieee802154_nl_fill_devkey()
1174 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || ieee802154_nl_fill_devkey()
1175 nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) || ieee802154_nl_fill_devkey()
1176 nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, ieee802154_nl_fill_devkey()
1178 ieee802154_llsec_fill_key_id(msg, &devkey->key_id)) ieee802154_nl_fill_devkey()
1181 genlmsg_end(msg, hdr); ieee802154_nl_fill_devkey()
1185 genlmsg_cancel(msg, hdr); ieee802154_nl_fill_devkey()
1290 ieee802154_nl_fill_seclevel(struct sk_buff *msg, u32 portid, u32 seq, ieee802154_nl_fill_seclevel() argument
1296 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI, ieee802154_nl_fill_seclevel()
1301 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || ieee802154_nl_fill_seclevel()
1302 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || ieee802154_nl_fill_seclevel()
1303 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_FRAME_TYPE, sl->frame_type) || ieee802154_nl_fill_seclevel()
1304 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVELS, sl->sec_levels) || ieee802154_nl_fill_seclevel()
1305 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, ieee802154_nl_fill_seclevel()
1310 nla_put_u8(msg, IEEE802154_ATTR_LLSEC_CMD_FRAME_ID, ieee802154_nl_fill_seclevel()
1314 genlmsg_end(msg, hdr); ieee802154_nl_fill_seclevel()
1318 genlmsg_cancel(msg, hdr); ieee802154_nl_fill_seclevel()
H A Dnl802154.c240 struct sk_buff *msg) nl802154_send_wpan_phy_channels()
245 nl_page = nla_nest_start(msg, NL802154_ATTR_CHANNELS_SUPPORTED); nl802154_send_wpan_phy_channels()
250 if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL, nl802154_send_wpan_phy_channels()
254 nla_nest_end(msg, nl_page); nl802154_send_wpan_phy_channels()
261 struct sk_buff *msg, u32 portid, u32 seq, nl802154_send_wpan_phy()
266 hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); nl802154_send_wpan_phy()
270 if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) || nl802154_send_wpan_phy()
271 nla_put_string(msg, NL802154_ATTR_WPAN_PHY_NAME, nl802154_send_wpan_phy()
273 nla_put_u32(msg, NL802154_ATTR_GENERATION, nl802154_send_wpan_phy()
283 if (nla_put_u8(msg, NL802154_ATTR_PAGE, nl802154_send_wpan_phy()
285 nla_put_u8(msg, NL802154_ATTR_CHANNEL, nl802154_send_wpan_phy()
290 if (nl802154_send_wpan_phy_channels(rdev, msg)) nl802154_send_wpan_phy()
294 if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE, nl802154_send_wpan_phy()
299 if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT, nl802154_send_wpan_phy()
304 if (nla_put_s8(msg, NL802154_ATTR_TX_POWER, nl802154_send_wpan_phy()
309 genlmsg_end(msg, hdr); nl802154_send_wpan_phy()
313 genlmsg_cancel(msg, hdr); nl802154_send_wpan_phy()
424 struct sk_buff *msg; nl802154_get_wpan_phy() local
427 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl802154_get_wpan_phy()
428 if (!msg) nl802154_get_wpan_phy()
431 if (nl802154_send_wpan_phy(rdev, NL802154_CMD_NEW_WPAN_PHY, msg, nl802154_get_wpan_phy()
433 nlmsg_free(msg); nl802154_get_wpan_phy()
437 return genlmsg_reply(msg, info); nl802154_get_wpan_phy()
447 nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, nl802154_send_iface() argument
454 hdr = nl802154hdr_put(msg, portid, seq, flags, nl802154_send_iface()
460 (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex) || nl802154_send_iface()
461 nla_put_string(msg, NL802154_ATTR_IFNAME, dev->name))) nl802154_send_iface()
464 if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) || nl802154_send_iface()
465 nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) || nl802154_send_iface()
466 nla_put_u64(msg, NL802154_ATTR_WPAN_DEV, wpan_dev_id(wpan_dev)) || nl802154_send_iface()
467 nla_put_u32(msg, NL802154_ATTR_GENERATION, nl802154_send_iface()
473 if (nla_put_le64(msg, NL802154_ATTR_EXTENDED_ADDR, nl802154_send_iface()
475 nla_put_le16(msg, NL802154_ATTR_SHORT_ADDR, nl802154_send_iface()
477 nla_put_le16(msg, NL802154_ATTR_PAN_ID, wpan_dev->pan_id)) nl802154_send_iface()
481 if (nla_put_s8(msg, NL802154_ATTR_MAX_FRAME_RETRIES, nl802154_send_iface()
483 nla_put_u8(msg, NL802154_ATTR_MAX_BE, wpan_dev->max_be) || nl802154_send_iface()
484 nla_put_u8(msg, NL802154_ATTR_MAX_CSMA_BACKOFFS, nl802154_send_iface()
486 nla_put_u8(msg, NL802154_ATTR_MIN_BE, wpan_dev->min_be)) nl802154_send_iface()
490 if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt)) nl802154_send_iface()
493 genlmsg_end(msg, hdr); nl802154_send_iface()
497 genlmsg_cancel(msg, hdr); nl802154_send_iface()
546 struct sk_buff *msg; nl802154_get_interface() local
550 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl802154_get_interface()
551 if (!msg) nl802154_get_interface()
554 if (nl802154_send_iface(msg, info->snd_portid, info->snd_seq, 0, nl802154_get_interface()
556 nlmsg_free(msg); nl802154_get_interface()
560 return genlmsg_reply(msg, info); nl802154_get_interface()
239 nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev, struct sk_buff *msg) nl802154_send_wpan_phy_channels() argument
259 nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, enum nl802154_commands cmd, struct sk_buff *msg, u32 portid, u32 seq, int flags) nl802154_send_wpan_phy() argument
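
Note: the ieee802154 fill functions above all follow the same shape: start a message header, chain the nla_put_*() calls with ||, jump to a cancel label on the first failure, and finalize otherwise. The sketch below reproduces only that "chain the puts, roll back on first failure" error-handling shape; the toy buffer and put_u32()/fill_record() helpers are stand-ins and are not the netlink attribute API.

    /* Illustrative sketch: chain attribute writes and cancel the partial
     * record on the first failure.  The writer is a toy, not netlink. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct outbuf { uint8_t data[64]; size_t len, mark; };

    static int put_u32(struct outbuf *b, uint32_t v)
    {
        if (b->len + sizeof(v) > sizeof(b->data))
            return -1;                   /* no room: caller must cancel      */
        memcpy(b->data + b->len, &v, sizeof(v));
        b->len += sizeof(v);
        return 0;
    }

    static int fill_record(struct outbuf *b, uint32_t idx, uint32_t mtu, uint32_t rx)
    {
        b->mark = b->len;                /* remember where this record began */

        if (put_u32(b, idx) ||           /* any failed put aborts the record */
            put_u32(b, mtu) ||
            put_u32(b, rx))
            goto cancel;

        return 0;                        /* record complete                  */

    cancel:
        b->len = b->mark;                /* roll back the partial record     */
        return -1;
    }

    int main(void)
    {
        struct outbuf b = { .len = 0 };
        printf("fill: %d, len=%zu\n", fill_record(&b, 1, 127, 42), b.len);
        return 0;
    }
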
/linux-4.1.27/drivers/pci/
H A Dhtirq.c32 struct ht_irq_msg msg; member in struct:ht_irq_cfg
36 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) write_ht_irq_msg() argument
41 if (cfg->msg.address_lo != msg->address_lo) { write_ht_irq_msg()
43 pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo); write_ht_irq_msg()
45 if (cfg->msg.address_hi != msg->address_hi) { write_ht_irq_msg()
47 pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi); write_ht_irq_msg()
50 cfg->update(cfg->dev, irq, msg); write_ht_irq_msg()
52 cfg->msg = *msg; write_ht_irq_msg()
55 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) fetch_ht_irq_msg() argument
58 *msg = cfg->msg; fetch_ht_irq_msg()
64 struct ht_irq_msg msg = cfg->msg; mask_ht_irq() local
66 msg.address_lo |= 1; mask_ht_irq()
67 write_ht_irq_msg(data->irq, &msg); mask_ht_irq()
73 struct ht_irq_msg msg = cfg->msg; unmask_ht_irq() local
75 msg.address_lo &= ~1; unmask_ht_irq()
76 write_ht_irq_msg(data->irq, &msg); unmask_ht_irq()
116 /* Initialize msg to a value that will never match the first write. */ __ht_create_irq()
117 cfg->msg.address_lo = 0xffffffff; __ht_create_irq()
118 cfg->msg.address_hi = 0xffffffff; __ht_create_irq()
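
Note: htirq.c above keeps the last programmed message cached in cfg->msg, touches config space only for the halves that actually changed, and masks or unmasks the interrupt by flipping bit 0 of address_lo and rewriting. A small sketch of that "cache plus write-on-change" idea follows; the hardware write is simulated with a printf and the layout is only an example.

    /* Illustrative sketch: cache the last value written to a device register
     * and only perform the (expensive) write when the value changes. */
    #include <stdint.h>
    #include <stdio.h>

    struct irq_msg { uint32_t address_lo, address_hi; };
    struct irq_cfg { struct irq_msg msg; };            /* last value programmed */

    static void hw_write(const char *which, uint32_t v)
    {
        printf("write %s = 0x%08x\n", which, (unsigned)v);  /* stands in for I/O */
    }

    static void write_irq_msg(struct irq_cfg *cfg, const struct irq_msg *msg)
    {
        if (cfg->msg.address_lo != msg->address_lo)
            hw_write("address_lo", msg->address_lo);
        if (cfg->msg.address_hi != msg->address_hi)
            hw_write("address_hi", msg->address_hi);
        cfg->msg = *msg;                               /* refresh the cache */
    }

    static void mask_irq(struct irq_cfg *cfg)
    {
        struct irq_msg m = cfg->msg;
        m.address_lo |= 1;                             /* bit 0 = masked */
        write_irq_msg(cfg, &m);
    }

    int main(void)
    {
        /* seed the cache with a value that can never match the first write */
        struct irq_cfg cfg = { .msg = { 0xffffffff, 0xffffffff } };
        struct irq_msg m = { .address_lo = 0x1000, .address_hi = 0x0 };

        write_irq_msg(&cfg, &m);   /* both halves written     */
        write_irq_msg(&cfg, &m);   /* nothing written         */
        mask_irq(&cfg);            /* only address_lo written */
        return 0;
    }
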
/linux-4.1.27/ipc/
H A Dmsgutil.c16 #include <linux/msg.h>
53 struct msg_msg *msg; alloc_msg() local
58 msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL); alloc_msg()
59 if (msg == NULL) alloc_msg()
62 msg->next = NULL; alloc_msg()
63 msg->security = NULL; alloc_msg()
66 pseg = &msg->next; alloc_msg()
79 return msg; alloc_msg()
82 free_msg(msg); alloc_msg()
88 struct msg_msg *msg; load_msg() local
93 msg = alloc_msg(len); load_msg()
94 if (msg == NULL) load_msg()
98 if (copy_from_user(msg + 1, src, alen)) load_msg()
101 for (seg = msg->next; seg != NULL; seg = seg->next) { load_msg()
109 err = security_msg_msg_alloc(msg); load_msg()
113 return msg; load_msg()
116 free_msg(msg); load_msg()
153 int store_msg(void __user *dest, struct msg_msg *msg, size_t len) store_msg() argument
159 if (copy_to_user(dest, msg + 1, alen)) store_msg()
162 for (seg = msg->next; seg != NULL; seg = seg->next) { store_msg()
172 void free_msg(struct msg_msg *msg) free_msg() argument
176 security_msg_msg_free(msg); free_msg()
178 seg = msg->next; free_msg()
179 kfree(msg); free_msg()
H A Dmsg.c2 * linux/ipc/msg.c
11 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
26 #include <linux/msg.h>
113 * newque - Create a new msg queue
214 struct msg_msg *msg, *t; freeque() local
223 list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { freeque()
225 free_msg(msg); freeque()
536 case MSG_STAT: /* msqid is an index rather than a msg queue id */ SYSCALL_DEFINE3()
547 static int testmsg(struct msg_msg *msg, long type, int mode) testmsg() argument
554 if (msg->m_type <= type) testmsg()
558 if (msg->m_type == type) testmsg()
562 if (msg->m_type != type) testmsg()
569 static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) pipelined_send() argument
574 if (testmsg(msg, msr->r_msgtype, msr->r_mode) && pipelined_send()
575 !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, pipelined_send()
579 if (msr->r_maxsize < msg->m_ts) { pipelined_send()
597 msr->r_msg = msg; pipelined_send()
611 struct msg_msg *msg; do_msgsnd() local
622 msg = load_msg(mtext, msgsz); do_msgsnd()
623 if (IS_ERR(msg)) do_msgsnd()
624 return PTR_ERR(msg); do_msgsnd()
626 msg->m_type = mtype; do_msgsnd()
627 msg->m_ts = msgsz; do_msgsnd()
651 err = security_msg_queue_msgsnd(msq, msg, msgflg); do_msgsnd()
699 if (!pipelined_send(msq, msg)) { do_msgsnd()
701 list_add_tail(&msg->m_list, &msq->q_messages); do_msgsnd()
709 msg = NULL; do_msgsnd()
715 if (msg != NULL) do_msgsnd()
716 free_msg(msg); do_msgsnd()
751 static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) do_msg_fill() argument
756 if (put_user(msg->m_type, &msgp->mtype)) do_msg_fill()
759 msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; do_msg_fill()
760 if (store_msg(msgp->mtext, msg, msgsz)) do_msg_fill()
801 struct msg_msg *msg, *found = NULL; find_msg() local
804 list_for_each_entry(msg, &msq->q_messages, m_list) { find_msg()
805 if (testmsg(msg, *msgtyp, mode) && find_msg()
806 !security_msg_queue_msgrcv(msq, msg, current, find_msg()
808 if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { find_msg()
809 *msgtyp = msg->m_type - 1; find_msg()
810 found = msg; find_msg()
813 return msg; find_msg()
815 return msg; find_msg()
829 struct msg_msg *msg, *copy = NULL; do_msgrcv() local
856 msg = ERR_PTR(-EACCES); do_msgrcv()
864 msg = ERR_PTR(-EIDRM); do_msgrcv()
868 msg = find_msg(msq, &msgtyp, mode); do_msgrcv()
869 if (!IS_ERR(msg)) { do_msgrcv()
874 if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { do_msgrcv()
875 msg = ERR_PTR(-E2BIG); do_msgrcv()
883 msg = copy_msg(msg, copy); do_msgrcv()
887 list_del(&msg->m_list); do_msgrcv()
891 msq->q_cbytes -= msg->m_ts; do_msgrcv()
892 atomic_sub(msg->m_ts, &ns->msg_bytes); do_msgrcv()
901 msg = ERR_PTR(-ENOMSG); do_msgrcv()
937 msg = (struct msg_msg *)msr_d.r_msg; do_msgrcv()
938 while (msg == NULL) { do_msgrcv()
940 msg = (struct msg_msg *)msr_d.r_msg; do_msgrcv()
947 if (msg != ERR_PTR(-EAGAIN)) do_msgrcv()
958 msg = (struct msg_msg *)msr_d.r_msg; do_msgrcv()
959 if (msg != ERR_PTR(-EAGAIN)) do_msgrcv()
964 msg = ERR_PTR(-ERESTARTNOHAND); do_msgrcv()
975 if (IS_ERR(msg)) { do_msgrcv()
977 return PTR_ERR(msg); do_msgrcv()
980 bufsz = msg_handler(buf, msg, bufsz); do_msgrcv()
981 free_msg(msg); do_msgrcv()
1043 ipc_init_proc_interface("sysvipc/msg", msg_init()
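
Note: the ipc/msg.c hits above include testmsg(), which decides whether a queued message satisfies a receiver's type request in one of several search modes. The sketch below shows that predicate on its own, following the usual System V msgrcv() msgtyp conventions; the enum names are written out here for illustration rather than copied from the kernel header.

    /* Illustrative sketch: match a queued message against the type/mode a
     * receiver asked for (System V msgrcv() msgtyp conventions). */
    #include <assert.h>

    enum search_mode { SEARCH_ANY, SEARCH_EQUAL, SEARCH_LESSEQUAL, SEARCH_NOTEQUAL };

    static int test_msg(long m_type, long wanted, enum search_mode mode)
    {
        switch (mode) {
        case SEARCH_ANY:       return 1;                 /* msgtyp == 0: first message */
        case SEARCH_LESSEQUAL: return m_type <= wanted;  /* msgtyp < 0 request         */
        case SEARCH_EQUAL:     return m_type == wanted;  /* msgtyp > 0 request         */
        case SEARCH_NOTEQUAL:  return m_type != wanted;  /* MSG_EXCEPT request         */
        }
        return 0;
    }

    int main(void)
    {
        assert(test_msg(3, 0, SEARCH_ANY));
        assert(test_msg(3, 5, SEARCH_LESSEQUAL));
        assert(!test_msg(7, 5, SEARCH_LESSEQUAL));
        assert(test_msg(5, 5, SEARCH_EQUAL));
        assert(test_msg(4, 5, SEARCH_NOTEQUAL));
        return 0;
    }
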
/linux-4.1.27/drivers/net/can/usb/
H A Desd_usb2.c183 } msg; member in struct:esd_usb2_msg
227 struct esd_usb2_msg *msg) esd_usb2_rx_event()
232 u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK; esd_usb2_rx_event()
235 u8 state = msg->msg.rx.data[0]; esd_usb2_rx_event()
236 u8 ecc = msg->msg.rx.data[1]; esd_usb2_rx_event()
237 u8 txerr = msg->msg.rx.data[2]; esd_usb2_rx_event()
238 u8 rxerr = msg->msg.rx.data[3]; esd_usb2_rx_event()
315 struct esd_usb2_msg *msg) esd_usb2_rx_can_msg()
326 id = le32_to_cpu(msg->msg.rx.id); esd_usb2_rx_can_msg()
329 esd_usb2_rx_event(priv, msg); esd_usb2_rx_can_msg()
338 cf->can_dlc = get_can_dlc(msg->msg.rx.dlc); esd_usb2_rx_can_msg()
343 if (msg->msg.rx.dlc & ESD_RTR) { esd_usb2_rx_can_msg()
347 cf->data[i] = msg->msg.rx.data[i]; esd_usb2_rx_can_msg()
360 struct esd_usb2_msg *msg) esd_usb2_tx_done_msg()
369 context = &priv->tx_contexts[msg->msg.txdone.hnd & (MAX_TX_URBS - 1)]; esd_usb2_tx_done_msg()
371 if (!msg->msg.txdone.status) { esd_usb2_tx_done_msg()
409 struct esd_usb2_msg *msg; esd_usb2_read_bulk_callback() local
411 msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos); esd_usb2_read_bulk_callback()
413 switch (msg->msg.hdr.cmd) { esd_usb2_read_bulk_callback()
415 if (msg->msg.rx.net >= dev->net_count) { esd_usb2_read_bulk_callback()
420 esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg); esd_usb2_read_bulk_callback()
424 if (msg->msg.txdone.net >= dev->net_count) { esd_usb2_read_bulk_callback()
429 esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net], esd_usb2_read_bulk_callback()
430 msg); esd_usb2_read_bulk_callback()
434 pos += msg->msg.hdr.len << 2; esd_usb2_read_bulk_callback()
525 static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg) esd_usb2_send_msg() argument
531 msg, esd_usb2_send_msg()
532 msg->msg.hdr.len << 2, esd_usb2_send_msg()
538 struct esd_usb2_msg *msg) esd_usb2_wait_msg()
544 msg, esd_usb2_wait_msg()
545 sizeof(*msg), esd_usb2_wait_msg()
623 struct esd_usb2_msg *msg; esd_usb2_start() local
626 msg = kmalloc(sizeof(*msg), GFP_KERNEL); esd_usb2_start()
627 if (!msg) { esd_usb2_start()
645 msg->msg.hdr.cmd = CMD_IDADD; esd_usb2_start()
646 msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; esd_usb2_start()
647 msg->msg.filter.net = priv->index; esd_usb2_start()
648 msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ esd_usb2_start()
650 msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff); esd_usb2_start()
652 msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001); esd_usb2_start()
654 err = esd_usb2_send_msg(dev, msg); esd_usb2_start()
670 kfree(msg); esd_usb2_start()
723 struct esd_usb2_msg *msg; esd_usb2_start_xmit() local
751 msg = (struct esd_usb2_msg *)buf; esd_usb2_start_xmit()
753 msg->msg.hdr.len = 3; /* minimal length */ esd_usb2_start_xmit()
754 msg->msg.hdr.cmd = CMD_CAN_TX; esd_usb2_start_xmit()
755 msg->msg.tx.net = priv->index; esd_usb2_start_xmit()
756 msg->msg.tx.dlc = cf->can_dlc; esd_usb2_start_xmit()
757 msg->msg.tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); esd_usb2_start_xmit()
760 msg->msg.tx.dlc |= ESD_RTR; esd_usb2_start_xmit()
763 msg->msg.tx.id |= cpu_to_le32(ESD_EXTID); esd_usb2_start_xmit()
766 msg->msg.tx.data[i] = cf->data[i]; esd_usb2_start_xmit()
768 msg->msg.hdr.len += (cf->can_dlc + 3) >> 2; esd_usb2_start_xmit()
791 msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */ esd_usb2_start_xmit()
794 msg->msg.hdr.len << 2, esd_usb2_start_xmit()
849 struct esd_usb2_msg *msg; esd_usb2_close() local
852 msg = kmalloc(sizeof(*msg), GFP_KERNEL); esd_usb2_close()
853 if (!msg) esd_usb2_close()
857 msg->msg.hdr.cmd = CMD_IDADD; esd_usb2_close()
858 msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; esd_usb2_close()
859 msg->msg.filter.net = priv->index; esd_usb2_close()
860 msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ esd_usb2_close()
862 msg->msg.filter.mask[i] = 0; esd_usb2_close()
863 if (esd_usb2_send_msg(priv->usb2, msg) < 0) esd_usb2_close()
867 msg->msg.hdr.len = 2; esd_usb2_close()
868 msg->msg.hdr.cmd = CMD_SETBAUD; esd_usb2_close()
869 msg->msg.setbaud.net = priv->index; esd_usb2_close()
870 msg->msg.setbaud.rsvd = 0; esd_usb2_close()
871 msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE); esd_usb2_close()
872 if (esd_usb2_send_msg(priv->usb2, msg) < 0) esd_usb2_close()
881 kfree(msg); esd_usb2_close()
909 struct esd_usb2_msg *msg; esd_usb2_set_bittiming() local
936 msg = kmalloc(sizeof(*msg), GFP_KERNEL); esd_usb2_set_bittiming()
937 if (!msg) esd_usb2_set_bittiming()
940 msg->msg.hdr.len = 2; esd_usb2_set_bittiming()
941 msg->msg.hdr.cmd = CMD_SETBAUD; esd_usb2_set_bittiming()
942 msg->msg.setbaud.net = priv->index; esd_usb2_set_bittiming()
943 msg->msg.setbaud.rsvd = 0; esd_usb2_set_bittiming()
944 msg->msg.setbaud.baud = cpu_to_le32(canbtr); esd_usb2_set_bittiming()
948 err = esd_usb2_send_msg(priv->usb2, msg); esd_usb2_set_bittiming()
950 kfree(msg); esd_usb2_set_bittiming()
1054 struct esd_usb2_msg *msg; esd_usb2_probe() local
1069 msg = kmalloc(sizeof(*msg), GFP_KERNEL); esd_usb2_probe()
1070 if (!msg) { esd_usb2_probe()
1076 msg->msg.hdr.cmd = CMD_VERSION; esd_usb2_probe()
1077 msg->msg.hdr.len = 2; esd_usb2_probe()
1078 msg->msg.version.rsvd = 0; esd_usb2_probe()
1079 msg->msg.version.flags = 0; esd_usb2_probe()
1080 msg->msg.version.drv_version = 0; esd_usb2_probe()
1082 err = esd_usb2_send_msg(dev, msg); esd_usb2_probe()
1088 err = esd_usb2_wait_msg(dev, msg); esd_usb2_probe()
1094 dev->net_count = (int)msg->msg.version_reply.nets; esd_usb2_probe()
1095 dev->version = le32_to_cpu(msg->msg.version_reply.version); esd_usb2_probe()
1114 kfree(msg); esd_usb2_probe()
226 esd_usb2_rx_event(struct esd_usb2_net_priv *priv, struct esd_usb2_msg *msg) esd_usb2_rx_event() argument
314 esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, struct esd_usb2_msg *msg) esd_usb2_rx_can_msg() argument
359 esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv, struct esd_usb2_msg *msg) esd_usb2_tx_done_msg() argument
537 esd_usb2_wait_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg) esd_usb2_wait_msg() argument
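
Note: esd_usb2_read_bulk_callback() above walks a single USB bulk buffer that can carry several back-to-back messages, advancing by each message's header length (given in 32-bit words). The stand-alone sketch below shows that framing loop together with the bounds checks such a parser needs; the 2-byte header layout is invented for the example and is not the device's real protocol.

    /* Illustrative sketch: walk a buffer holding several length-prefixed
     * messages back to back.  The header layout is an example only. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct msg_hdr { uint8_t len_words; uint8_t cmd; };

    static void handle_msg(const uint8_t *msg, size_t len)
    {
        printf("cmd=%u len=%zu\n", (unsigned)msg[1], len);
    }

    static void parse_bulk(const uint8_t *buf, size_t actual_len)
    {
        size_t pos = 0;

        while (pos + sizeof(struct msg_hdr) <= actual_len) {
            struct msg_hdr hdr;
            size_t msg_len;

            memcpy(&hdr, buf + pos, sizeof(hdr));
            if (hdr.len_words == 0)                  /* zero length would loop forever */
                break;

            msg_len = (size_t)hdr.len_words << 2;    /* words -> bytes */
            if (pos + msg_len > actual_len)          /* truncated tail */
                break;

            handle_msg(buf + pos, msg_len);
            pos += msg_len;
        }
    }

    int main(void)
    {
        /* two 4-byte messages: {len=1,cmd=7,..} and {len=1,cmd=9,..} */
        const uint8_t buf[] = { 1, 7, 0, 0, 1, 9, 0, 0 };
        parse_bulk(buf, sizeof(buf));
        return 0;
    }
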
H A Dkvaser_usb.c104 /* Can msg ids */
259 u8 msg[14]; member in struct:kvaser_msg_tx_can
282 u8 msg[14]; member in struct:leaf_msg_rx_can
289 u8 msg[14]; member in struct:usbcan_msg_rx_can
556 struct kvaser_msg *msg) kvaser_usb_send_msg()
563 msg, msg->len, &actual_len, kvaser_usb_send_msg()
568 struct kvaser_msg *msg) kvaser_usb_wait_msg()
611 memcpy(msg, tmp, tmp->len); kvaser_usb_wait_msg()
630 struct kvaser_msg *msg; kvaser_usb_send_simple_msg() local
633 msg = kmalloc(sizeof(*msg), GFP_KERNEL); kvaser_usb_send_simple_msg()
634 if (!msg) kvaser_usb_send_simple_msg()
637 msg->id = msg_id; kvaser_usb_send_simple_msg()
638 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple); kvaser_usb_send_simple_msg()
639 msg->u.simple.channel = channel; kvaser_usb_send_simple_msg()
640 msg->u.simple.tid = 0xff; kvaser_usb_send_simple_msg()
642 rc = kvaser_usb_send_msg(dev, msg); kvaser_usb_send_simple_msg()
644 kfree(msg); kvaser_usb_send_simple_msg()
650 struct kvaser_msg msg; kvaser_usb_get_software_info() local
657 err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg); kvaser_usb_get_software_info()
663 dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); kvaser_usb_get_software_info()
665 le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx); kvaser_usb_get_software_info()
668 dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); kvaser_usb_get_software_info()
670 le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx); kvaser_usb_get_software_info()
679 struct kvaser_msg msg; kvaser_usb_get_card_info() local
686 err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg); kvaser_usb_get_card_info()
690 dev->nchannels = msg.u.cardinfo.nchannels; kvaser_usb_get_card_info()
700 const struct kvaser_msg *msg) kvaser_usb_tx_acknowledge()
710 channel = msg->u.tx_acknowledge_header.channel; kvaser_usb_tx_acknowledge()
711 tid = msg->u.tx_acknowledge_header.tid; kvaser_usb_tx_acknowledge()
778 struct kvaser_msg *msg; kvaser_usb_simple_msg_async() local
795 msg = (struct kvaser_msg *)buf; kvaser_usb_simple_msg_async()
796 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple); kvaser_usb_simple_msg_async()
797 msg->id = msg_id; kvaser_usb_simple_msg_async()
798 msg->u.simple.channel = priv->channel; kvaser_usb_simple_msg_async()
803 buf, msg->len, kvaser_usb_simple_msg_async()
1017 const struct kvaser_msg *msg) kvaser_usbcan_rx_error()
1021 switch (msg->id) { kvaser_usbcan_rx_error()
1024 es.channel = msg->u.usbcan.chip_state_event.channel; kvaser_usbcan_rx_error()
1025 es.status = msg->u.usbcan.chip_state_event.status; kvaser_usbcan_rx_error()
1026 es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count; kvaser_usbcan_rx_error()
1027 es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count; kvaser_usbcan_rx_error()
1033 es.status = msg->u.usbcan.error_event.status_ch0; kvaser_usbcan_rx_error()
1034 es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0; kvaser_usbcan_rx_error()
1035 es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0; kvaser_usbcan_rx_error()
1037 msg->u.usbcan.error_event.status_ch1; kvaser_usbcan_rx_error()
1045 es.status = msg->u.usbcan.error_event.status_ch1; kvaser_usbcan_rx_error()
1046 es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1; kvaser_usbcan_rx_error()
1047 es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1; kvaser_usbcan_rx_error()
1049 msg->u.usbcan.error_event.status_ch0; kvaser_usbcan_rx_error()
1055 dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n", kvaser_usbcan_rx_error()
1056 msg->id); kvaser_usbcan_rx_error()
1061 const struct kvaser_msg *msg) kvaser_leaf_rx_error()
1065 switch (msg->id) { kvaser_leaf_rx_error()
1067 es.channel = msg->u.leaf.error_event.channel; kvaser_leaf_rx_error()
1068 es.status = msg->u.leaf.error_event.status; kvaser_leaf_rx_error()
1069 es.txerr = msg->u.leaf.error_event.tx_errors_count; kvaser_leaf_rx_error()
1070 es.rxerr = msg->u.leaf.error_event.rx_errors_count; kvaser_leaf_rx_error()
1071 es.leaf.error_factor = msg->u.leaf.error_event.error_factor; kvaser_leaf_rx_error()
1074 es.channel = msg->u.leaf.log_message.channel; kvaser_leaf_rx_error()
1075 es.status = msg->u.leaf.log_message.data[0]; kvaser_leaf_rx_error()
1076 es.txerr = msg->u.leaf.log_message.data[2]; kvaser_leaf_rx_error()
1077 es.rxerr = msg->u.leaf.log_message.data[3]; kvaser_leaf_rx_error()
1078 es.leaf.error_factor = msg->u.leaf.log_message.data[1]; kvaser_leaf_rx_error()
1081 es.channel = msg->u.leaf.chip_state_event.channel; kvaser_leaf_rx_error()
1082 es.status = msg->u.leaf.chip_state_event.status; kvaser_leaf_rx_error()
1083 es.txerr = msg->u.leaf.chip_state_event.tx_errors_count; kvaser_leaf_rx_error()
1084 es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count; kvaser_leaf_rx_error()
1088 dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n", kvaser_leaf_rx_error()
1089 msg->id); kvaser_leaf_rx_error()
1097 const struct kvaser_msg *msg) kvaser_usb_rx_can_err()
1103 if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | kvaser_usb_rx_can_err()
1106 msg->u.rx_can_header.flag); kvaser_usb_rx_can_err()
1112 if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) { kvaser_usb_rx_can_err()
1132 const struct kvaser_msg *msg) kvaser_usb_rx_can_msg()
1138 u8 channel = msg->u.rx_can_header.channel; kvaser_usb_rx_can_msg()
1150 if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && kvaser_usb_rx_can_msg()
1151 (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) { kvaser_usb_rx_can_msg()
1152 kvaser_leaf_rx_error(dev, msg); kvaser_usb_rx_can_msg()
1154 } else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | kvaser_usb_rx_can_msg()
1157 kvaser_usb_rx_can_err(priv, msg); kvaser_usb_rx_can_msg()
1159 } else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) { kvaser_usb_rx_can_msg()
1162 msg->u.rx_can_header.flag); kvaser_usb_rx_can_msg()
1168 rx_msg = msg->u.leaf.rx_can.msg; kvaser_usb_rx_can_msg()
1171 rx_msg = msg->u.usbcan.rx_can.msg; kvaser_usb_rx_can_msg()
1181 if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) { kvaser_usb_rx_can_msg()
1182 cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id); kvaser_usb_rx_can_msg()
1188 cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc); kvaser_usb_rx_can_msg()
1190 if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) kvaser_usb_rx_can_msg()
1193 memcpy(cf->data, &msg->u.leaf.log_message.data, kvaser_usb_rx_can_msg()
1198 if (msg->id == CMD_RX_EXT_MESSAGE) { kvaser_usb_rx_can_msg()
1208 if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) kvaser_usb_rx_can_msg()
1221 const struct kvaser_msg *msg) kvaser_usb_start_chip_reply()
1224 u8 channel = msg->u.simple.channel; kvaser_usb_start_chip_reply()
1244 const struct kvaser_msg *msg) kvaser_usb_stop_chip_reply()
1247 u8 channel = msg->u.simple.channel; kvaser_usb_stop_chip_reply()
1261 const struct kvaser_msg *msg) kvaser_usb_handle_message()
1263 switch (msg->id) { kvaser_usb_handle_message()
1265 kvaser_usb_start_chip_reply(dev, msg); kvaser_usb_handle_message()
1269 kvaser_usb_stop_chip_reply(dev, msg); kvaser_usb_handle_message()
1274 kvaser_usb_rx_can_msg(dev, msg); kvaser_usb_handle_message()
1280 kvaser_usb_rx_can_msg(dev, msg); kvaser_usb_handle_message()
1286 kvaser_leaf_rx_error(dev, msg); kvaser_usb_handle_message()
1288 kvaser_usbcan_rx_error(dev, msg); kvaser_usb_handle_message()
1292 kvaser_usb_tx_acknowledge(dev, msg); kvaser_usb_handle_message()
1303 "Unhandled message (%d)\n", msg->id); kvaser_usb_handle_message()
1311 struct kvaser_msg *msg; kvaser_usb_read_bulk_callback() local
1328 msg = urb->transfer_buffer + pos; kvaser_usb_read_bulk_callback()
1339 if (msg->len == 0) { kvaser_usb_read_bulk_callback()
1345 if (pos + msg->len > urb->actual_length) { kvaser_usb_read_bulk_callback()
1350 kvaser_usb_handle_message(dev, msg); kvaser_usb_read_bulk_callback()
1351 pos += msg->len; kvaser_usb_read_bulk_callback()
1448 struct kvaser_msg *msg; kvaser_usb_set_opt_mode() local
1451 msg = kmalloc(sizeof(*msg), GFP_KERNEL); kvaser_usb_set_opt_mode()
1452 if (!msg) kvaser_usb_set_opt_mode()
1455 msg->id = CMD_SET_CTRL_MODE; kvaser_usb_set_opt_mode()
1456 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode); kvaser_usb_set_opt_mode()
1457 msg->u.ctrl_mode.tid = 0xff; kvaser_usb_set_opt_mode()
1458 msg->u.ctrl_mode.channel = priv->channel; kvaser_usb_set_opt_mode()
1461 msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT; kvaser_usb_set_opt_mode()
1463 msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL; kvaser_usb_set_opt_mode()
1465 rc = kvaser_usb_send_msg(priv->dev, msg); kvaser_usb_set_opt_mode()
1467 kfree(msg); kvaser_usb_set_opt_mode()
1581 struct kvaser_msg *msg; kvaser_usb_flush_queue() local
1584 msg = kmalloc(sizeof(*msg), GFP_KERNEL); kvaser_usb_flush_queue()
1585 if (!msg) kvaser_usb_flush_queue()
1588 msg->id = CMD_FLUSH_QUEUE; kvaser_usb_flush_queue()
1589 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue); kvaser_usb_flush_queue()
1590 msg->u.flush_queue.channel = priv->channel; kvaser_usb_flush_queue()
1591 msg->u.flush_queue.flags = 0x00; kvaser_usb_flush_queue()
1593 rc = kvaser_usb_send_msg(priv->dev, msg); kvaser_usb_flush_queue()
1595 kfree(msg); kvaser_usb_flush_queue()
1658 struct kvaser_msg *msg; kvaser_usb_start_xmit() local
1681 msg = buf; kvaser_usb_start_xmit()
1682 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can); kvaser_usb_start_xmit()
1683 msg->u.tx_can.channel = priv->channel; kvaser_usb_start_xmit()
1687 msg_tx_can_flags = &msg->u.tx_can.leaf.flags; kvaser_usb_start_xmit()
1690 msg_tx_can_flags = &msg->u.tx_can.usbcan.flags; kvaser_usb_start_xmit()
1697 msg->id = CMD_TX_EXT_MESSAGE; kvaser_usb_start_xmit()
1698 msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f; kvaser_usb_start_xmit()
1699 msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f; kvaser_usb_start_xmit()
1700 msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f; kvaser_usb_start_xmit()
1701 msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff; kvaser_usb_start_xmit()
1702 msg->u.tx_can.msg[4] = cf->can_id & 0x3f; kvaser_usb_start_xmit()
1704 msg->id = CMD_TX_STD_MESSAGE; kvaser_usb_start_xmit()
1705 msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f; kvaser_usb_start_xmit()
1706 msg->u.tx_can.msg[1] = cf->can_id & 0x3f; kvaser_usb_start_xmit()
1709 msg->u.tx_can.msg[5] = cf->can_dlc; kvaser_usb_start_xmit()
1710 memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc); kvaser_usb_start_xmit()
1743 msg->u.tx_can.tid = context->echo_index; kvaser_usb_start_xmit()
1748 buf, msg->len, kvaser_usb_start_xmit()
1806 struct kvaser_msg *msg; kvaser_usb_set_bittiming() local
1809 msg = kmalloc(sizeof(*msg), GFP_KERNEL); kvaser_usb_set_bittiming()
1810 if (!msg) kvaser_usb_set_bittiming()
1813 msg->id = CMD_SET_BUS_PARAMS; kvaser_usb_set_bittiming()
1814 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams); kvaser_usb_set_bittiming()
1815 msg->u.busparams.channel = priv->channel; kvaser_usb_set_bittiming()
1816 msg->u.busparams.tid = 0xff; kvaser_usb_set_bittiming()
1817 msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate); kvaser_usb_set_bittiming()
1818 msg->u.busparams.sjw = bt->sjw; kvaser_usb_set_bittiming()
1819 msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; kvaser_usb_set_bittiming()
1820 msg->u.busparams.tseg2 = bt->phase_seg2; kvaser_usb_set_bittiming()
1823 msg->u.busparams.no_samp = 3; kvaser_usb_set_bittiming()
1825 msg->u.busparams.no_samp = 1; kvaser_usb_set_bittiming()
1827 rc = kvaser_usb_send_msg(dev, msg); kvaser_usb_set_bittiming()
1829 kfree(msg); kvaser_usb_set_bittiming()
555 kvaser_usb_send_msg(const struct kvaser_usb *dev, struct kvaser_msg *msg) kvaser_usb_send_msg() argument
567 kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, struct kvaser_msg *msg) kvaser_usb_wait_msg() argument
699 kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, const struct kvaser_msg *msg) kvaser_usb_tx_acknowledge() argument
1016 kvaser_usbcan_rx_error(const struct kvaser_usb *dev, const struct kvaser_msg *msg) kvaser_usbcan_rx_error() argument
1060 kvaser_leaf_rx_error(const struct kvaser_usb *dev, const struct kvaser_msg *msg) kvaser_leaf_rx_error() argument
1096 kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, const struct kvaser_msg *msg) kvaser_usb_rx_can_err() argument
1131 kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, const struct kvaser_msg *msg) kvaser_usb_rx_can_msg() argument
1220 kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, const struct kvaser_msg *msg) kvaser_usb_start_chip_reply() argument
1243 kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev, const struct kvaser_msg *msg) kvaser_usb_stop_chip_reply() argument
1260 kvaser_usb_handle_message(const struct kvaser_usb *dev, const struct kvaser_msg *msg) kvaser_usb_handle_message() argument
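The transmit fragments above (CMD_TX_EXT_MESSAGE / CMD_TX_STD_MESSAGE) split the CAN identifier across the first bytes of msg->u.tx_can.msg in 5/6/4/8/6-bit and 5/6-bit slices, with the DLC and payload following. A standalone restatement of that packing; the helper names are invented for this sketch, not taken from the driver.

#include <stdint.h>

/* Bit slicing as seen in kvaser_usb_start_xmit(); helper names are mine. */
static void kvaser_pack_ext_id(uint8_t b[5], uint32_t can_id)
{
	b[0] = (can_id >> 24) & 0x1f;	/* ID bits 28..24 (5 bits) */
	b[1] = (can_id >> 18) & 0x3f;	/* ID bits 23..18 (6 bits) */
	b[2] = (can_id >> 14) & 0x0f;	/* ID bits 17..14 (4 bits) */
	b[3] = (can_id >> 6) & 0xff;	/* ID bits 13..6  (8 bits) */
	b[4] = can_id & 0x3f;		/* ID bits 5..0   (6 bits) */
}

static void kvaser_pack_std_id(uint8_t b[2], uint32_t can_id)
{
	b[0] = (can_id >> 6) & 0x1f;	/* ID bits 10..6 (5 bits) */
	b[1] = can_id & 0x3f;		/* ID bits 5..0  (6 bits) */
}

The slices add up to the 29-bit extended and 11-bit standard identifier widths, which is why the extended path fills five payload bytes before the DLC and data bytes shown above.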
H A Dems_usb.c131 u8 msg[8]; member in struct:cpc_can_msg
204 u8 length; /* length of data within union 'msg' */
218 } msg; member in struct:ems_cpc_msg
309 static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) ems_usb_rx_can_msg() argument
320 cf->can_id = le32_to_cpu(msg->msg.can_msg.id); ems_usb_rx_can_msg()
321 cf->can_dlc = get_can_dlc(msg->msg.can_msg.length & 0xF); ems_usb_rx_can_msg()
323 if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME || ems_usb_rx_can_msg()
324 msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) ems_usb_rx_can_msg()
327 if (msg->type == CPC_MSG_TYPE_RTR_FRAME || ems_usb_rx_can_msg()
328 msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) { ems_usb_rx_can_msg()
332 cf->data[i] = msg->msg.can_msg.msg[i]; ems_usb_rx_can_msg()
341 static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) ems_usb_rx_err() argument
351 if (msg->type == CPC_MSG_TYPE_CAN_STATE) { ems_usb_rx_err()
352 u8 state = msg->msg.can_state; ems_usb_rx_err()
367 } else if (msg->type == CPC_MSG_TYPE_CAN_FRAME_ERROR) { ems_usb_rx_err()
368 u8 ecc = msg->msg.error.cc.regs.sja1000.ecc; ems_usb_rx_err()
369 u8 txerr = msg->msg.error.cc.regs.sja1000.txerr; ems_usb_rx_err()
370 u8 rxerr = msg->msg.error.cc.regs.sja1000.rxerr; ems_usb_rx_err()
403 } else if (msg->type == CPC_MSG_TYPE_OVERRUN) { ems_usb_rx_err()
444 struct ems_cpc_msg *msg; ems_usb_read_bulk_callback() local
453 msg = (struct ems_cpc_msg *)&ibuf[start]; ems_usb_read_bulk_callback()
455 switch (msg->type) { ems_usb_read_bulk_callback()
458 ems_usb_rx_err(dev, msg); ems_usb_read_bulk_callback()
465 ems_usb_rx_can_msg(dev, msg); ems_usb_read_bulk_callback()
470 ems_usb_rx_err(dev, msg); ems_usb_read_bulk_callback()
475 ems_usb_rx_err(dev, msg); ems_usb_read_bulk_callback()
479 start += CPC_MSG_HEADER_LEN + msg->length; ems_usb_read_bulk_callback()
545 static int ems_usb_command_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) ems_usb_command_msg() argument
550 memcpy(&dev->tx_msg_buffer[CPC_HEADER_SIZE], msg, ems_usb_command_msg()
551 msg->length + CPC_MSG_HEADER_LEN); ems_usb_command_msg()
558 msg->length + CPC_MSG_HEADER_LEN + CPC_HEADER_SIZE, ems_usb_command_msg()
567 dev->active_params.msg.can_params.cc_params.sja1000.mode = mode; ems_usb_write_mode()
585 cmd.msg.generic[0] = val; ems_usb_control_cmd()
748 struct ems_cpc_msg *msg; ems_usb_start_xmit() local
772 msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE]; ems_usb_start_xmit()
774 msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); ems_usb_start_xmit()
775 msg->msg.can_msg.length = cf->can_dlc; ems_usb_start_xmit()
778 msg->type = cf->can_id & CAN_EFF_FLAG ? ems_usb_start_xmit()
781 msg->length = CPC_CAN_MSG_MIN_SIZE; ems_usb_start_xmit()
783 msg->type = cf->can_id & CAN_EFF_FLAG ? ems_usb_start_xmit()
787 msg->msg.can_msg.msg[i] = cf->data[i]; ems_usb_start_xmit()
789 msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc; ems_usb_start_xmit()
938 dev->active_params.msg.can_params.cc_params.sja1000.btr0 = btr0; ems_usb_set_bittiming()
939 dev->active_params.msg.can_params.cc_params.sja1000.btr1 = btr1; ems_usb_set_bittiming()
944 static void init_params_sja1000(struct ems_cpc_msg *msg) init_params_sja1000() argument
947 &msg->msg.can_params.cc_params.sja1000; init_params_sja1000()
949 msg->type = CPC_CMD_TYPE_CAN_PARAMS; init_params_sja1000()
950 msg->length = sizeof(struct cpc_can_params); init_params_sja1000()
951 msg->msgid = 0; init_params_sja1000()
953 msg->msg.can_params.cc_type = CPC_CC_TYPE_SJA1000; init_params_sja1000()
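ems_usb_read_bulk_callback() above walks a bulk buffer of back-to-back CPC messages, advancing by a fixed header size plus msg->length each step. A minimal sketch of that framing walk; the header size is passed in and the struct layout is a placeholder, not the driver's exact definition.

#include <stdint.h>
#include <stddef.h>

/* Walk a buffer of concatenated [header | payload] records, in the style of
 * the start += CPC_MSG_HEADER_LEN + msg->length loop above. */
struct cpc_hdr_sketch {
	uint8_t type;		/* message type (CAN frame, error, overrun, ...) */
	uint8_t length;		/* payload bytes following the header */
};

static void walk_cpc_stream(const uint8_t *ibuf, size_t actual_length,
			    size_t header_len,
			    void (*handle)(const struct cpc_hdr_sketch *))
{
	size_t start = 0;

	while (start + header_len <= actual_length) {
		const struct cpc_hdr_sketch *msg =
			(const struct cpc_hdr_sketch *)&ibuf[start];

		if (start + header_len + msg->length > actual_length)
			break;		/* truncated trailing record */
		handle(msg);
		start += header_len + msg->length;
	}
}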
/linux-4.1.27/drivers/staging/wlan-ng/
H A Dprism2mgmt.c104 * msgp ptr to msg buffer
120 struct p80211msg_dot11req_scan *msg = msgp; prism2mgmt_scan() local
135 msg->resultcode.data = P80211ENUM_resultcode_not_supported; prism2mgmt_scan()
148 msg->resultcode.data = prism2mgmt_scan()
161 msg->resultcode.data = prism2mgmt_scan()
171 if (msg->scantype.data != P80211ENUM_scantype_active) prism2mgmt_scan()
172 word = cpu_to_le16(msg->maxchanneltime.data); prism2mgmt_scan()
191 for (i = 0; i < msg->channellist.data.len; i++) { prism2mgmt_scan()
192 u8 channel = msg->channellist.data.data[i]; prism2mgmt_scan()
202 scanreq.ssid.len = cpu_to_le16(msg->ssid.data.len); prism2mgmt_scan()
203 memcpy(scanreq.ssid.data, msg->ssid.data.data, msg->ssid.data.len); prism2mgmt_scan()
210 msg->resultcode.data = prism2mgmt_scan()
224 msg->resultcode.data = prism2mgmt_scan()
238 msg->resultcode.data = prism2mgmt_scan()
248 msg->resultcode.data = prism2mgmt_scan()
259 msg->resultcode.data = prism2mgmt_scan()
270 msg->resultcode.data = prism2mgmt_scan()
279 msg->resultcode.data = prism2mgmt_scan()
287 timeout = msg->channellist.data.len * msg->maxchanneltime.data; prism2mgmt_scan()
300 msg->resultcode.data = prism2mgmt_scan()
308 msg->numbss.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_scan()
312 msg->numbss.data = hw->scanflag; prism2mgmt_scan()
323 msg->resultcode.data = prism2mgmt_scan()
335 msg->resultcode.data = prism2mgmt_scan()
341 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_scan()
344 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_scan()
357 * msgp ptr to msg buffer
516 * msgp ptr to msg buffer
532 struct p80211msg_dot11req_start *msg = msgp; prism2mgmt_start() local
542 memcpy(&wlandev->ssid, &msg->ssid.data, sizeof(msg->ssid.data)); prism2mgmt_start()
551 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_start()
552 msg->resultcode.data = P80211ENUM_resultcode_not_supported; prism2mgmt_start()
556 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_start()
561 pstr = (p80211pstrd_t *) &(msg->ssid.data); prism2mgmt_start()
582 word = msg->beaconperiod.data; prism2mgmt_start()
591 word = msg->dschannel.data; prism2mgmt_start()
599 word = p80211rate_to_p2bit(msg->basicrate1.data); prism2mgmt_start()
600 if (msg->basicrate2.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
601 word |= p80211rate_to_p2bit(msg->basicrate2.data); prism2mgmt_start()
603 if (msg->basicrate3.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
604 word |= p80211rate_to_p2bit(msg->basicrate3.data); prism2mgmt_start()
606 if (msg->basicrate4.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
607 word |= p80211rate_to_p2bit(msg->basicrate4.data); prism2mgmt_start()
609 if (msg->basicrate5.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
610 word |= p80211rate_to_p2bit(msg->basicrate5.data); prism2mgmt_start()
612 if (msg->basicrate6.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
613 word |= p80211rate_to_p2bit(msg->basicrate6.data); prism2mgmt_start()
615 if (msg->basicrate7.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
616 word |= p80211rate_to_p2bit(msg->basicrate7.data); prism2mgmt_start()
618 if (msg->basicrate8.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
619 word |= p80211rate_to_p2bit(msg->basicrate8.data); prism2mgmt_start()
629 word = p80211rate_to_p2bit(msg->operationalrate1.data); prism2mgmt_start()
630 if (msg->operationalrate2.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
631 word |= p80211rate_to_p2bit(msg->operationalrate2.data); prism2mgmt_start()
633 if (msg->operationalrate3.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
634 word |= p80211rate_to_p2bit(msg->operationalrate3.data); prism2mgmt_start()
636 if (msg->operationalrate4.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
637 word |= p80211rate_to_p2bit(msg->operationalrate4.data); prism2mgmt_start()
639 if (msg->operationalrate5.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
640 word |= p80211rate_to_p2bit(msg->operationalrate5.data); prism2mgmt_start()
642 if (msg->operationalrate6.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
643 word |= p80211rate_to_p2bit(msg->operationalrate6.data); prism2mgmt_start()
645 if (msg->operationalrate7.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
646 word |= p80211rate_to_p2bit(msg->operationalrate7.data); prism2mgmt_start()
648 if (msg->operationalrate8.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_start()
649 word |= p80211rate_to_p2bit(msg->operationalrate8.data); prism2mgmt_start()
666 if (msg->bsstype.data == P80211ENUM_bsstype_independent) { prism2mgmt_start()
680 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_start()
685 msg->resultcode.data = P80211ENUM_resultcode_invalid_parameters; prism2mgmt_start()
700 * msgp ptr to msg buffer
714 struct p80211msg_p2req_readpda *msg = msgp; prism2mgmt_readpda() local
723 msg->resultcode.data = prism2mgmt_readpda()
725 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_readpda()
731 msg->pda.data, prism2mgmt_readpda()
738 msg->resultcode.data = prism2mgmt_readpda()
740 msg->resultcode.status = prism2mgmt_readpda()
744 msg->pda.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_readpda()
745 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_readpda()
746 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_readpda()
766 * msgp ptr to msg buffer
780 struct p80211msg_p2req_ramdl_state *msg = msgp; prism2mgmt_ramdl_state() local
785 msg->resultcode.data = prism2mgmt_ramdl_state()
787 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_ramdl_state()
796 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_ramdl_state()
797 if (msg->enable.data == P80211ENUM_truth_true) { prism2mgmt_ramdl_state()
798 if (hfa384x_drvr_ramdl_enable(hw, msg->exeaddr.data)) { prism2mgmt_ramdl_state()
799 msg->resultcode.data = prism2mgmt_ramdl_state()
802 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_ramdl_state()
806 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_ramdl_state()
821 * msgp ptr to msg buffer
835 struct p80211msg_p2req_ramdl_write *msg = msgp; prism2mgmt_ramdl_write() local
843 msg->resultcode.data = prism2mgmt_ramdl_write()
845 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_ramdl_write()
849 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_ramdl_write()
851 if (msg->len.data > sizeof(msg->data.data)) { prism2mgmt_ramdl_write()
852 msg->resultcode.status = prism2mgmt_ramdl_write()
857 addr = msg->addr.data; prism2mgmt_ramdl_write()
858 len = msg->len.data; prism2mgmt_ramdl_write()
859 buf = msg->data.data; prism2mgmt_ramdl_write()
861 msg->resultcode.data = P80211ENUM_resultcode_refused; prism2mgmt_ramdl_write()
863 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_ramdl_write()
882 * msgp ptr to msg buffer
897 struct p80211msg_p2req_flashdl_state *msg = msgp; prism2mgmt_flashdl_state() local
902 msg->resultcode.data = prism2mgmt_flashdl_state()
904 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_flashdl_state()
913 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_flashdl_state()
914 if (msg->enable.data == P80211ENUM_truth_true) { prism2mgmt_flashdl_state()
916 msg->resultcode.data = prism2mgmt_flashdl_state()
919 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_flashdl_state()
923 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_flashdl_state()
937 msg->resultcode.data = prism2mgmt_flashdl_state()
953 * msgp ptr to msg buffer
967 struct p80211msg_p2req_flashdl_write *msg = msgp; prism2mgmt_flashdl_write() local
975 msg->resultcode.data = prism2mgmt_flashdl_write()
977 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_flashdl_write()
986 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_flashdl_write()
988 if (msg->len.data > sizeof(msg->data.data)) { prism2mgmt_flashdl_write()
989 msg->resultcode.status = prism2mgmt_flashdl_write()
994 addr = msg->addr.data; prism2mgmt_flashdl_write()
995 len = msg->len.data; prism2mgmt_flashdl_write()
996 buf = msg->data.data; prism2mgmt_flashdl_write()
998 msg->resultcode.data = P80211ENUM_resultcode_refused; prism2mgmt_flashdl_write()
1000 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_flashdl_write()
1012 * msgp ptr to msg buffer
1030 struct p80211msg_lnxreq_autojoin *msg = msgp; prism2mgmt_autojoin() local
1038 memcpy(&wlandev->ssid, &msg->ssid.data, sizeof(msg->ssid.data)); prism2mgmt_autojoin()
1048 if (msg->authtype.data == P80211ENUM_authalg_sharedkey) prism2mgmt_autojoin()
1057 pstr = (p80211pstrd_t *) &(msg->ssid.data); prism2mgmt_autojoin()
1070 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_autojoin()
1071 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_autojoin()
1083 * msgp ptr to msg buffer
1098 struct p80211msg_lnxreq_wlansniff *msg = msgp; prism2mgmt_wlansniff() local
1103 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_wlansniff()
1104 switch (msg->enable.data) { prism2mgmt_wlansniff()
1108 msg->resultcode.data = prism2mgmt_wlansniff()
1167 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_wlansniff()
1214 word = msg->channel.data; prism2mgmt_wlansniff()
1239 if ((msg->keepwepflags.status == prism2mgmt_wlansniff()
1241 && (msg->keepwepflags.data != prism2mgmt_wlansniff()
1261 if ((msg->stripfcs.status == P80211ENUM_msgitem_status_data_ok) prism2mgmt_wlansniff()
1262 && (msg->stripfcs.data == P80211ENUM_truth_true)) { prism2mgmt_wlansniff()
1269 if (msg->packet_trunc.status == prism2mgmt_wlansniff()
1271 hw->sniff_truncate = msg->packet_trunc.data; prism2mgmt_wlansniff()
1297 if ((msg->prismheader.status == prism2mgmt_wlansniff()
1299 && (msg->prismheader.data == P80211ENUM_truth_true)) { prism2mgmt_wlansniff()
1303 if ((msg->wlanheader.status == prism2mgmt_wlansniff()
1305 && (msg->wlanheader.data == P80211ENUM_truth_true)) { prism2mgmt_wlansniff()
1312 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_wlansniff()
1315 msg->resultcode.data = P80211ENUM_resultcode_invalid_parameters; prism2mgmt_wlansniff()
1320 msg->resultcode.data = P80211ENUM_resultcode_refused; prism2mgmt_wlansniff()
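Every prism2mgmt_* handler above finishes the same way: mark the resultcode item as carrying valid data, then store a result enum. A condensed sketch of that convention; the enum values are placeholders, not the real p80211 numbers.

#include <stdint.h>

enum { SKETCH_MSGITEM_DATA_OK = 1 };	/* stands in for P80211ENUM_msgitem_status_data_ok */
enum { SKETCH_RESULT_SUCCESS = 1, SKETCH_RESULT_NOT_SUPPORTED = 2 };

struct sketch_item_uint32 {
	uint32_t status;	/* does .data hold a valid value? */
	uint32_t data;		/* the result code itself */
};

static void sketch_finish_request(struct sketch_item_uint32 *resultcode, int ok)
{
	resultcode->status = SKETCH_MSGITEM_DATA_OK;
	resultcode->data = ok ? SKETCH_RESULT_SUCCESS : SKETCH_RESULT_NOT_SUPPORTED;
}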
H A Dprism2mib.c92 struct p80211msg_dot11req_mibset *msg, void *data);
99 struct p80211msg_dot11req_mibset *msg,
106 struct p80211msg_dot11req_mibset *msg, void *data);
112 struct p80211msg_dot11req_mibset *msg, void *data);
118 struct p80211msg_dot11req_mibset *msg,
125 struct p80211msg_dot11req_mibset *msg,
132 struct p80211msg_dot11req_mibset *msg,
139 struct p80211msg_dot11req_mibset *msg,
146 struct p80211msg_dot11req_mibset *msg, void *data);
247 * msgp ptr to msg buffer
268 struct p80211msg_dot11req_mibset *msg = msgp; prism2mgmt_mibset_mibget() local
271 msg->resultcode.status = P80211ENUM_msgitem_status_data_ok; prism2mgmt_mibset_mibget()
272 msg->resultcode.data = P80211ENUM_resultcode_success; prism2mgmt_mibset_mibget()
287 mibitem = (p80211itemd_t *) msg->mibattribute.data; prism2mgmt_mibset_mibget()
294 msg->resultcode.data = P80211ENUM_resultcode_not_supported; prism2mgmt_mibset_mibget()
304 isget = (msg->msgcode == DIDmsg_dot11req_mibget); prism2mgmt_mibset_mibget()
308 msg->resultcode.data = prism2mgmt_mibset_mibget()
314 msg->resultcode.data = prism2mgmt_mibset_mibget()
328 result = mib->func(mib, isget, wlandev, hw, msg, (void *)mibitem->data); prism2mgmt_mibset_mibget()
330 if (msg->resultcode.data == P80211ENUM_resultcode_success) { prism2mgmt_mibset_mibget()
333 msg->resultcode.data = prism2mgmt_mibset_mibget()
337 msg->mibattribute.status = prism2mgmt_mibset_mibget()
365 * msg Message structure.
378 struct p80211msg_dot11req_mibset *msg, prism2mib_bytearea2pstr()
415 * msg Message structure.
428 struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_uint32()
462 * msg Message structure.
475 struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_flag()
519 * msg Message structure.
532 struct p80211msg_dot11req_mibset *msg, prism2mib_wepdefaultkey()
569 * msg Message structure.
582 struct p80211msg_dot11req_mibset *msg, prism2mib_privacyinvoked()
592 return prism2mib_flag(mib, isget, wlandev, hw, msg, data); prism2mib_privacyinvoked()
611 * msg Message structure.
624 struct p80211msg_dot11req_mibset *msg, prism2mib_excludeunencrypted()
628 return prism2mib_flag(mib, isget, wlandev, hw, msg, data); prism2mib_excludeunencrypted()
647 * msg Message structure.
660 struct p80211msg_dot11req_mibset *msg, prism2mib_fragmentationthreshold()
670 msg->resultcode.data = prism2mib_fragmentationthreshold()
675 result = prism2mib_uint32(mib, isget, wlandev, hw, msg, data); prism2mib_fragmentationthreshold()
696 * msg Message structure.
709 struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_priv()
792 * msg wlan message
814 * msg wlan message
374 prism2mib_bytearea2pstr(struct mibrec *mib, int isget, wlandevice_t *wlandev, hfa384x_t *hw, struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_bytearea2pstr() argument
424 prism2mib_uint32(struct mibrec *mib, int isget, wlandevice_t *wlandev, hfa384x_t *hw, struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_uint32() argument
471 prism2mib_flag(struct mibrec *mib, int isget, wlandevice_t *wlandev, hfa384x_t *hw, struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_flag() argument
528 prism2mib_wepdefaultkey(struct mibrec *mib, int isget, wlandevice_t *wlandev, hfa384x_t *hw, struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_wepdefaultkey() argument
578 prism2mib_privacyinvoked(struct mibrec *mib, int isget, wlandevice_t *wlandev, hfa384x_t *hw, struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_privacyinvoked() argument
620 prism2mib_excludeunencrypted(struct mibrec *mib, int isget, wlandevice_t *wlandev, hfa384x_t *hw, struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_excludeunencrypted() argument
656 prism2mib_fragmentationthreshold(struct mibrec *mib, int isget, wlandevice_t *wlandev, hfa384x_t *hw, struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_fragmentationthreshold() argument
705 prism2mib_priv(struct mibrec *mib, int isget, wlandevice_t *wlandev, hfa384x_t *hw, struct p80211msg_dot11req_mibset *msg, void *data) prism2mib_priv() argument
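prism2mgmt_mibset_mibget() above resolves each get/set request through a table of mibrec entries whose func pointer shares the signature shown at the bottom of this block. A stripped-down sketch of that table-driven dispatch; the fields, handler, and table contents are reduced or invented for illustration.

#include <stdint.h>

struct mibrec_sketch;

typedef int (*mibfunc_sketch)(struct mibrec_sketch *mib, int isget,
			      void *priv, void *data);

struct mibrec_sketch {
	uint32_t did;		/* attribute identifier */
	mibfunc_sketch func;	/* handler with the shared signature */
};

static int mib_flag_sketch(struct mibrec_sketch *mib, int isget,
			   void *priv, void *data)
{
	/* read or write one boolean attribute; hardware access elided */
	return 0;
}

static struct mibrec_sketch mibtab_sketch[] = {
	{ 0x1001, mib_flag_sketch },
	{ 0, 0 }		/* table terminator */
};

static int dispatch_mib_sketch(uint32_t did, int isget, void *priv, void *data)
{
	struct mibrec_sketch *mib;

	for (mib = mibtab_sketch; mib->did; mib++)
		if (mib->did == did)
			return mib->func(mib, isget, priv, data);
	return -1;		/* attribute not supported */
}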
H A Dp80211req.c75 static void p80211req_handlemsg(wlandevice_t *wlandev, struct p80211msg *msg);
98 struct p80211msg *msg = (struct p80211msg *) msgbuf; p80211req_dorequest() local
102 msg->msgcode == DIDmsg_lnxreq_ifstate) || p80211req_dorequest()
110 (msg->msgcode != DIDmsg_dot11req_mibget)) { p80211req_dorequest()
121 /* Allow p80211 to look at msg and handle if desired. */ p80211req_dorequest()
124 p80211req_handlemsg(wlandev, msg); p80211req_dorequest()
128 wlandev->mlmerequest(wlandev, msg); p80211req_dorequest()
131 return 0; /* if result==0, msg->status still may contain an err */ p80211req_dorequest()
144 * msg message structure
147 * nothing (any results are set in the status field of the msg)
152 static void p80211req_handlemsg(wlandevice_t *wlandev, struct p80211msg *msg) p80211req_handlemsg() argument
154 switch (msg->msgcode) { p80211req_handlemsg()
158 (struct p80211msg_lnxreq_hostwep *) msg; p80211req_handlemsg()
170 int isget = (msg->msgcode == DIDmsg_dot11req_mibget); p80211req_handlemsg()
172 (struct p80211msg_dot11req_mibget *) msg; p80211req_handlemsg()
176 } /* switch msg->msgcode */ p80211req_handlemsg()
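p80211req_dorequest() above gives the 802.11 stack a first look at the message (p80211req_handlemsg), then forwards it to the device's mlmerequest hook; the function's return value only means the request was delivered, while the per-item status fields carry the real outcome. A compact sketch of that two-stage dispatch, with invented type names.

#include <stdint.h>

struct sketch_msg { uint32_t msgcode; };

struct sketch_wlandev {
	void (*mlmerequest)(struct sketch_wlandev *dev, struct sketch_msg *msg);
};

static void sketch_handlemsg(struct sketch_wlandev *dev, struct sketch_msg *msg)
{
	/* stack-level handling keyed on msg->msgcode (mibget/mibset, hostwep, ...) */
}

static int sketch_dorequest(struct sketch_wlandev *dev, struct sketch_msg *msg)
{
	sketch_handlemsg(dev, msg);	/* let the stack look at it first */
	dev->mlmerequest(dev, msg);	/* then hand it to the driver */
	return 0;			/* msg status fields may still carry an error */
}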
/linux-4.1.27/drivers/hsi/clients/
H A Dhsi_char.c129 static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg, hsc_add_tail() argument
135 list_add_tail(&msg->link, queue); hsc_add_tail()
142 struct hsi_msg *msg = NULL; hsc_get_first_msg() local
150 msg = list_first_entry(queue, struct hsi_msg, link); hsc_get_first_msg()
151 list_del(&msg->link); hsc_get_first_msg()
155 return msg; hsc_get_first_msg()
158 static inline void hsc_msg_free(struct hsi_msg *msg) hsc_msg_free() argument
160 kfree(sg_virt(msg->sgt.sgl)); hsc_msg_free()
161 hsi_free_msg(msg); hsc_msg_free()
166 struct hsi_msg *msg, *tmp; hsc_free_list() local
168 list_for_each_entry_safe(msg, tmp, list, link) { list_for_each_entry_safe()
169 list_del(&msg->link); list_for_each_entry_safe()
170 hsc_msg_free(msg); list_for_each_entry_safe()
188 struct hsi_msg *msg; hsc_msg_alloc() local
191 msg = hsi_alloc_msg(1, GFP_KERNEL); hsc_msg_alloc()
192 if (!msg) hsc_msg_alloc()
196 hsi_free_msg(msg); hsc_msg_alloc()
199 sg_init_one(msg->sgt.sgl, buf, alloc_size); hsc_msg_alloc()
203 return msg; hsc_msg_alloc()
210 struct hsi_msg *msg; hsc_msgs_alloc() local
214 msg = hsc_msg_alloc(max_data_size); hsc_msgs_alloc()
215 if (!msg) hsc_msgs_alloc()
217 msg->channel = channel->ch; hsc_msgs_alloc()
218 list_add_tail(&msg->link, &channel->free_msgs_list); hsc_msgs_alloc()
228 static inline unsigned int hsc_msg_len_get(struct hsi_msg *msg) hsc_msg_len_get() argument
230 return msg->sgt.sgl->length; hsc_msg_len_get()
233 static inline void hsc_msg_len_set(struct hsi_msg *msg, unsigned int len) hsc_msg_len_set() argument
235 msg->sgt.sgl->length = len; hsc_msg_len_set()
238 static void hsc_rx_completed(struct hsi_msg *msg) hsc_rx_completed() argument
240 struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); hsc_rx_completed()
241 struct hsc_channel *channel = cl_data->channels + msg->channel; hsc_rx_completed()
244 hsc_add_tail(channel, msg, &channel->rx_msgs_queue); hsc_rx_completed()
247 hsc_add_tail(channel, msg, &channel->free_msgs_list); hsc_rx_completed()
251 static void hsc_rx_msg_destructor(struct hsi_msg *msg) hsc_rx_msg_destructor() argument
253 msg->status = HSI_STATUS_ERROR; hsc_rx_msg_destructor()
254 hsc_msg_len_set(msg, 0); hsc_rx_msg_destructor()
255 hsc_rx_completed(msg); hsc_rx_msg_destructor()
258 static void hsc_tx_completed(struct hsi_msg *msg) hsc_tx_completed() argument
260 struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); hsc_tx_completed()
261 struct hsc_channel *channel = cl_data->channels + msg->channel; hsc_tx_completed()
264 hsc_add_tail(channel, msg, &channel->tx_msgs_queue); hsc_tx_completed()
267 hsc_add_tail(channel, msg, &channel->free_msgs_list); hsc_tx_completed()
271 static void hsc_tx_msg_destructor(struct hsi_msg *msg) hsc_tx_msg_destructor() argument
273 msg->status = HSI_STATUS_ERROR; hsc_tx_msg_destructor()
274 hsc_msg_len_set(msg, 0); hsc_tx_msg_destructor()
275 hsc_tx_completed(msg); hsc_tx_msg_destructor()
278 static void hsc_break_req_destructor(struct hsi_msg *msg) hsc_break_req_destructor() argument
280 struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); hsc_break_req_destructor()
282 hsi_free_msg(msg); hsc_break_req_destructor()
286 static void hsc_break_received(struct hsi_msg *msg) hsc_break_received() argument
288 struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); hsc_break_received()
307 hsi_flush(msg->cl); hsc_break_received()
308 ret = hsi_async_read(msg->cl, msg); hsc_break_received()
310 hsc_break_req_destructor(msg); hsc_break_received()
316 struct hsi_msg *msg; hsc_break_request() local
322 msg = hsi_alloc_msg(0, GFP_KERNEL); hsc_break_request()
323 if (!msg) { hsc_break_request()
327 msg->break_frame = 1; hsc_break_request()
328 msg->complete = hsc_break_received; hsc_break_request()
329 msg->destructor = hsc_break_req_destructor; hsc_break_request()
330 ret = hsi_async_read(cl, msg); hsc_break_request()
332 hsc_break_req_destructor(msg); hsc_break_request()
339 struct hsi_msg *msg; hsc_break_send() local
342 msg = hsi_alloc_msg(0, GFP_ATOMIC); hsc_break_send()
343 if (!msg) hsc_break_send()
345 msg->break_frame = 1; hsc_break_send()
346 msg->complete = hsi_free_msg; hsc_break_send()
347 msg->destructor = hsi_free_msg; hsc_break_send()
348 ret = hsi_async_write(cl, msg); hsc_break_send()
350 hsi_free_msg(msg); hsc_break_send()
429 struct hsi_msg *msg; hsc_read() local
442 msg = hsc_get_first_msg(channel, &channel->free_msgs_list); hsc_read()
443 if (!msg) { hsc_read()
447 hsc_msg_len_set(msg, len); hsc_read()
448 msg->complete = hsc_rx_completed; hsc_read()
449 msg->destructor = hsc_rx_msg_destructor; hsc_read()
450 ret = hsi_async_read(channel->cl, msg); hsc_read()
452 hsc_add_tail(channel, msg, &channel->free_msgs_list); hsc_read()
464 msg = hsc_get_first_msg(channel, &channel->rx_msgs_queue); hsc_read()
465 if (msg) { hsc_read()
466 if (msg->status != HSI_STATUS_ERROR) { hsc_read()
468 sg_virt(msg->sgt.sgl), hsc_msg_len_get(msg)); hsc_read()
472 ret = hsc_msg_len_get(msg); hsc_read()
476 hsc_add_tail(channel, msg, &channel->free_msgs_list); hsc_read()
488 struct hsi_msg *msg; hsc_write() local
499 msg = hsc_get_first_msg(channel, &channel->free_msgs_list); hsc_write()
500 if (!msg) { hsc_write()
504 if (copy_from_user(sg_virt(msg->sgt.sgl), (void __user *)buf, len)) { hsc_write()
508 hsc_msg_len_set(msg, len); hsc_write()
509 msg->complete = hsc_tx_completed; hsc_write()
510 msg->destructor = hsc_tx_msg_destructor; hsc_write()
511 ret = hsi_async_write(channel->cl, msg); hsc_write()
523 msg = hsc_get_first_msg(channel, &channel->tx_msgs_queue); hsc_write()
524 if (msg) { hsc_write()
525 if (msg->status == HSI_STATUS_ERROR) hsc_write()
528 ret = hsc_msg_len_get(msg); hsc_write()
530 hsc_add_tail(channel, msg, &channel->free_msgs_list); hsc_write()
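The hsi_char read/write paths above recycle a fixed pool of preallocated messages: claim one from free_msgs_list, submit it with hsi_async_read/write, and have the completion or destructor callback park it on an rx/tx queue or put it straight back on the free list. A minimal kernel-style sketch of that claim/release pattern; the message struct below stands in for struct hsi_msg and is not the real layout.

#include <linux/list.h>
#include <linux/spinlock.h>

struct pool_msg_sketch {
	struct list_head link;
	void *buf;
	unsigned int len;
};

struct msg_pool_sketch {
	spinlock_t lock;
	struct list_head free_list;
};

/* Take the first free message, or NULL if the pool is exhausted. */
static struct pool_msg_sketch *pool_claim(struct msg_pool_sketch *pool)
{
	struct pool_msg_sketch *msg = NULL;

	spin_lock_bh(&pool->lock);
	if (!list_empty(&pool->free_list)) {
		msg = list_first_entry(&pool->free_list,
				       struct pool_msg_sketch, link);
		list_del(&msg->link);
	}
	spin_unlock_bh(&pool->lock);
	return msg;
}

/* Return a message to the pool, e.g. from a completion or destructor. */
static void pool_release(struct msg_pool_sketch *pool,
			 struct pool_msg_sketch *msg)
{
	spin_lock_bh(&pool->lock);
	list_add_tail(&msg->link, &pool->free_list);
	spin_unlock_bh(&pool->lock);
}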
H A Dssi_protocol.c161 static void ssip_rxcmd_complete(struct hsi_msg *msg);
163 static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd) ssip_set_cmd() argument
167 data = sg_virt(msg->sgt.sgl); ssip_set_cmd()
171 static inline u32 ssip_get_cmd(struct hsi_msg *msg) ssip_get_cmd() argument
175 data = sg_virt(msg->sgt.sgl); ssip_get_cmd()
180 static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg) ssip_skb_to_msg() argument
186 BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1)); ssip_skb_to_msg()
188 sg = msg->sgt.sgl; ssip_skb_to_msg()
198 static void ssip_free_data(struct hsi_msg *msg) ssip_free_data() argument
202 skb = msg->context; ssip_free_data()
203 pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context, ssip_free_data()
205 msg->destructor = NULL; ssip_free_data()
207 hsi_free_msg(msg); ssip_free_data()
213 struct hsi_msg *msg; ssip_alloc_data() local
215 msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags); ssip_alloc_data()
216 if (!msg) ssip_alloc_data()
218 ssip_skb_to_msg(skb, msg); ssip_alloc_data()
219 msg->destructor = ssip_free_data; ssip_alloc_data()
220 msg->channel = ssi->channel_id_data; ssip_alloc_data()
221 msg->context = skb; ssip_alloc_data()
223 return msg; ssip_alloc_data()
226 static inline void ssip_release_cmd(struct hsi_msg *msg) ssip_release_cmd() argument
228 struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl); ssip_release_cmd()
230 dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg)); ssip_release_cmd()
232 list_add_tail(&msg->link, &ssi->cmdqueue); ssip_release_cmd()
238 struct hsi_msg *msg; ssip_claim_cmd() local
243 msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); ssip_claim_cmd()
244 list_del(&msg->link); ssip_claim_cmd()
246 msg->destructor = ssip_release_cmd; ssip_claim_cmd()
248 return msg; ssip_claim_cmd()
253 struct hsi_msg *msg, *tmp; ssip_free_cmds() local
255 list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { ssip_free_cmds()
256 list_del(&msg->link); ssip_free_cmds()
257 msg->destructor = NULL; ssip_free_cmds()
258 kfree(sg_virt(msg->sgt.sgl)); ssip_free_cmds()
259 hsi_free_msg(msg); ssip_free_cmds()
265 struct hsi_msg *msg; ssip_alloc_cmds() local
270 msg = hsi_alloc_msg(1, GFP_KERNEL); ssip_alloc_cmds()
271 if (!msg) ssip_alloc_cmds()
275 hsi_free_msg(msg); ssip_alloc_cmds()
278 sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); ssip_alloc_cmds()
279 msg->channel = ssi->channel_id_cmd; ssip_alloc_cmds()
280 list_add_tail(&msg->link, &ssi->cmdqueue); ssip_alloc_cmds()
400 struct hsi_msg *msg; ssip_reset() local
420 msg = list_entry(head, struct hsi_msg, link); ssip_reset()
423 ssip_free_data(msg); ssip_reset()
432 struct hsi_msg *msg; ssip_dump_state() local
444 list_for_each_entry(msg, &ssi->txqueue, link) ssip_dump_state()
445 dev_err(&cl->device, "pending TX data (%p)\n", msg); ssip_dump_state()
452 struct hsi_msg *msg; ssip_error() local
456 msg = ssip_claim_cmd(ssi); ssip_error()
457 msg->complete = ssip_rxcmd_complete; ssip_error()
458 hsi_async_read(cl, msg); ssip_error()
498 struct hsi_msg *msg; ssip_send_bootinfo_req_cmd() local
501 msg = ssip_claim_cmd(ssi); ssip_send_bootinfo_req_cmd()
502 ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID)); ssip_send_bootinfo_req_cmd()
503 msg->complete = ssip_release_cmd; ssip_send_bootinfo_req_cmd()
504 hsi_async_write(cl, msg); ssip_send_bootinfo_req_cmd()
506 msg = ssip_claim_cmd(ssi); ssip_send_bootinfo_req_cmd()
507 msg->complete = ssip_rxcmd_complete; ssip_send_bootinfo_req_cmd()
508 hsi_async_read(cl, msg); ssip_send_bootinfo_req_cmd()
514 struct hsi_msg *msg; ssip_start_rx() local
536 msg = ssip_claim_cmd(ssi); ssip_start_rx()
537 ssip_set_cmd(msg, SSIP_READY_CMD); ssip_start_rx()
538 msg->complete = ssip_release_cmd; ssip_start_rx()
540 hsi_async_write(cl, msg); ssip_start_rx()
554 static void ssip_free_strans(struct hsi_msg *msg) ssip_free_strans() argument
556 ssip_free_data(msg->context); ssip_free_strans()
557 ssip_release_cmd(msg); ssip_free_strans()
560 static void ssip_strans_complete(struct hsi_msg *msg) ssip_strans_complete() argument
562 struct hsi_client *cl = msg->cl; ssip_strans_complete()
566 data = msg->context; ssip_strans_complete()
567 ssip_release_cmd(msg); ssip_strans_complete()
577 struct hsi_msg *msg, *dmsg; ssip_xmit() local
590 msg = ssip_claim_cmd(ssi); ssip_xmit()
592 msg->context = dmsg; ssip_xmit()
593 msg->complete = ssip_strans_complete; ssip_xmit()
594 msg->destructor = ssip_free_strans; ssip_xmit()
597 ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len), ssip_xmit()
606 return hsi_async_write(cl, msg); ssip_xmit()
641 static void ssip_rx_data_complete(struct hsi_msg *msg) ssip_rx_data_complete() argument
643 struct hsi_client *cl = msg->cl; ssip_rx_data_complete()
647 if (msg->status == HSI_STATUS_ERROR) { ssip_rx_data_complete()
649 ssip_free_data(msg); ssip_rx_data_complete()
654 skb = msg->context; ssip_rx_data_complete()
656 hsi_free_msg(msg); ssip_rx_data_complete()
662 struct hsi_msg *msg; ssip_rx_bootinforeq() local
686 msg = ssip_claim_cmd(ssi); ssip_rx_bootinforeq()
687 ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID)); ssip_rx_bootinforeq()
688 msg->complete = ssip_release_cmd; ssip_rx_bootinforeq()
689 hsi_async_write(cl, msg); ssip_rx_bootinforeq()
773 struct hsi_msg *msg; ssip_rx_strans() local
800 msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); ssip_rx_strans()
801 if (unlikely(!msg)) { ssip_rx_strans()
802 dev_err(&cl->device, "No memory for RX data msg\n"); ssip_rx_strans()
805 msg->complete = ssip_rx_data_complete; ssip_rx_strans()
806 hsi_async_read(cl, msg); ssip_rx_strans()
815 static void ssip_rxcmd_complete(struct hsi_msg *msg) ssip_rxcmd_complete() argument
817 struct hsi_client *cl = msg->cl; ssip_rxcmd_complete()
818 u32 cmd = ssip_get_cmd(msg); ssip_rxcmd_complete()
821 if (msg->status == HSI_STATUS_ERROR) { ssip_rxcmd_complete()
823 ssip_release_cmd(msg); ssip_rxcmd_complete()
827 hsi_async_read(cl, msg); ssip_rxcmd_complete()
854 static void ssip_swbreak_complete(struct hsi_msg *msg) ssip_swbreak_complete() argument
856 struct hsi_client *cl = msg->cl; ssip_swbreak_complete()
859 ssip_release_cmd(msg); ssip_swbreak_complete()
876 static void ssip_tx_data_complete(struct hsi_msg *msg) ssip_tx_data_complete() argument
878 struct hsi_client *cl = msg->cl; ssip_tx_data_complete()
882 if (msg->status == HSI_STATUS_ERROR) { ssip_tx_data_complete()
901 ssip_free_data(msg); ssip_tx_data_complete()
972 struct hsi_msg *msg; ssip_pn_xmit() local
991 msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); ssip_pn_xmit()
992 if (!msg) { ssip_pn_xmit()
996 msg->complete = ssip_tx_data_complete; ssip_pn_xmit()
1004 list_add_tail(&msg->link, &ssi->txqueue); ssip_pn_xmit()
1029 hsi_free_msg(msg); ssip_pn_xmit()
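ssi_protocol keeps every control command in one 32-bit word stored in the message's single scatterlist entry, as the ssip_set_cmd()/ssip_get_cmd() fragments above show. The two accessors restated as a sketch:

#include <linux/hsi/hsi.h>
#include <linux/scatterlist.h>

/* Store/fetch the command word in the first (and only) sg entry. */
static inline void sketch_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data = sg_virt(msg->sgt.sgl);

	*data = cmd;
}

static inline u32 sketch_get_cmd(struct hsi_msg *msg)
{
	u32 *data = sg_virt(msg->sgt.sgl);

	return *data;
}

The same buffers are reused for every command because ssip_claim_cmd()/ssip_release_cmd() above cycle a small pre-allocated command queue rather than allocating per message.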
H A Dcmt_speech.c46 u32 msg; member in struct:char_queue
163 entry->msg = message; cs_notify()
181 data = entry->msg; cs_pop_entry()
210 static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd) cs_set_cmd() argument
212 u32 *data = sg_virt(msg->sgt.sgl); cs_set_cmd()
216 static inline u32 cs_get_cmd(struct hsi_msg *msg) cs_get_cmd() argument
218 u32 *data = sg_virt(msg->sgt.sgl); cs_get_cmd()
222 static void cs_release_cmd(struct hsi_msg *msg) cs_release_cmd() argument
224 struct cs_hsi_iface *hi = msg->context; cs_release_cmd()
226 list_add_tail(&msg->link, &hi->cmdqueue); cs_release_cmd()
229 static void cs_cmd_destructor(struct hsi_msg *msg) cs_cmd_destructor() argument
231 struct cs_hsi_iface *hi = msg->context; cs_cmd_destructor()
240 if (msg->ttype == HSI_MSG_READ) cs_cmd_destructor()
243 else if (msg->ttype == HSI_MSG_WRITE && cs_cmd_destructor()
247 cs_release_cmd(msg); cs_cmd_destructor()
254 struct hsi_msg *msg; cs_claim_cmd() local
258 msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); cs_claim_cmd()
259 list_del(&msg->link); cs_claim_cmd()
260 msg->destructor = cs_cmd_destructor; cs_claim_cmd()
262 return msg; cs_claim_cmd()
267 struct hsi_msg *msg, *tmp; cs_free_cmds() local
269 list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { cs_free_cmds()
270 list_del(&msg->link); cs_free_cmds()
271 msg->destructor = NULL; cs_free_cmds()
272 kfree(sg_virt(msg->sgt.sgl)); cs_free_cmds()
273 hsi_free_msg(msg); cs_free_cmds()
279 struct hsi_msg *msg; cs_alloc_cmds() local
286 msg = hsi_alloc_msg(1, GFP_KERNEL); cs_alloc_cmds()
287 if (!msg) cs_alloc_cmds()
291 hsi_free_msg(msg); cs_alloc_cmds()
294 sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); cs_alloc_cmds()
295 msg->channel = cs_char_data.channel_id_cmd; cs_alloc_cmds()
296 msg->context = hi; cs_alloc_cmds()
297 list_add_tail(&msg->link, &hi->cmdqueue); cs_alloc_cmds()
307 static void cs_hsi_data_destructor(struct hsi_msg *msg) cs_hsi_data_destructor() argument
309 struct cs_hsi_iface *hi = msg->context; cs_hsi_data_destructor()
310 const char *dir = (msg->ttype == HSI_MSG_READ) ? "TX" : "RX"; cs_hsi_data_destructor()
318 if (msg->ttype == HSI_MSG_READ) cs_hsi_data_destructor()
324 msg->status = HSI_STATUS_COMPLETED; cs_hsi_data_destructor()
365 static void cs_hsi_free_data_msg(struct hsi_msg *msg) cs_hsi_free_data_msg() argument
367 WARN_ON(msg->status != HSI_STATUS_COMPLETED && cs_hsi_free_data_msg()
368 msg->status != HSI_STATUS_ERROR); cs_hsi_free_data_msg()
369 hsi_free_msg(msg); cs_hsi_free_data_msg()
379 struct hsi_msg *msg, const char *info, __cs_hsi_error_pre()
383 dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n", __cs_hsi_error_pre()
384 info, msg->status, *state); __cs_hsi_error_pre()
405 struct hsi_msg *msg) cs_hsi_control_read_error()
407 __cs_hsi_error_pre(hi, msg, "control read", &hi->control_state); cs_hsi_control_read_error()
408 cs_release_cmd(msg); cs_hsi_control_read_error()
414 struct hsi_msg *msg) cs_hsi_control_write_error()
416 __cs_hsi_error_pre(hi, msg, "control write", &hi->control_state); cs_hsi_control_write_error()
417 cs_release_cmd(msg); cs_hsi_control_write_error()
423 static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) cs_hsi_data_read_error() argument
425 __cs_hsi_error_pre(hi, msg, "data read", &hi->data_state); cs_hsi_data_read_error()
431 struct hsi_msg *msg) cs_hsi_data_write_error()
433 __cs_hsi_error_pre(hi, msg, "data write", &hi->data_state); cs_hsi_data_write_error()
438 static void cs_hsi_read_on_control_complete(struct hsi_msg *msg) cs_hsi_read_on_control_complete() argument
440 u32 cmd = cs_get_cmd(msg); cs_hsi_read_on_control_complete()
441 struct cs_hsi_iface *hi = msg->context; cs_hsi_read_on_control_complete()
445 if (msg->status == HSI_STATUS_ERROR) { cs_hsi_read_on_control_complete()
447 cs_hsi_control_read_error(hi, msg); cs_hsi_read_on_control_complete()
452 cs_release_cmd(msg); cs_hsi_read_on_control_complete()
466 static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg) cs_hsi_peek_on_control_complete() argument
468 struct cs_hsi_iface *hi = msg->context; cs_hsi_peek_on_control_complete()
471 if (msg->status == HSI_STATUS_ERROR) { cs_hsi_peek_on_control_complete()
473 cs_hsi_control_read_error(hi, msg); cs_hsi_peek_on_control_complete()
480 msg->sgt.nents = 1; cs_hsi_peek_on_control_complete()
481 msg->complete = cs_hsi_read_on_control_complete; cs_hsi_peek_on_control_complete()
482 ret = hsi_async_read(hi->cl, msg); cs_hsi_peek_on_control_complete()
484 cs_hsi_control_read_error(hi, msg); cs_hsi_peek_on_control_complete()
489 struct hsi_msg *msg; cs_hsi_read_on_control() local
507 msg = cs_claim_cmd(hi); cs_hsi_read_on_control()
510 msg->sgt.nents = 0; cs_hsi_read_on_control()
511 msg->complete = cs_hsi_peek_on_control_complete; cs_hsi_read_on_control()
512 ret = hsi_async_read(hi->cl, msg); cs_hsi_read_on_control()
514 cs_hsi_control_read_error(hi, msg); cs_hsi_read_on_control()
517 static void cs_hsi_write_on_control_complete(struct hsi_msg *msg) cs_hsi_write_on_control_complete() argument
519 struct cs_hsi_iface *hi = msg->context; cs_hsi_write_on_control_complete()
520 if (msg->status == HSI_STATUS_COMPLETED) { cs_hsi_write_on_control_complete()
523 cs_release_cmd(msg); cs_hsi_write_on_control_complete()
525 } else if (msg->status == HSI_STATUS_ERROR) { cs_hsi_write_on_control_complete()
526 cs_hsi_control_write_error(hi, msg); cs_hsi_write_on_control_complete()
530 msg->status); cs_hsi_write_on_control_complete()
536 struct hsi_msg *msg; cs_hsi_write_on_control() local
551 msg = cs_claim_cmd(hi); cs_hsi_write_on_control()
554 cs_set_cmd(msg, message); cs_hsi_write_on_control()
555 msg->sgt.nents = 1; cs_hsi_write_on_control()
556 msg->complete = cs_hsi_write_on_control_complete; cs_hsi_write_on_control()
559 ret = hsi_async_write(hi->cl, msg); cs_hsi_write_on_control()
563 cs_hsi_control_write_error(hi, msg); cs_hsi_write_on_control()
581 static void cs_hsi_read_on_data_complete(struct hsi_msg *msg) cs_hsi_read_on_data_complete() argument
583 struct cs_hsi_iface *hi = msg->context; cs_hsi_read_on_data_complete()
586 if (unlikely(msg->status == HSI_STATUS_ERROR)) { cs_hsi_read_on_data_complete()
587 cs_hsi_data_read_error(hi, msg); cs_hsi_read_on_data_complete()
608 static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg) cs_hsi_peek_on_data_complete() argument
610 struct cs_hsi_iface *hi = msg->context; cs_hsi_peek_on_data_complete()
614 if (unlikely(msg->status == HSI_STATUS_ERROR)) { cs_hsi_peek_on_data_complete()
615 cs_hsi_data_read_error(hi, msg); cs_hsi_peek_on_data_complete()
620 cs_hsi_data_read_error(hi, msg); cs_hsi_peek_on_data_complete()
632 sg_init_one(msg->sgt.sgl, address, hi->buf_size); cs_hsi_peek_on_data_complete()
633 msg->sgt.nents = 1; cs_hsi_peek_on_data_complete()
634 msg->complete = cs_hsi_read_on_data_complete; cs_hsi_peek_on_data_complete()
635 ret = hsi_async_read(hi->cl, msg); cs_hsi_peek_on_data_complete()
637 cs_hsi_data_read_error(hi, msg); cs_hsi_peek_on_data_complete()
684 static void cs_hsi_write_on_data_complete(struct hsi_msg *msg) cs_hsi_write_on_data_complete() argument
686 struct cs_hsi_iface *hi = msg->context; cs_hsi_write_on_data_complete()
688 if (msg->status == HSI_STATUS_COMPLETED) { cs_hsi_write_on_data_complete()
695 cs_hsi_data_write_error(hi, msg); cs_hsi_write_on_data_complete()
378 __cs_hsi_error_pre(struct cs_hsi_iface *hi, struct hsi_msg *msg, const char *info, unsigned int *state) __cs_hsi_error_pre() argument
404 cs_hsi_control_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) cs_hsi_control_read_error() argument
413 cs_hsi_control_write_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) cs_hsi_control_write_error() argument
430 cs_hsi_data_write_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) cs_hsi_data_write_error() argument
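The cmt_speech control-read fragments above use a two-phase read: a zero-entry "peek" (msg->sgt.nents = 0) so the port only reports that a word is pending, then the peek completion re-arms the same message with one scatterlist entry to fetch the command itself. A reduced sketch of that re-arming, with the error paths collapsed into comments.

#include <linux/hsi/hsi.h>

static void sketch_read_complete(struct hsi_msg *msg)
{
	/* the command word is now in sg_virt(msg->sgt.sgl); hand it upstream */
}

static void sketch_peek_complete(struct hsi_msg *msg)
{
	/* real code: on HSI_STATUS_ERROR, release the command buffer instead */
	msg->sgt.nents = 1;			/* fetch the actual word now */
	msg->complete = sketch_read_complete;
	hsi_async_read(msg->cl, msg);		/* real code checks the return */
}

static int sketch_arm_control_read(struct hsi_client *cl, struct hsi_msg *msg)
{
	msg->sgt.nents = 0;			/* peek: just wait for data */
	msg->complete = sketch_peek_complete;
	return hsi_async_read(cl, msg);
}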
/linux-4.1.27/tools/perf/ui/
H A Dhelpline.h11 void (*push)(const char *msg);
20 void ui_helpline__push(const char *msg);
23 void ui_helpline__puts(const char *msg);
H A Dlibslang.h15 #define slsmg_printf(msg, args...) \
16 SLsmg_printf((char *)(msg), ##args)
17 #define slsmg_write_nstring(msg, len) \
18 SLsmg_write_nstring((char *)(msg), len)
H A Dhelpline.c15 static void nop_helpline__push(const char *msg __maybe_unused) nop_helpline__push()
38 void ui_helpline__push(const char *msg) ui_helpline__push() argument
40 helpline_fns->push(msg); ui_helpline__push()
64 void ui_helpline__puts(const char *msg) ui_helpline__puts() argument
67 ui_helpline__push(msg); ui_helpline__puts()
/linux-4.1.27/sound/soc/intel/atom/sst/
H A Dsst_ipc.c40 struct sst_block *msg = NULL; sst_create_block() local
43 msg = kzalloc(sizeof(*msg), GFP_KERNEL); sst_create_block()
44 if (!msg) sst_create_block()
46 msg->condition = false; sst_create_block()
47 msg->on = true; sst_create_block()
48 msg->msg_id = msg_id; sst_create_block()
49 msg->drv_id = drv_id; sst_create_block()
51 list_add_tail(&msg->node, &ctx->block_list); sst_create_block()
54 return msg; sst_create_block()
96 "Block not found or a response received for a short msg for ipc %d, drv_id %d\n", sst_wake_up_block()
127 struct ipc_post *msg = ipc_msg; sst_post_message_mrfld() local
140 "sst: Busy wait failed, cant send this msg\n"); sst_post_message_mrfld()
153 "Empty msg queue... NO Action\n"); sst_post_message_mrfld()
163 /* copy msg from list */ sst_post_message_mrfld()
164 msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next, sst_post_message_mrfld()
166 list_del(&msg->node); sst_post_message_mrfld()
169 msg->mrfld_header.p.header_high.full); sst_post_message_mrfld()
171 msg->mrfld_header.p.header_low_payload); sst_post_message_mrfld()
173 if (msg->mrfld_header.p.header_high.part.large) sst_post_message_mrfld()
175 msg->mailbox_data, sst_post_message_mrfld()
176 msg->mrfld_header.p.header_low_payload); sst_post_message_mrfld()
178 sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full); sst_post_message_mrfld()
182 kfree(msg->mailbox_data); sst_post_message_mrfld()
183 kfree(msg); sst_post_message_mrfld()
217 * process_fw_init - process the FW init msg
219 * @msg: IPC message mailbox data from FW
221 * This function processes the FW init msg from FW
225 void *msg) process_fw_init()
228 (struct ipc_header_fw_init *)msg; process_fw_init()
231 dev_dbg(sst_drv_ctx->dev, "*** FW Init msg came***\n"); process_fw_init()
245 struct ipc_post *msg) process_fw_async_msg()
255 msg_high = msg->mrfld_header.p.header_high; process_fw_async_msg()
256 msg_low = msg->mrfld_header.p.header_low_payload; process_fw_async_msg()
257 msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id; process_fw_async_msg()
258 data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr)); process_fw_async_msg()
263 pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id; process_fw_async_msg()
278 pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id; process_fw_async_msg()
288 dev_err(sst_drv_ctx->dev, "FW sent async error msg:\n"); process_fw_async_msg()
299 pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id; process_fw_async_msg()
309 "Unrecognized async msg from FW msg_id %#x\n", msg_id); process_fw_async_msg()
314 struct ipc_post *msg) sst_process_reply_mrfld()
323 msg_high = msg->mrfld_header.p.header_high; sst_process_reply_mrfld()
324 msg_low = msg->mrfld_header.p.header_low_payload; sst_process_reply_mrfld()
327 msg->mrfld_header.p.header_high.full, sst_process_reply_mrfld()
328 msg->mrfld_header.p.header_low_payload); sst_process_reply_mrfld()
335 process_fw_async_msg(sst_drv_ctx, msg); sst_process_reply_mrfld()
358 memcpy(data, (void *) msg->mailbox_data, msg_low); sst_process_reply_mrfld()
224 process_fw_init(struct intel_sst_drv *sst_drv_ctx, void *msg) process_fw_init() argument
244 process_fw_async_msg(struct intel_sst_drv *sst_drv_ctx, struct ipc_post *msg) process_fw_async_msg() argument
313 sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx, struct ipc_post *msg) sst_process_reply_mrfld() argument
H A Dsst_pvt.c129 * Observed that FW processes the alloc msg and replies even sst_wait_timeout()
167 struct ipc_post *msg; sst_create_ipc_msg() local
169 msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC); sst_create_ipc_msg()
170 if (!msg) sst_create_ipc_msg()
173 msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC); sst_create_ipc_msg()
174 if (!msg->mailbox_data) { sst_create_ipc_msg()
175 kfree(msg); sst_create_ipc_msg()
179 msg->mailbox_data = NULL; sst_create_ipc_msg()
181 msg->is_large = large; sst_create_ipc_msg()
182 *arg = msg; sst_create_ipc_msg()
234 struct ipc_post *msg = NULL; sst_prepare_and_post_msg() local
245 &msg, large, sst, &block, ipc_msg, pvt_id); sst_prepare_and_post_msg()
247 ret = sst_create_ipc_msg(&msg, large); sst_prepare_and_post_msg()
256 sst_fill_header_mrfld(&msg->mrfld_header, ipc_msg, sst_prepare_and_post_msg()
258 msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr) + mbox_data_len; sst_prepare_and_post_msg()
259 msg->mrfld_header.p.header_high.part.res_rqd = !sync; sst_prepare_and_post_msg()
261 msg->mrfld_header.p.header_high.full); sst_prepare_and_post_msg()
263 msg->mrfld_header.p.header_high.part.res_rqd); sst_prepare_and_post_msg()
264 dev_dbg(sst->dev, "msg->mrfld_header.p.header_low_payload:%d", sst_prepare_and_post_msg()
265 msg->mrfld_header.p.header_low_payload); sst_prepare_and_post_msg()
268 memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr)); sst_prepare_and_post_msg()
270 memcpy(msg->mailbox_data + sizeof(dsp_hdr), sst_prepare_and_post_msg()
276 sst->ops->post_message(sst, msg, true); sst_prepare_and_post_msg()
278 sst_add_to_dispatch_list_and_post(sst, msg); sst_prepare_and_post_msg()
314 int msg, int task_id, int large, int drv_id) sst_fill_header_mrfld()
317 header->p.header_high.part.msg_id = msg; sst_fill_header_mrfld()
326 void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg, sst_fill_header_dsp() argument
329 dsp->cmd_id = msg; sst_fill_header_dsp()
417 struct ipc_post *msg) sst_add_to_dispatch_list_and_post()
422 list_add_tail(&msg->node, &sst->ipc_dispatch_list); sst_add_to_dispatch_list_and_post()
313 sst_fill_header_mrfld(union ipc_header_mrfld *header, int msg, int task_id, int large, int drv_id) sst_fill_header_mrfld() argument
416 sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst, struct ipc_post *msg) sst_add_to_dispatch_list_and_post() argument
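sst_create_ipc_msg() above allocates an ipc_post wrapper and, for large messages, a separate mailbox buffer that later receives the DSP header and payload before the message is posted or queued on ipc_dispatch_list. A sketch of that allocate-then-fill shape; ipc_post_sketch stands in for the driver's struct ipc_post and is not the real layout.

#include <linux/list.h>
#include <linux/slab.h>

struct ipc_post_sketch {
	struct list_head node;		/* for the dispatch list */
	bool is_large;			/* payload carried in a mailbox buffer */
	void *mailbox_data;
};

static struct ipc_post_sketch *sketch_create_ipc_msg(size_t mailbox_size,
						     bool large)
{
	struct ipc_post_sketch *msg;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return NULL;
	if (large) {
		msg->mailbox_data = kzalloc(mailbox_size, GFP_ATOMIC);
		if (!msg->mailbox_data) {
			kfree(msg);
			return NULL;
		}
	}
	msg->is_large = large;
	return msg;
}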
/linux-4.1.27/drivers/leds/
H A Dleds-ipaq-micro.c39 struct ipaq_micro_msg msg = { micro_leds_brightness_set() local
44 msg.tx_data[0] = LED_GREEN; micro_leds_brightness_set()
45 msg.tx_data[1] = 0; micro_leds_brightness_set()
47 msg.tx_data[2] = 0; /* Duty cycle 256 */ micro_leds_brightness_set()
48 msg.tx_data[3] = 1; micro_leds_brightness_set()
50 msg.tx_data[2] = 1; micro_leds_brightness_set()
51 msg.tx_data[3] = 0; /* Duty cycle 256 */ micro_leds_brightness_set()
53 ipaq_micro_tx_msg_sync(micro, &msg); micro_leds_brightness_set()
76 struct ipaq_micro_msg msg = { micro_leds_blink_set() local
81 msg.tx_data[0] = LED_GREEN; micro_leds_blink_set()
91 msg.tx_data[1] = 0; micro_leds_blink_set()
93 msg.tx_data[2] = 0; micro_leds_blink_set()
95 msg.tx_data[2] = (u8) DIV_ROUND_CLOSEST(*delay_on, 100); micro_leds_blink_set()
97 msg.tx_data[3] = 0; micro_leds_blink_set()
99 msg.tx_data[3] = (u8) DIV_ROUND_CLOSEST(*delay_off, 100); micro_leds_blink_set()
100 return ipaq_micro_tx_msg_sync(micro, &msg); micro_leds_blink_set()
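The LED fragments above program the iPAQ microcontroller with a four-byte payload: which LED, a second byte left at zero, then on- and off-times in roughly 100 ms units (zero selecting the full duty cycle). A restatement of that encoding; the LED id value is a placeholder, not the driver's LED_GREEN constant.

#include <stdint.h>

#define SKETCH_LED_GREEN 2	/* placeholder id */

static void sketch_fill_led_blink(uint8_t tx_data[4],
				  unsigned long delay_on_ms,
				  unsigned long delay_off_ms)
{
	tx_data[0] = SKETCH_LED_GREEN;
	tx_data[1] = 0;
	/* (x + 50) / 100 matches DIV_ROUND_CLOSEST(x, 100) for positive x */
	tx_data[2] = delay_on_ms ? (uint8_t)((delay_on_ms + 50) / 100) : 0;
	tx_data[3] = delay_off_ms ? (uint8_t)((delay_off_ms + 50) / 100) : 0;
}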
/linux-4.1.27/drivers/media/pci/cx23885/
H A Dcx23885-i2c.c78 const struct i2c_msg *msg, int joined_rlen) i2c_sendbytes()
86 dprintk(1, "%s(msg->wlen=%d, nextmsg->rlen=%d)\n", __func__, i2c_sendbytes()
87 msg->len, joined_rlen); i2c_sendbytes()
89 dprintk(1, "%s(msg->len=%d)\n", __func__, msg->len); i2c_sendbytes()
92 if (msg->len == 0) { i2c_sendbytes()
93 cx_write(bus->reg_addr, msg->addr << 25); i2c_sendbytes()
106 addr = (msg->addr << 25) | msg->buf[0]; i2c_sendbytes()
107 wdata = msg->buf[0]; i2c_sendbytes()
110 if (msg->len > 1) i2c_sendbytes()
122 printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); i2c_sendbytes()
127 for (cnt = 1; cnt < msg->len; cnt++) { i2c_sendbytes()
129 wdata = msg->buf[cnt]; i2c_sendbytes()
132 if (cnt < msg->len - 1) i2c_sendbytes()
144 dprintk(1, " %02x", msg->buf[cnt]); i2c_sendbytes()
149 return msg->len; i2c_sendbytes()
159 const struct i2c_msg *msg, int joined) i2c_readbytes()
168 dprintk(1, "%s(msg->len=%d)\n", __func__, msg->len); i2c_readbytes()
171 if (msg->len == 0) { i2c_readbytes()
172 cx_write(bus->reg_addr, msg->addr << 25); i2c_readbytes()
188 dprintk(1, " <R %02x", (msg->addr << 1) + 1); i2c_readbytes()
191 for (cnt = 0; cnt < msg->len; cnt++) { i2c_readbytes()
195 if (cnt < msg->len - 1) i2c_readbytes()
198 cx_write(bus->reg_addr, msg->addr << 25); i2c_readbytes()
203 msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; i2c_readbytes()
205 dprintk(1, " %02x", msg->buf[cnt]); i2c_readbytes()
210 return msg->len; i2c_readbytes()
367 struct i2c_msg msg; cx23885_av_clk() local
378 msg.addr = 0x44; cx23885_av_clk()
379 msg.flags = I2C_M_TEN; cx23885_av_clk()
380 msg.len = 3; cx23885_av_clk()
381 msg.buf = buffer; cx23885_av_clk()
383 i2c_xfer(&dev->i2c_bus[2].i2c_adap, &msg, 1); cx23885_av_clk()
77 i2c_sendbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined_rlen) i2c_sendbytes() argument
158 i2c_readbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined) i2c_readbytes() argument
H A Dnetup-init.c29 struct i2c_msg msg = { i2c_av_write() local
40 ret = i2c_transfer(i2c, &msg, 1); i2c_av_write()
50 struct i2c_msg msg = { i2c_av_write4() local
64 ret = i2c_transfer(i2c, &msg, 1); i2c_av_write4()
74 struct i2c_msg msg = { i2c_av_read() local
84 ret = i2c_transfer(i2c, &msg, 1); i2c_av_read()
89 msg.flags = I2C_M_RD; i2c_av_read()
90 msg.len = 1; i2c_av_read()
92 ret = i2c_transfer(i2c, &msg, 1); i2c_av_read()
H A Dnetup-eeprom.c34 struct i2c_msg msg[] = { netup_eeprom_read() local
52 ret = i2c_transfer(i2c_adap, msg, 2); netup_eeprom_read()
68 struct i2c_msg msg[] = { netup_eeprom_write() local
80 ret = i2c_transfer(i2c_adap, msg, 1); netup_eeprom_write()
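netup_eeprom_read() above is the classic register-pointer-then-read I2C sequence: one write message to set the EEPROM offset, one read message for the data, both handed to i2c_transfer() in a single call. A self-contained kernel-style sketch; the 0x50 slave address is the usual 24Cxx EEPROM address and is an assumption here, not taken from the driver.

#include <linux/errno.h>
#include <linux/i2c.h>

static int sketch_eeprom_read_byte(struct i2c_adapter *adap, u8 reg, u8 *val)
{
	struct i2c_msg msg[2] = {
		{
			.addr  = 0x50,
			.flags = 0,		/* write the register offset */
			.buf   = &reg,
			.len   = 1,
		}, {
			.addr  = 0x50,
			.flags = I2C_M_RD,	/* then read one byte back */
			.buf   = val,
			.len   = 1,
		}
	};
	int ret;

	ret = i2c_transfer(adap, msg, 2);	/* returns #messages on success */
	if (ret != 2)
		return ret < 0 ? ret : -EIO;
	return 0;
}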
/linux-4.1.27/arch/sparc/kernel/
H A Dsstate.c21 static void do_set_sstate(unsigned long state, const char *msg) do_set_sstate() argument
28 err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg)); do_set_sstate()
31 "state[%lx] msg[%s], err=%lu\n", do_set_sstate()
32 state, msg, err); do_set_sstate()
51 const char *msg; sstate_reboot_call() local
56 msg = rebooting_msg; sstate_reboot_call()
60 msg = halting_msg; sstate_reboot_call()
64 msg = poweroff_msg; sstate_reboot_call()
68 do_set_sstate(HV_SOFT_STATE_TRANSITION, msg); sstate_reboot_call()
/linux-4.1.27/drivers/media/usb/dvb-usb/
H A Ddw2102.c165 static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], dw2102_i2c_transfer() argument
181 value = msg[0].buf[0];/* register */ dw2102_i2c_transfer()
182 for (i = 0; i < msg[1].len; i++) { dw2102_i2c_transfer()
185 msg[1].buf[i] = buf6[0]; dw2102_i2c_transfer()
189 switch (msg[0].addr) { dw2102_i2c_transfer()
193 buf6[1] = msg[0].buf[0]; dw2102_i2c_transfer()
194 buf6[2] = msg[0].buf[1]; dw2102_i2c_transfer()
199 if (msg[0].flags == 0) { dw2102_i2c_transfer()
204 buf6[3] = msg[0].buf[0]; dw2102_i2c_transfer()
205 buf6[4] = msg[0].buf[1]; dw2102_i2c_transfer()
206 buf6[5] = msg[0].buf[2]; dw2102_i2c_transfer()
207 buf6[6] = msg[0].buf[3]; dw2102_i2c_transfer()
214 msg[0].buf[0] = buf6[0]; dw2102_i2c_transfer()
220 msg[0].buf[0] = buf6[0]; dw2102_i2c_transfer()
221 msg[0].buf[1] = buf6[1]; dw2102_i2c_transfer()
225 buf6[1] = msg[0].buf[0]; dw2102_i2c_transfer()
239 struct i2c_msg msg[], int num) dw2102_serit_i2c_transfer()
252 buf6[0] = msg[0].addr << 1; dw2102_serit_i2c_transfer()
253 buf6[1] = msg[0].len; dw2102_serit_i2c_transfer()
254 buf6[2] = msg[0].buf[0]; dw2102_serit_i2c_transfer()
256 buf6, msg[0].len + 2, DW210X_WRITE_MSG); dw2102_serit_i2c_transfer()
259 buf6, msg[1].len + 2, DW210X_READ_MSG); dw2102_serit_i2c_transfer()
260 memcpy(msg[1].buf, buf6 + 2, msg[1].len); dw2102_serit_i2c_transfer()
264 switch (msg[0].addr) { dw2102_serit_i2c_transfer()
267 buf6[0] = msg[0].addr << 1; dw2102_serit_i2c_transfer()
268 buf6[1] = msg[0].len; dw2102_serit_i2c_transfer()
269 memcpy(buf6 + 2, msg[0].buf, msg[0].len); dw2102_serit_i2c_transfer()
271 msg[0].len + 2, DW210X_WRITE_MSG); dw2102_serit_i2c_transfer()
276 msg[0].buf[0] = buf6[0]; dw2102_serit_i2c_transfer()
277 msg[0].buf[1] = buf6[1]; dw2102_serit_i2c_transfer()
281 buf6[1] = msg[0].buf[0]; dw2102_serit_i2c_transfer()
293 static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) dw2102_earda_i2c_transfer() argument
309 if (2 + msg[1].len > sizeof(ibuf)) { dw2102_earda_i2c_transfer()
311 msg[1].len); dw2102_earda_i2c_transfer()
316 obuf[0] = msg[0].addr << 1; dw2102_earda_i2c_transfer()
317 obuf[1] = msg[0].len; dw2102_earda_i2c_transfer()
318 obuf[2] = msg[0].buf[0]; dw2102_earda_i2c_transfer()
320 obuf, msg[0].len + 2, DW210X_WRITE_MSG); dw2102_earda_i2c_transfer()
323 ibuf, msg[1].len + 2, DW210X_READ_MSG); dw2102_earda_i2c_transfer()
324 memcpy(msg[1].buf, ibuf + 2, msg[1].len); dw2102_earda_i2c_transfer()
329 switch (msg[0].addr) { dw2102_earda_i2c_transfer()
334 if (2 + msg[0].len > sizeof(obuf)) { dw2102_earda_i2c_transfer()
336 msg[1].len); dw2102_earda_i2c_transfer()
341 obuf[0] = msg[0].addr << 1; dw2102_earda_i2c_transfer()
342 obuf[1] = msg[0].len; dw2102_earda_i2c_transfer()
343 memcpy(obuf + 2, msg[0].buf, msg[0].len); dw2102_earda_i2c_transfer()
345 obuf, msg[0].len + 2, DW210X_WRITE_MSG); dw2102_earda_i2c_transfer()
352 if (2 + msg[0].len > sizeof(obuf)) { dw2102_earda_i2c_transfer()
354 msg[1].len); dw2102_earda_i2c_transfer()
359 obuf[0] = msg[0].addr << 1; dw2102_earda_i2c_transfer()
360 obuf[1] = msg[0].len; dw2102_earda_i2c_transfer()
361 memcpy(obuf + 2, msg[0].buf, msg[0].len); dw2102_earda_i2c_transfer()
363 obuf, msg[0].len + 2, DW210X_WRITE_MSG); dw2102_earda_i2c_transfer()
370 memcpy(msg[0].buf, ibuf , 2); dw2102_earda_i2c_transfer()
376 obuf[1] = msg[0].buf[0]; dw2102_earda_i2c_transfer()
392 static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) dw2104_i2c_transfer() argument
403 switch (msg[j].addr) { dw2104_i2c_transfer()
408 memcpy(msg[j].buf, ibuf , 2); dw2104_i2c_transfer()
414 obuf[1] = msg[j].buf[0]; dw2104_i2c_transfer()
424 if (msg[j].flags == I2C_M_RD) { dw2104_i2c_transfer()
428 if (2 + msg[j].len > sizeof(ibuf)) { dw2104_i2c_transfer()
430 msg[j].len); dw2104_i2c_transfer()
436 (msg[j].addr << 1) + 1, 0, dw2104_i2c_transfer()
437 ibuf, msg[j].len + 2, dw2104_i2c_transfer()
439 memcpy(msg[j].buf, ibuf + 2, msg[j].len); dw2104_i2c_transfer()
441 } else if (((msg[j].buf[0] == 0xb0) && dw2104_i2c_transfer()
442 (msg[j].addr == 0x68)) || dw2104_i2c_transfer()
443 ((msg[j].buf[0] == 0xf7) && dw2104_i2c_transfer()
444 (msg[j].addr == 0x55))) { dw2104_i2c_transfer()
447 obuf[0] = msg[j].addr << 1; dw2104_i2c_transfer()
448 obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len); dw2104_i2c_transfer()
449 obuf[2] = msg[j].buf[0]; dw2104_i2c_transfer()
450 len = msg[j].len - 1; dw2104_i2c_transfer()
453 memcpy(obuf + 3, msg[j].buf + i, dw2104_i2c_transfer()
465 if (2 + msg[j].len > sizeof(obuf)) { dw2104_i2c_transfer()
467 msg[j].len); dw2104_i2c_transfer()
472 obuf[0] = msg[j].addr << 1; dw2104_i2c_transfer()
473 obuf[1] = msg[j].len; dw2104_i2c_transfer()
474 memcpy(obuf + 2, msg[j].buf, msg[j].len); dw2104_i2c_transfer()
476 obuf, msg[j].len + 2, dw2104_i2c_transfer()
491 static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], dw3101_i2c_transfer() argument
509 if (2 + msg[1].len > sizeof(ibuf)) { dw3101_i2c_transfer()
511 msg[1].len); dw3101_i2c_transfer()
515 obuf[0] = msg[0].addr << 1; dw3101_i2c_transfer()
516 obuf[1] = msg[0].len; dw3101_i2c_transfer()
517 obuf[2] = msg[0].buf[0]; dw3101_i2c_transfer()
519 obuf, msg[0].len + 2, DW210X_WRITE_MSG); dw3101_i2c_transfer()
522 ibuf, msg[1].len + 2, DW210X_READ_MSG); dw3101_i2c_transfer()
523 memcpy(msg[1].buf, ibuf + 2, msg[1].len); dw3101_i2c_transfer()
528 switch (msg[0].addr) { dw3101_i2c_transfer()
534 if (2 + msg[0].len > sizeof(obuf)) { dw3101_i2c_transfer()
536 msg[0].len); dw3101_i2c_transfer()
540 obuf[0] = msg[0].addr << 1; dw3101_i2c_transfer()
541 obuf[1] = msg[0].len; dw3101_i2c_transfer()
542 memcpy(obuf + 2, msg[0].buf, msg[0].len); dw3101_i2c_transfer()
544 obuf, msg[0].len + 2, DW210X_WRITE_MSG); dw3101_i2c_transfer()
551 memcpy(msg[0].buf, ibuf , 2); dw3101_i2c_transfer()
560 deb_xfer("%02x:%02x: %s ", i, msg[i].addr, dw3101_i2c_transfer()
561 msg[i].flags == 0 ? ">>>" : "<<<"); dw3101_i2c_transfer()
562 debug_dump(msg[i].buf, msg[i].len, deb_xfer); dw3101_i2c_transfer()
571 static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], s6x0_i2c_transfer() argument
585 switch (msg[j].addr) { s6x0_i2c_transfer()
590 memcpy(msg[j].buf, ibuf + 3, 2); s6x0_i2c_transfer()
597 obuf[1] = msg[j].buf[1];/* off-on */ s6x0_i2c_transfer()
601 obuf[1] = msg[j].buf[0];/* 13v-18v */ s6x0_i2c_transfer()
610 obuf[1] = msg[j].buf[0]; s6x0_i2c_transfer()
621 if (msg[j].flags == I2C_M_RD) { s6x0_i2c_transfer()
625 if (msg[j].len > sizeof(ibuf)) { s6x0_i2c_transfer()
627 msg[j].len); s6x0_i2c_transfer()
633 ibuf, msg[j].len, s6x0_i2c_transfer()
635 memcpy(msg[j].buf, ibuf, msg[j].len); s6x0_i2c_transfer()
637 } else if ((msg[j].buf[0] == 0xb0) && s6x0_i2c_transfer()
638 (msg[j].addr == 0x68)) { s6x0_i2c_transfer()
641 obuf[0] = (msg[j].len > 16 ? s6x0_i2c_transfer()
642 18 : msg[j].len + 1); s6x0_i2c_transfer()
643 obuf[1] = msg[j].addr << 1; s6x0_i2c_transfer()
644 obuf[2] = msg[j].buf[0]; s6x0_i2c_transfer()
645 len = msg[j].len - 1; s6x0_i2c_transfer()
648 memcpy(obuf + 3, msg[j].buf + i, s6x0_i2c_transfer()
660 if (2 + msg[j].len > sizeof(obuf)) { s6x0_i2c_transfer()
662 msg[j].len); s6x0_i2c_transfer()
667 obuf[0] = msg[j + 1].len; s6x0_i2c_transfer()
668 obuf[1] = (msg[j].addr << 1); s6x0_i2c_transfer()
669 memcpy(obuf + 2, msg[j].buf, msg[j].len); s6x0_i2c_transfer()
673 obuf, msg[j].len + 2, s6x0_i2c_transfer()
680 if (2 + msg[j].len > sizeof(obuf)) { s6x0_i2c_transfer()
682 msg[j].len); s6x0_i2c_transfer()
686 obuf[0] = msg[j].len + 1; s6x0_i2c_transfer()
687 obuf[1] = (msg[j].addr << 1); s6x0_i2c_transfer()
688 memcpy(obuf + 2, msg[j].buf, msg[j].len); s6x0_i2c_transfer()
690 obuf, msg[j].len + 2, s6x0_i2c_transfer()
705 static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], su3000_i2c_transfer() argument
718 switch (msg[0].addr) { su3000_i2c_transfer()
720 obuf[0] = msg[0].buf[0] + 0x36; su3000_i2c_transfer()
730 msg[0].buf[1] = ibuf[0]; su3000_i2c_transfer()
731 msg[0].buf[0] = ibuf[1]; su3000_i2c_transfer()
736 obuf[1] = msg[0].addr; su3000_i2c_transfer()
737 obuf[2] = msg[0].len; su3000_i2c_transfer()
739 memcpy(&obuf[3], msg[0].buf, msg[0].len); su3000_i2c_transfer()
741 if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3, su3000_i2c_transfer()
750 obuf[1] = msg[0].len; su3000_i2c_transfer()
751 obuf[2] = msg[1].len; su3000_i2c_transfer()
752 obuf[3] = msg[0].addr; su3000_i2c_transfer()
753 memcpy(&obuf[4], msg[0].buf, msg[0].len); su3000_i2c_transfer()
755 if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4, su3000_i2c_transfer()
756 ibuf, msg[1].len + 1, 0) < 0) su3000_i2c_transfer()
759 memcpy(msg[1].buf, &ibuf[1], msg[1].len); su3000_i2c_transfer()
838 struct i2c_msg msg[] = { s6x0_read_mac_address() local
854 ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2); s6x0_read_mac_address()
877 struct i2c_msg msg = { su3000_streaming_ctrl() local
884 i2c_transfer(&adap->dev->i2c_adap, &msg, 1); su3000_streaming_ctrl()
910 struct i2c_msg msg[] = { su3000_read_mac_address() local
927 if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) su3000_read_mac_address()
954 struct i2c_msg msg = { dw210x_set_voltage() local
964 msg.buf = command_18v; dw210x_set_voltage()
966 msg.buf = command_13v; dw210x_set_voltage()
968 i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); dw210x_set_voltage()
990 struct i2c_msg msg = { dw210x_led_ctrl() local
1000 msg.buf = led_on; dw210x_led_ctrl()
1001 i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); dw210x_led_ctrl()
1577 struct i2c_msg msg = { dw2102_rc_query() local
1584 if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { dw2102_rc_query()
1585 if (msg.buf[0] != 0xff) { dw2102_rc_query()
1598 struct i2c_msg msg = { prof_rc_query() local
1605 if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { prof_rc_query()
1606 if (msg.buf[0] != 0xff) { prof_rc_query()
1619 struct i2c_msg msg = { su3000_rc_query() local
1626 if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { su3000_rc_query()
1627 if (msg.buf[0] != 0xff) { su3000_rc_query()
238 dw2102_serit_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) dw2102_serit_i2c_transfer() argument
H A Dfriio-fe.c32 struct i2c_msg msg[2]; jdvbt90502_reg_read() local
38 msg[0].addr = state->config.demod_address; jdvbt90502_reg_read()
39 msg[0].flags = 0; jdvbt90502_reg_read()
40 msg[0].buf = wbuf; jdvbt90502_reg_read()
41 msg[0].len = sizeof(wbuf); jdvbt90502_reg_read()
43 msg[1].addr = msg[0].addr; jdvbt90502_reg_read()
44 msg[1].flags = I2C_M_RD; jdvbt90502_reg_read()
45 msg[1].buf = buf; jdvbt90502_reg_read()
46 msg[1].len = count; jdvbt90502_reg_read()
48 ret = i2c_transfer(state->i2c, msg, 2); jdvbt90502_reg_read()
60 struct i2c_msg msg; jdvbt90502_single_reg_write() local
66 msg.addr = state->config.demod_address; jdvbt90502_single_reg_write()
67 msg.flags = 0; jdvbt90502_single_reg_write()
68 msg.buf = wbuf; jdvbt90502_single_reg_write()
69 msg.len = sizeof(wbuf); jdvbt90502_single_reg_write()
71 if (i2c_transfer(state->i2c, &msg, 1) != 1) { jdvbt90502_single_reg_write()
131 struct i2c_msg msg[2]; jdvbt90502_pll_set_freq() local
151 msg[0].addr = state->config.demod_address; jdvbt90502_pll_set_freq()
152 msg[0].flags = 0; jdvbt90502_pll_set_freq()
153 msg[0].buf = pll_freq_cmd; jdvbt90502_pll_set_freq()
154 msg[0].len = sizeof(pll_freq_cmd); jdvbt90502_pll_set_freq()
156 ret = i2c_transfer(state->i2c, &msg[0], 1); jdvbt90502_pll_set_freq()
170 msg[1].addr = msg[0].addr; jdvbt90502_pll_set_freq()
171 msg[1].flags = 0; jdvbt90502_pll_set_freq()
172 msg[1].buf = pll_agc_cmd; jdvbt90502_pll_set_freq()
173 msg[1].len = sizeof(pll_agc_cmd); jdvbt90502_pll_set_freq()
175 ret = i2c_transfer(state->i2c, &msg[1], 1); jdvbt90502_pll_set_freq()
376 struct i2c_msg msg; jdvbt90502_init() local
382 msg.addr = state->config.demod_address; jdvbt90502_init()
383 msg.flags = 0; jdvbt90502_init()
384 msg.len = 2; jdvbt90502_init()
386 msg.buf = init_code[i]; jdvbt90502_init()
387 ret = i2c_transfer(state->i2c, &msg, 1); jdvbt90502_init()
H A Dfriio.c27 * This is done by a control msg to the FE with the I2C data accompanied, and
53 deb_xfer("not supported ctrl-msg, aborting."); gl861_i2c_ctrlmsg_data()
109 static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], gl861_i2c_xfer() argument
124 if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) { gl861_i2c_xfer()
125 if (gl861_i2c_msg(d, msg[i].addr, gl861_i2c_xfer()
126 msg[i].buf, msg[i].len, gl861_i2c_xfer()
127 msg[i + 1].buf, msg[i + 1].len) < 0) gl861_i2c_xfer()
131 if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf, gl861_i2c_xfer()
132 msg[i].len, NULL, 0) < 0) gl861_i2c_xfer()
150 struct i2c_msg msg; friio_ext_ctl() local
159 msg.addr = 0x00; friio_ext_ctl()
160 msg.flags = 0; friio_ext_ctl()
161 msg.len = 2; friio_ext_ctl()
162 msg.buf = buf; friio_ext_ctl()
168 ret = gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1); friio_ext_ctl()
170 ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1); friio_ext_ctl()
173 ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1); friio_ext_ctl()
175 ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1); friio_ext_ctl()
183 ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1); friio_ext_ctl()
185 ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1); friio_ext_ctl()
191 ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1); friio_ext_ctl()
193 ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1); friio_ext_ctl()
H A Ddtv5100.c70 static int dtv5100_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], dtv5100_i2c_xfer() argument
84 if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { dtv5100_i2c_xfer()
85 if (dtv5100_i2c_msg(d, msg[i].addr, msg[i].buf, dtv5100_i2c_xfer()
86 msg[i].len, msg[i+1].buf, dtv5100_i2c_xfer()
87 msg[i+1].len) < 0) dtv5100_i2c_xfer()
90 } else if (dtv5100_i2c_msg(d, msg[i].addr, msg[i].buf, dtv5100_i2c_xfer()
91 msg[i].len, NULL, 0) < 0) dtv5100_i2c_xfer()
/linux-4.1.27/drivers/s390/crypto/
H A Dzcrypt_msgtype6.h137 } __packed * msg = ap_msg->message; rng_type6CPRB_msgX() local
143 .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr), rng_type6CPRB_msgX()
144 .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr), rng_type6CPRB_msgX()
150 .req_parml = sizeof(*msg) - sizeof(msg->hdr) - rng_type6CPRB_msgX()
151 sizeof(msg->cprbx), rng_type6CPRB_msgX()
152 .rpl_msgbl = sizeof(*msg) - sizeof(msg->hdr), rng_type6CPRB_msgX()
155 msg->hdr = static_type6_hdrX; rng_type6CPRB_msgX()
156 msg->hdr.FromCardLen2 = random_number_length, rng_type6CPRB_msgX()
157 msg->cprbx = local_cprbx; rng_type6CPRB_msgX()
158 msg->cprbx.rpl_datal = random_number_length, rng_type6CPRB_msgX()
159 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid); rng_type6CPRB_msgX()
160 memcpy(msg->function_code, msg->hdr.function_code, 0x02); rng_type6CPRB_msgX()
161 msg->rule_length = 0x0a; rng_type6CPRB_msgX()
162 memcpy(msg->rule, "RANDOM ", 8); rng_type6CPRB_msgX()
163 msg->verb_length = 0x02; rng_type6CPRB_msgX()
164 msg->key_length = 0x02; rng_type6CPRB_msgX()
165 ap_msg->length = sizeof(*msg); rng_type6CPRB_msgX()
H A Dzcrypt_msgtype6.c187 } __packed * msg = ap_msg->message; ICAMEX_msg_to_type6MEX_msgX() local
191 msg->length = mex->inputdatalength + 2; ICAMEX_msg_to_type6MEX_msgX()
192 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength)) ICAMEX_msg_to_type6MEX_msgX()
196 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1); ICAMEX_msg_to_type6MEX_msgX()
199 size += sizeof(*msg) + mex->inputdatalength; ICAMEX_msg_to_type6MEX_msgX()
202 msg->hdr = static_type6_hdrX; ICAMEX_msg_to_type6MEX_msgX()
203 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); ICAMEX_msg_to_type6MEX_msgX()
204 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); ICAMEX_msg_to_type6MEX_msgX()
206 msg->cprbx = static_cprbx; ICAMEX_msg_to_type6MEX_msgX()
207 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); ICAMEX_msg_to_type6MEX_msgX()
208 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; ICAMEX_msg_to_type6MEX_msgX()
210 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? ICAMEX_msg_to_type6MEX_msgX()
213 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx); ICAMEX_msg_to_type6MEX_msgX()
255 } __packed * msg = ap_msg->message; ICACRT_msg_to_type6CRT_msgX() local
259 msg->length = crt->inputdatalength + 2; ICACRT_msg_to_type6CRT_msgX()
260 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength)) ICACRT_msg_to_type6CRT_msgX()
264 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1); ICACRT_msg_to_type6CRT_msgX()
267 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ ICACRT_msg_to_type6CRT_msgX()
270 msg->hdr = static_type6_hdrX; ICACRT_msg_to_type6CRT_msgX()
271 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); ICACRT_msg_to_type6CRT_msgX()
272 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); ICACRT_msg_to_type6CRT_msgX()
274 msg->cprbx = static_cprbx; ICACRT_msg_to_type6CRT_msgX()
275 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); ICACRT_msg_to_type6CRT_msgX()
276 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = ICACRT_msg_to_type6CRT_msgX()
277 size - sizeof(msg->hdr) - sizeof(msg->cprbx); ICACRT_msg_to_type6CRT_msgX()
279 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? ICACRT_msg_to_type6CRT_msgX()
311 } __packed * msg = ap_msg->message; XCRB_msg_to_type6CPRB_msgX() local
361 msg->hdr = static_type6_hdrX; XCRB_msg_to_type6CPRB_msgX()
362 memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID)); XCRB_msg_to_type6CPRB_msgX()
363 msg->hdr.ToCardLen1 = xcRB->request_control_blk_length; XCRB_msg_to_type6CPRB_msgX()
365 msg->hdr.offset2 = msg->hdr.offset1 + rcblen; XCRB_msg_to_type6CPRB_msgX()
366 msg->hdr.ToCardLen2 = xcRB->request_data_length; XCRB_msg_to_type6CPRB_msgX()
368 msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length; XCRB_msg_to_type6CPRB_msgX()
369 msg->hdr.FromCardLen2 = xcRB->reply_data_length; XCRB_msg_to_type6CPRB_msgX()
372 if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr, XCRB_msg_to_type6CPRB_msgX()
375 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > XCRB_msg_to_type6CPRB_msgX()
378 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; XCRB_msg_to_type6CPRB_msgX()
379 memcpy(msg->hdr.function_code, function_code, XCRB_msg_to_type6CPRB_msgX()
380 sizeof(msg->hdr.function_code)); XCRB_msg_to_type6CPRB_msgX()
415 } __packed * msg = ap_msg->message; xcrb_msg_to_type6_ep11cprb_msgx() local
443 msg->hdr = static_type6_ep11_hdr; xcrb_msg_to_type6_ep11cprb_msgx()
444 msg->hdr.ToCardLen1 = xcRB->req_len; xcrb_msg_to_type6_ep11cprb_msgx()
445 msg->hdr.FromCardLen1 = xcRB->resp_len; xcrb_msg_to_type6_ep11cprb_msgx()
448 if (copy_from_user(&(msg->cprbx.cprb_len), xcrb_msg_to_type6_ep11cprb_msgx()
461 if (!((msg->cprbx.flags & 0x80) == 0x80)) { xcrb_msg_to_type6_ep11cprb_msgx()
462 msg->cprbx.target_id = (unsigned int) xcrb_msg_to_type6_ep11cprb_msgx()
465 if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ xcrb_msg_to_type6_ep11cprb_msgx()
466 switch (msg->pld_lenfmt & 0x03) { xcrb_msg_to_type6_ep11cprb_msgx()
479 payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); xcrb_msg_to_type6_ep11cprb_msgx()
551 struct type86x_reply *msg = reply->message; convert_type86_ica() local
556 service_rc = msg->cprbx.ccp_rtcode; convert_type86_ica()
558 service_rs = msg->cprbx.ccp_rscode; convert_type86_ica()
578 msg->hdr.reply_code); convert_type86_ica()
581 data = msg->text; convert_type86_ica()
582 reply_len = msg->length - 2; convert_type86_ica()
624 struct type86_fmt2_msg *msg = reply->message; convert_type86_xcrb() local
629 data + msg->fmt2.offset1, msg->fmt2.count1)) convert_type86_xcrb()
631 xcRB->reply_control_blk_length = msg->fmt2.count1; convert_type86_xcrb()
634 if (msg->fmt2.count2) convert_type86_xcrb()
636 data + msg->fmt2.offset2, msg->fmt2.count2)) convert_type86_xcrb()
638 xcRB->reply_data_length = msg->fmt2.count2; convert_type86_xcrb()
655 struct type86_fmt2_msg *msg = reply->message; convert_type86_ep11_xcrb() local
658 if (xcRB->resp_len < msg->fmt2.count1) convert_type86_ep11_xcrb()
663 data + msg->fmt2.offset1, msg->fmt2.count1)) convert_type86_ep11_xcrb()
665 xcRB->resp_len = msg->fmt2.count1; convert_type86_ep11_xcrb()
677 } __packed * msg = reply->message; convert_type86_rng() local
680 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) convert_type86_rng()
682 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); convert_type86_rng()
683 return msg->fmt2.count2; convert_type86_rng()
691 struct type86x_reply *msg = reply->message; convert_response_ica() local
699 if (msg->cprbx.ccp_rtcode && convert_response_ica()
700 (msg->cprbx.ccp_rscode == 0x14f) && convert_response_ica()
708 if (msg->hdr.reply_code) convert_response_ica()
710 if (msg->cprbx.cprb_ver_id == 0x02) convert_response_ica()
729 struct type86x_reply *msg = reply->message; convert_response_xcrb() local
738 if (msg->hdr.reply_code) { convert_response_xcrb()
739 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); convert_response_xcrb()
742 if (msg->cprbx.cprb_ver_id == 0x02) convert_response_xcrb()
760 struct type86_ep11_reply *msg = reply->message; convert_response_ep11_xcrb() local
768 if (msg->hdr.reply_code) convert_response_ep11_xcrb()
770 if (msg->cprbx.cprb_ver_id == 0x04) convert_response_ep11_xcrb()
787 struct type86x_reply *msg = reply->message; convert_response_rng() local
789 switch (msg->hdr.type) { convert_response_rng()
794 if (msg->hdr.reply_code) convert_response_rng()
796 if (msg->cprbx.cprb_ver_id == 0x02) convert_response_rng()
812 * "msg" has finished with the reply message "reply".
815 * @msg: pointer to the AP message
819 struct ap_message *msg, zcrypt_msgtype6_receive()
827 (struct response_type *) msg->private; zcrypt_msgtype6_receive()
833 memcpy(msg->message, &error_reply, sizeof(error_reply)); zcrypt_msgtype6_receive()
844 memcpy(msg->message, reply->message, length); zcrypt_msgtype6_receive()
849 memcpy(msg->message, reply->message, length); zcrypt_msgtype6_receive()
852 memcpy(msg->message, &error_reply, zcrypt_msgtype6_receive()
856 memcpy(msg->message, reply->message, sizeof(error_reply)); zcrypt_msgtype6_receive()
863 * "msg" has finished with the reply message "reply".
866 * @msg: pointer to the AP message
870 struct ap_message *msg, zcrypt_msgtype6_receive_ep11()
878 (struct response_type *)msg->private; zcrypt_msgtype6_receive_ep11()
884 memcpy(msg->message, &error_reply, sizeof(error_reply)); zcrypt_msgtype6_receive_ep11()
894 memcpy(msg->message, reply->message, length); zcrypt_msgtype6_receive_ep11()
897 memcpy(msg->message, &error_reply, sizeof(error_reply)); zcrypt_msgtype6_receive_ep11()
900 memcpy(msg->message, reply->message, sizeof(error_reply)); zcrypt_msgtype6_receive_ep11()
818 zcrypt_msgtype6_receive(struct ap_device *ap_dev, struct ap_message *msg, struct ap_message *reply) zcrypt_msgtype6_receive() argument
869 zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev, struct ap_message *msg, struct ap_message *reply) zcrypt_msgtype6_receive_ep11() argument
H A Dzcrypt_pcicc.c176 } __attribute__((packed)) *msg = ap_msg->message; ICAMEX_msg_to_type6MEX_msg() local
180 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength)) ICAMEX_msg_to_type6MEX_msg()
183 if (is_PKCS11_padded(msg->text, mex->inputdatalength)) ICAMEX_msg_to_type6MEX_msg()
187 msg->hdr = static_type6_hdr; ICAMEX_msg_to_type6MEX_msg()
188 msg->fr = static_pke_function_and_rules; ICAMEX_msg_to_type6MEX_msg()
190 if (is_PKCS12_padded(msg->text, mex->inputdatalength)) { ICAMEX_msg_to_type6MEX_msg()
192 pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3; ICAMEX_msg_to_type6MEX_msg()
196 memmove(msg->text, msg->text + pad_len, vud_len); ICAMEX_msg_to_type6MEX_msg()
197 msg->length = cpu_to_le16(vud_len + 2); ICAMEX_msg_to_type6MEX_msg()
200 size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0); ICAMEX_msg_to_type6MEX_msg()
203 size += sizeof(*msg) + vud_len; /* total size of msg */ ICAMEX_msg_to_type6MEX_msg()
206 msg->length = cpu_to_le16(2 + vud_len); ICAMEX_msg_to_type6MEX_msg()
208 msg->hdr.function_code[1] = 'D'; ICAMEX_msg_to_type6MEX_msg()
209 msg->fr.function_code[1] = 'D'; ICAMEX_msg_to_type6MEX_msg()
212 size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0); ICAMEX_msg_to_type6MEX_msg()
215 size += sizeof(*msg) + vud_len; /* total size of msg */ ICAMEX_msg_to_type6MEX_msg()
219 msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4; ICAMEX_msg_to_type6MEX_msg()
220 msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr); ICAMEX_msg_to_type6MEX_msg()
222 msg->cprb = static_cprb; ICAMEX_msg_to_type6MEX_msg()
223 msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid); ICAMEX_msg_to_type6MEX_msg()
224 msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) - ICAMEX_msg_to_type6MEX_msg()
225 sizeof(msg->cprb)); ICAMEX_msg_to_type6MEX_msg()
226 msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1); ICAMEX_msg_to_type6MEX_msg()
263 } __attribute__((packed)) *msg = ap_msg->message; ICACRT_msg_to_type6CRT_msg() local
267 msg->length = cpu_to_le16(2 + crt->inputdatalength); ICACRT_msg_to_type6CRT_msg()
268 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength)) ICACRT_msg_to_type6CRT_msg()
271 if (is_PKCS11_padded(msg->text, crt->inputdatalength)) ICACRT_msg_to_type6CRT_msg()
275 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0); ICACRT_msg_to_type6CRT_msg()
278 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ ICACRT_msg_to_type6CRT_msg()
281 msg->hdr = static_type6_hdr; ICACRT_msg_to_type6CRT_msg()
282 msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4; ICACRT_msg_to_type6CRT_msg()
283 msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr); ICACRT_msg_to_type6CRT_msg()
285 msg->cprb = static_cprb; ICACRT_msg_to_type6CRT_msg()
286 msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid); ICACRT_msg_to_type6CRT_msg()
287 msg->cprb.req_parml = msg->cprb.rpl_parml = ICACRT_msg_to_type6CRT_msg()
288 cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb)); ICACRT_msg_to_type6CRT_msg()
290 msg->fr = static_pkd_function_and_rules; ICACRT_msg_to_type6CRT_msg()
355 struct type86_reply *msg = reply->message; convert_type86() local
360 service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); convert_type86()
362 service_rs = le16_to_cpu(msg->cprb.ccp_rscode); convert_type86()
382 msg->hdr.reply_code); convert_type86()
385 data = msg->text; convert_type86()
386 reply_len = le16_to_cpu(msg->length) - 2; convert_type86()
420 struct type86_reply *msg = reply->message; convert_response() local
423 switch (msg->hdr.type) { convert_response()
428 if (msg->hdr.reply_code) convert_response()
430 if (msg->cprb.cprb_ver_id == 0x01) convert_response()
446 * "msg" has finished with the reply message "reply".
449 * @msg: pointer to the AP message
453 struct ap_message *msg, zcrypt_pcicc_receive()
465 memcpy(msg->message, &error_reply, sizeof(error_reply)); zcrypt_pcicc_receive()
473 memcpy(msg->message, reply->message, length); zcrypt_pcicc_receive()
475 memcpy(msg->message, reply->message, sizeof error_reply); zcrypt_pcicc_receive()
477 complete((struct completion *) msg->private); zcrypt_pcicc_receive()
452 zcrypt_pcicc_receive(struct ap_device *ap_dev, struct ap_message *msg, struct ap_message *reply) zcrypt_pcicc_receive() argument
/linux-4.1.27/drivers/media/pci/cx25821/
H A Dcx25821-i2c.c81 const struct i2c_msg *msg, int joined_rlen) i2c_sendbytes()
89 dprintk(1, "%s(msg->wlen=%d, nextmsg->rlen=%d)\n", __func__, i2c_sendbytes()
90 msg->len, joined_rlen); i2c_sendbytes()
92 dprintk(1, "%s(msg->len=%d)\n", __func__, msg->len); i2c_sendbytes()
95 if (msg->len == 0) { i2c_sendbytes()
96 cx_write(bus->reg_addr, msg->addr << 25); i2c_sendbytes()
110 addr = (msg->addr << 25) | msg->buf[0]; i2c_sendbytes()
111 wdata = msg->buf[0]; i2c_sendbytes()
115 if (msg->len > 1) i2c_sendbytes()
136 for (cnt = 1; cnt < msg->len; cnt++) { i2c_sendbytes()
138 wdata = msg->buf[cnt]; i2c_sendbytes()
141 if (cnt < msg->len - 1) i2c_sendbytes()
158 dprintk(1, " %02x", msg->buf[cnt]); i2c_sendbytes()
164 return msg->len; i2c_sendbytes()
175 const struct i2c_msg *msg, int joined) i2c_readbytes()
183 dprintk(1, "6-%s(msg->len=%d)\n", __func__, msg->len); i2c_readbytes()
186 if (msg->len == 0) { i2c_readbytes()
187 cx_write(bus->reg_addr, msg->addr << 25); i2c_readbytes()
202 dprintk(1, " <R %02x", (msg->addr << 1) + 1); i2c_readbytes()
205 for (cnt = 0; cnt < msg->len; cnt++) { i2c_readbytes()
209 if (cnt < msg->len - 1) i2c_readbytes()
212 cx_write(bus->reg_addr, msg->addr << 25); i2c_readbytes()
220 msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; i2c_readbytes()
223 dprintk(1, " %02x", msg->buf[cnt]); i2c_readbytes()
229 return msg->len; i2c_readbytes()
338 struct i2c_msg msg;
349 msg.addr = 0x44;
350 msg.flags = I2C_M_TEN;
351 msg.len = 3;
352 msg.buf = buffer;
354 i2c_xfer(&dev->i2c_bus[0].i2c_adap, &msg, 1);
80 i2c_sendbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined_rlen) i2c_sendbytes() argument
174 i2c_readbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined) i2c_readbytes() argument
/linux-4.1.27/drivers/net/ethernet/qualcomm/
H A Dqca_7k.c51 struct spi_message *msg; qcaspi_read_register() local
57 msg = &qca->spi_msg1; qcaspi_read_register()
62 spi_sync(qca->spi_dev, msg); qcaspi_read_register()
64 msg = &qca->spi_msg2; qcaspi_read_register()
74 ret = spi_sync(qca->spi_dev, msg); qcaspi_read_register()
77 ret = msg->status; qcaspi_read_register()
92 struct spi_message *msg; qcaspi_write_register() local
99 msg = &qca->spi_msg1; qcaspi_write_register()
104 spi_sync(qca->spi_dev, msg); qcaspi_write_register()
106 msg = &qca->spi_msg2; qcaspi_write_register()
116 ret = spi_sync(qca->spi_dev, msg); qcaspi_write_register()
119 ret = msg->status; qcaspi_write_register()
131 struct spi_message *msg = &qca->spi_msg1; qcaspi_tx_cmd() local
140 ret = spi_sync(qca->spi_dev, msg); qcaspi_tx_cmd()
143 ret = msg->status; qcaspi_tx_cmd()
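qcaspi_read_register() and qcaspi_write_register() above reuse pre-built spi_msg1/spi_msg2 messages and check msg->status after spi_sync(). A minimal sketch of building a single-transfer spi_message from scratch, assuming <linux/spi/spi.h>; the tx/rx buffers and length are placeholders:

static int example_spi_xfer(struct spi_device *spi, const void *tx,
                            void *rx, size_t len)
{
        struct spi_transfer xfer = {
                .tx_buf = tx,   /* may be NULL for read-only transfers */
                .rx_buf = rx,   /* may be NULL for write-only transfers */
                .len    = len,
        };
        struct spi_message msg;

        spi_message_init(&msg);
        spi_message_add_tail(&xfer, &msg);

        /* returns 0 on success, negative errno otherwise; per-message status
         * is also left in msg.status, which is what the qca_7k code inspects */
        return spi_sync(spi, &msg);
}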
/linux-4.1.27/include/net/
H A Dscm.h37 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
38 void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
39 int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
75 static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, scm_send() argument
84 if (msg->msg_controllen <= 0) scm_send()
86 return __scm_send(sock, msg, scm); scm_send()
90 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) scm_passec() argument
100 put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, seclen, secdata); scm_passec()
106 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) scm_passec() argument
110 static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, scm_recv() argument
113 if (!msg->msg_control) { scm_recv()
115 msg->msg_flags |= MSG_CTRUNC; scm_recv()
127 put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds); scm_recv()
132 scm_passec(sock, msg, scm); scm_recv()
137 scm_detach_fds(msg, scm); scm_recv()
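scm_recv() above uses put_cmsg() to append SCM_CREDENTIALS (and, where configured, SCM_SECURITY and passed file descriptors) to the msghdr that recvmsg() hands back. For orientation, a userspace-side sketch of consuming that ancillary data, assuming a UNIX-domain socket fd with SO_PASSCRED already enabled; names here are illustrative only:

#define _GNU_SOURCE
#include <sys/socket.h>
#include <sys/uio.h>
#include <stdio.h>

static void example_recv_creds(int fd)
{
        char data[256];
        union {
                char buf[CMSG_SPACE(sizeof(struct ucred))];
                struct cmsghdr align;
        } ctrl;
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = ctrl.buf, .msg_controllen = sizeof(ctrl.buf),
        };
        struct cmsghdr *cmsg;

        if (recvmsg(fd, &msg, 0) < 0)
                return;

        /* walk the control messages queued by the kernel's put_cmsg() */
        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_CREDENTIALS) {
                        struct ucred *uc = (struct ucred *)CMSG_DATA(cmsg);
                        printf("pid=%ld uid=%ld gid=%ld\n",
                               (long)uc->pid, (long)uc->uid, (long)uc->gid);
                }
        }
}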
H A Dtransp_v6.h36 void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
38 void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg,
40 void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
43 int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
H A Dping.h34 int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
37 struct msghdr *msg,
40 struct msghdr *msg,
62 struct msghdr *msg; member in struct:pingfakehdr
78 int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
80 int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
82 int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
/linux-4.1.27/drivers/w1/
H A Dw1_netlink.c46 struct w1_netlink_msg *msg; /* advances as w1_netlink_msg is appended */ member in struct:w1_cb_block

61 struct w1_netlink_msg *msg; member in struct:w1_cb_node
108 block->msg = NULL; w1_reply_make_space()
121 * w1_netlink_setup_msg() - prepare to write block->msg
126 * block->cn->len does not include space for block->msg
127 * block->msg advances but remains uninitialized
132 block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len); w1_netlink_setup_msg()
144 block->msg = (struct w1_netlink_msg *)block->cn->data; w1_netlink_setup_msg()
148 /* Append cmd to msg, include cmd->data as well. This is because
164 memcpy(block->msg, block->cur_msg, sizeof(*block->msg)); w1_netlink_queue_cmd()
165 block->cn->len += sizeof(*block->msg); w1_netlink_queue_cmd()
166 block->msg->len = 0; w1_netlink_queue_cmd()
167 block->cmd = (struct w1_netlink_cmd *)(block->msg->data); w1_netlink_queue_cmd()
173 block->msg->len += space; w1_netlink_queue_cmd()
187 memcpy(block->msg, req_msg, sizeof(*req_msg)); w1_netlink_queue_status()
189 block->msg->len = 0; w1_netlink_queue_status()
190 block->msg->status = (u8)-error; w1_netlink_queue_status()
192 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)block->msg->data; w1_netlink_queue_status()
195 block->msg->len += sizeof(*cmd); w1_netlink_queue_status()
204 * @msg: original w1_netlink_msg
208 * Use when a block isn't available to queue the message to and cn, msg
211 static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg, w1_netlink_send_error() argument
216 struct w1_netlink_msg msg; w1_netlink_send_error() member in struct:__anon10662
219 memcpy(&packet.msg, msg, sizeof(packet.msg)); w1_netlink_send_error()
220 packet.cn.len = sizeof(packet.msg); w1_netlink_send_error()
221 packet.msg.len = 0; w1_netlink_send_error()
222 packet.msg.status = (u8)-error; w1_netlink_send_error()
229 * @msg: w1_netlink_msg message to be sent
233 void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg) w1_netlink_send() argument
237 struct w1_netlink_msg msg; w1_netlink_send() member in struct:__anon10663
245 packet.cn.len = sizeof(*msg); w1_netlink_send()
247 memcpy(&packet.msg, msg, sizeof(*msg)); w1_netlink_send()
248 packet.msg.len = 0; w1_netlink_send()
271 block->msg->len += sizeof(*data); w1_send_slave()
420 struct w1_netlink_msg *msg; w1_process_command_root() local
433 msg = (struct w1_netlink_msg *)cn->data; w1_process_command_root()
435 msg->type = W1_LIST_MASTERS; w1_process_command_root()
436 msg->status = 0; w1_process_command_root()
437 msg->len = 0; w1_process_command_root()
438 id = (u32 *)msg->data; w1_process_command_root()
445 msg->len = 0; w1_process_command_root()
446 id = (u32 *)msg->data; w1_process_command_root()
450 msg->len += sizeof(*id); w1_process_command_root()
465 u16 mlen = node->msg->len; w1_process_cb()
469 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)node->msg->data; w1_process_cb()
475 node->block->cur_msg = node->msg; w1_process_cb()
489 w1_netlink_queue_status(node->block, node->msg, cmd, err); w1_process_cb()
498 w1_netlink_queue_status(node->block, node->msg, cmd, err); w1_process_cb()
517 static void w1_list_count_cmds(struct w1_netlink_msg *msg, int *cmd_count, w1_list_count_cmds() argument
520 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)msg->data; w1_list_count_cmds()
521 u16 mlen = msg->len; w1_list_count_cmds()
541 struct w1_master *dev = w1_search_master_id(msg->id.mst.id); w1_list_count_cmds()
556 struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1); w1_cn_callback() local
572 w1_netlink_send_error(cn, msg, nsp->portid, -EINVAL); w1_cn_callback()
581 if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) { w1_cn_callback()
589 if (msg->type == W1_MASTER_CMD || msg->type == W1_SLAVE_CMD) { w1_cn_callback()
591 w1_list_count_cmds(msg, &cmd_count, &slave_len); w1_cn_callback()
594 msg_len -= sizeof(struct w1_netlink_msg) + msg->len; w1_cn_callback()
595 msg = (struct w1_netlink_msg *)(((u8 *)msg) + w1_cn_callback()
596 sizeof(struct w1_netlink_msg) + msg->len); w1_cn_callback()
598 msg = (struct w1_netlink_msg *)(cn + 1); w1_cn_callback()
628 w1_netlink_send_error(cn, msg, nsp->portid, -ENOMEM); w1_cn_callback()
656 if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) { w1_cn_callback()
662 if (msg->type == W1_LIST_MASTERS) { w1_cn_callback()
670 if (!msg->len) { w1_cn_callback()
676 if (msg->type == W1_MASTER_CMD) { w1_cn_callback()
677 dev = w1_search_master_id(msg->id.mst.id); w1_cn_callback()
678 } else if (msg->type == W1_SLAVE_CMD) { w1_cn_callback()
679 sl = w1_search_slave((struct w1_reg_num *)msg->id.id); w1_cn_callback()
685 msg->type, msg->len); w1_cn_callback()
700 node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn + w1_cn_callback()
701 (size_t)((u8 *)msg - (u8 *)cn)); w1_cn_callback()
717 w1_netlink_send_error(cn, msg, nsp->portid, err); w1_cn_callback()
718 msg_len -= sizeof(struct w1_netlink_msg) + msg->len; w1_cn_callback()
719 msg = (struct w1_netlink_msg *)(((u8 *)msg) + w1_cn_callback()
720 sizeof(struct w1_netlink_msg) + msg->len); w1_cn_callback()
/linux-4.1.27/drivers/media/firewire/
H A Dfiredtv-ci.c81 return avc_ca_app_info(fdtv, reply->msg, &reply->length); fdtv_ca_app_info()
88 return avc_ca_info(fdtv, reply->msg, &reply->length); fdtv_ca_info()
95 return avc_ca_get_mmi(fdtv, reply->msg, &reply->length); fdtv_ca_get_mmi()
128 struct ca_msg *msg = arg; fdtv_ca_pmt() local
134 if (msg->msg[3] & 0x80) { fdtv_ca_pmt()
136 for (i = 0; i < (msg->msg[3] & 0x7f); i++) fdtv_ca_pmt()
137 data_length = (data_length << 8) + msg->msg[data_pos++]; fdtv_ca_pmt()
139 data_length = msg->msg[3]; fdtv_ca_pmt()
142 return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length); fdtv_ca_pmt()
147 struct ca_msg *msg = arg; fdtv_ca_send_msg() local
152 (msg->msg[0] << 16) + (msg->msg[1] << 8) + msg->msg[2]; fdtv_ca_send_msg()
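fdtv_ca_pmt() above open-codes ASN.1 BER length decoding: a length byte below 0x80 is the length itself, while 0x80|n means the next n bytes carry the length big-endian. A hedged sketch of the same decoding as a standalone helper (buf/pos are hypothetical, not part of the firedtv API):

static int example_ber_length(const u8 *buf, int *pos)
{
        int len, i, n;

        if (buf[*pos] & 0x80) {
                /* long form: low 7 bits give the number of length bytes */
                n = buf[*pos] & 0x7f;
                len = 0;
                for (i = 0; i < n; i++)
                        len = (len << 8) | buf[*pos + 1 + i];
                *pos += 1 + n;
        } else {
                /* short form: the byte is the length itself (0..127) */
                len = buf[*pos];
                *pos += 1;
        }
        return len;
}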
/linux-4.1.27/drivers/media/usb/cx231xx/
H A Dcx231xx-i2c.c65 const struct i2c_msg *msg, int tuner_type) is_tuner()
72 if (msg->addr != dev->board.tuner_addr) is_tuner()
85 const struct i2c_msg *msg) cx231xx_i2c_send_bytes()
98 if (is_tuner(dev, bus, msg, TUNER_XC5000)) { cx231xx_i2c_send_bytes()
99 size = msg->len; cx231xx_i2c_send_bytes()
106 if (msg->len >= 2) cx231xx_i2c_send_bytes()
107 saddr = msg->buf[0] << 8 | msg->buf[1]; cx231xx_i2c_send_bytes()
108 else if (msg->len == 1) cx231xx_i2c_send_bytes()
109 saddr = msg->buf[0]; cx231xx_i2c_send_bytes()
132 msg->addr, msg->len, saddr); cx231xx_i2c_send_bytes()
135 msg->addr, cx231xx_i2c_send_bytes()
136 msg->buf, cx231xx_i2c_send_bytes()
137 msg->len); cx231xx_i2c_send_bytes()
146 buf_ptr = (u8 *) (msg->buf + 1); cx231xx_i2c_send_bytes()
150 req_data.dev_addr = msg->addr; cx231xx_i2c_send_bytes()
151 req_data.direction = msg->flags; cx231xx_i2c_send_bytes()
153 req_data.saddr_dat = msg->buf[0]; cx231xx_i2c_send_bytes()
177 req_data.dev_addr = msg->addr; cx231xx_i2c_send_bytes()
178 req_data.direction = msg->flags; cx231xx_i2c_send_bytes()
181 req_data.buf_size = msg->len; cx231xx_i2c_send_bytes()
182 req_data.p_buffer = msg->buf; cx231xx_i2c_send_bytes()
196 const struct i2c_msg *msg) cx231xx_i2c_recv_bytes()
205 if (is_tuner(dev, bus, msg, TUNER_XC5000)) { cx231xx_i2c_recv_bytes()
206 if (msg->len == 2) cx231xx_i2c_recv_bytes()
207 saddr = msg->buf[0] << 8 | msg->buf[1]; cx231xx_i2c_recv_bytes()
208 else if (msg->len == 1) cx231xx_i2c_recv_bytes()
209 saddr = msg->buf[0]; cx231xx_i2c_recv_bytes()
218 msg->buf[0] = 0; cx231xx_i2c_recv_bytes()
219 if (msg->len == 2) cx231xx_i2c_recv_bytes()
220 msg->buf[1] = 0; cx231xx_i2c_recv_bytes()
234 msg->addr, msg->len, cx231xx_i2c_recv_bytes()
235 msg->buf[0] << 8 | msg->buf[1]); cx231xx_i2c_recv_bytes()
238 dev->cx231xx_gpio_i2c_write(dev, msg->addr, cx231xx_i2c_recv_bytes()
239 msg->buf, cx231xx_i2c_recv_bytes()
240 msg->len); cx231xx_i2c_recv_bytes()
242 dev->cx231xx_gpio_i2c_read(dev, msg->addr, cx231xx_i2c_recv_bytes()
243 msg->buf, cx231xx_i2c_recv_bytes()
244 msg->len); cx231xx_i2c_recv_bytes()
250 req_data.dev_addr = msg->addr; cx231xx_i2c_recv_bytes()
251 req_data.direction = msg->flags; cx231xx_i2c_recv_bytes()
252 req_data.saddr_len = msg->len; cx231xx_i2c_recv_bytes()
253 req_data.saddr_dat = msg->buf[0] << 8 | msg->buf[1]; cx231xx_i2c_recv_bytes()
254 req_data.buf_size = msg->len; cx231xx_i2c_recv_bytes()
255 req_data.p_buffer = msg->buf; cx231xx_i2c_recv_bytes()
263 req_data.dev_addr = msg->addr; cx231xx_i2c_recv_bytes()
264 req_data.direction = msg->flags; cx231xx_i2c_recv_bytes()
267 req_data.buf_size = msg->len; cx231xx_i2c_recv_bytes()
268 req_data.p_buffer = msg->buf; cx231xx_i2c_recv_bytes()
346 const struct i2c_msg *msg) cx231xx_i2c_check_for_device()
355 req_data.dev_addr = msg->addr; cx231xx_i2c_check_for_device()
64 is_tuner(struct cx231xx *dev, struct cx231xx_i2c *bus, const struct i2c_msg *msg, int tuner_type) is_tuner() argument
84 cx231xx_i2c_send_bytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg) cx231xx_i2c_send_bytes() argument
195 cx231xx_i2c_recv_bytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg) cx231xx_i2c_recv_bytes() argument
345 cx231xx_i2c_check_for_device(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg) cx231xx_i2c_check_for_device() argument
/linux-4.1.27/Documentation/connector/
H A Dcn_test.c38 static void cn_test_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) cn_test_callback() argument
41 __func__, jiffies, msg->id.idx, msg->id.val, cn_test_callback()
42 msg->seq, msg->ack, msg->len, cn_test_callback()
43 msg->len ? (char *)msg->data : ""); cn_test_callback()
56 struct cn_msg *msg = NULL;
62 size0 = sizeof(*msg) + sizeof(*ctl) + 3 * sizeof(*req);
78 msg = nlmsg_data(nlh);
80 memset(msg, 0, size0);
82 msg->id.idx = -1;
83 msg->id.val = -1;
84 msg->seq = 0x123;
85 msg->ack = 0x345;
86 msg->len = size0 - sizeof(*msg);
88 ctl = (struct cn_ctl_msg *)(msg + 1);
93 ctl->len = msg->len - sizeof(*ctl);
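cn_test.c above is the reference user of the connector API that several other hits (w1_netlink.c in particular) build on. A minimal sketch of registering a callback and emitting a cn_msg, assuming <linux/connector.h>, <linux/slab.h> and <linux/string.h>; the id values, netlink group and payload are placeholders:

static struct cb_id example_id = { .idx = 0xabc, .val = 0x456 };

static void example_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
        pr_info("cn msg: idx=%x val=%x seq=%u ack=%u len=%u\n",
                msg->id.idx, msg->id.val, msg->seq, msg->ack, msg->len);
}

static int example_cn_send(const void *payload, u16 len)
{
        struct cn_msg *m;
        int ret;

        /* cn_msg carries its payload inline, directly after the header */
        m = kzalloc(sizeof(*m) + len, GFP_KERNEL);
        if (!m)
                return -ENOMEM;

        m->id = example_id;
        m->len = len;
        memcpy(m->data, payload, len);

        /* portid 0 = multicast; using id.idx as the group is a placeholder choice */
        ret = cn_netlink_send(m, 0, example_id.idx, GFP_KERNEL);
        kfree(m);
        return ret;
}

/* registration, e.g. from a module init function:
 *      cn_add_callback(&example_id, "example", example_cn_callback);
 */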
/linux-4.1.27/drivers/net/wan/
H A Dixp4xx_hss.c273 struct msg { struct
360 static void hss_npe_send(struct port *port, struct msg *msg, const char* what) hss_npe_send() argument
362 u32 *val = (u32*)msg; hss_npe_send()
363 if (npe_send_message(port->npe, msg, what)) { hss_npe_send()
372 struct msg msg; hss_config_set_lut() local
375 memset(&msg, 0, sizeof(msg)); hss_config_set_lut()
376 msg.cmd = PORT_CONFIG_WRITE; hss_config_set_lut()
377 msg.hss_port = port->id; hss_config_set_lut()
380 msg.data32 >>= 2; hss_config_set_lut()
381 msg.data32 |= TDMMAP_HDLC << 30; hss_config_set_lut()
384 msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3); hss_config_set_lut()
385 hss_npe_send(port, &msg, "HSS_SET_TX_LUT"); hss_config_set_lut()
387 msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT; hss_config_set_lut()
388 hss_npe_send(port, &msg, "HSS_SET_RX_LUT"); hss_config_set_lut()
395 struct msg msg; hss_config() local
397 memset(&msg, 0, sizeof(msg)); hss_config()
398 msg.cmd = PORT_CONFIG_WRITE; hss_config()
399 msg.hss_port = port->id; hss_config()
400 msg.index = HSS_CONFIG_TX_PCR; hss_config()
401 msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN | hss_config()
404 msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT; hss_config()
405 hss_npe_send(port, &msg, "HSS_SET_TX_PCR"); hss_config()
407 msg.index = HSS_CONFIG_RX_PCR; hss_config()
408 msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING; hss_config()
409 hss_npe_send(port, &msg, "HSS_SET_RX_PCR"); hss_config()
411 memset(&msg, 0, sizeof(msg)); hss_config()
412 msg.cmd = PORT_CONFIG_WRITE; hss_config()
413 msg.hss_port = port->id; hss_config()
414 msg.index = HSS_CONFIG_CORE_CR; hss_config()
415 msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) | hss_config()
417 hss_npe_send(port, &msg, "HSS_SET_CORE_CR"); hss_config()
419 memset(&msg, 0, sizeof(msg)); hss_config()
420 msg.cmd = PORT_CONFIG_WRITE; hss_config()
421 msg.hss_port = port->id; hss_config()
422 msg.index = HSS_CONFIG_CLOCK_CR; hss_config()
423 msg.data32 = port->clock_reg; hss_config()
424 hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR"); hss_config()
426 memset(&msg, 0, sizeof(msg)); hss_config()
427 msg.cmd = PORT_CONFIG_WRITE; hss_config()
428 msg.hss_port = port->id; hss_config()
429 msg.index = HSS_CONFIG_TX_FCR; hss_config()
430 msg.data16a = FRAME_OFFSET; hss_config()
431 msg.data16b = FRAME_SIZE - 1; hss_config()
432 hss_npe_send(port, &msg, "HSS_SET_TX_FCR"); hss_config()
434 memset(&msg, 0, sizeof(msg)); hss_config()
435 msg.cmd = PORT_CONFIG_WRITE; hss_config()
436 msg.hss_port = port->id; hss_config()
437 msg.index = HSS_CONFIG_RX_FCR; hss_config()
438 msg.data16a = FRAME_OFFSET; hss_config()
439 msg.data16b = FRAME_SIZE - 1; hss_config()
440 hss_npe_send(port, &msg, "HSS_SET_RX_FCR"); hss_config()
444 memset(&msg, 0, sizeof(msg)); hss_config()
445 msg.cmd = PORT_CONFIG_LOAD; hss_config()
446 msg.hss_port = port->id; hss_config()
447 hss_npe_send(port, &msg, "HSS_LOAD_CONFIG"); hss_config()
449 if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") || hss_config()
451 msg.cmd != PORT_CONFIG_LOAD || msg.data32) { hss_config()
457 npe_recv_message(port->npe, &msg, "FLUSH_IT"); hss_config()
462 struct msg msg; hss_set_hdlc_cfg() local
464 memset(&msg, 0, sizeof(msg)); hss_set_hdlc_cfg()
465 msg.cmd = PKT_PIPE_HDLC_CFG_WRITE; hss_set_hdlc_cfg()
466 msg.hss_port = port->id; hss_set_hdlc_cfg()
467 msg.data8a = port->hdlc_cfg; /* rx_cfg */ hss_set_hdlc_cfg()
468 msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */ hss_set_hdlc_cfg()
469 hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG"); hss_set_hdlc_cfg()
474 struct msg msg; hss_get_status() local
476 memset(&msg, 0, sizeof(msg)); hss_get_status()
477 msg.cmd = PORT_ERROR_READ; hss_get_status()
478 msg.hss_port = port->id; hss_get_status()
479 hss_npe_send(port, &msg, "PORT_ERROR_READ"); hss_get_status()
480 if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) { hss_get_status()
485 return msg.data32; hss_get_status()
490 struct msg msg; hss_start_hdlc() local
492 memset(&msg, 0, sizeof(msg)); hss_start_hdlc()
493 msg.cmd = PKT_PIPE_FLOW_ENABLE; hss_start_hdlc()
494 msg.hss_port = port->id; hss_start_hdlc()
495 msg.data32 = 0; hss_start_hdlc()
496 hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE"); hss_start_hdlc()
501 struct msg msg; hss_stop_hdlc() local
503 memset(&msg, 0, sizeof(msg)); hss_stop_hdlc()
504 msg.cmd = PKT_PIPE_FLOW_DISABLE; hss_stop_hdlc()
505 msg.hss_port = port->id; hss_stop_hdlc()
506 hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE"); hss_stop_hdlc()
512 struct msg msg; hss_load_firmware() local
524 memset(&msg, 0, sizeof(msg)); hss_load_firmware()
525 msg.cmd = PKT_NUM_PIPES_WRITE; hss_load_firmware()
526 msg.hss_port = port->id; hss_load_firmware()
527 msg.data8a = PKT_NUM_PIPES; hss_load_firmware()
528 hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES"); hss_load_firmware()
530 msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE; hss_load_firmware()
531 msg.data8a = PKT_PIPE_FIFO_SIZEW; hss_load_firmware()
532 hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO"); hss_load_firmware()
534 msg.cmd = PKT_PIPE_MODE_WRITE; hss_load_firmware()
535 msg.data8a = NPE_PKT_MODE_HDLC; hss_load_firmware()
536 /* msg.data8b = inv_mask */ hss_load_firmware()
537 /* msg.data8c = or_mask */ hss_load_firmware()
538 hss_npe_send(port, &msg, "HSS_SET_PKT_MODE"); hss_load_firmware()
540 msg.cmd = PKT_PIPE_RX_SIZE_WRITE; hss_load_firmware()
541 msg.data16a = HDLC_MAX_MRU; /* including CRC */ hss_load_firmware()
542 hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE"); hss_load_firmware()
544 msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE; hss_load_firmware()
545 msg.data32 = 0x7F7F7F7F; /* ??? FIXME */ hss_load_firmware()
546 hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE"); hss_load_firmware()
/linux-4.1.27/arch/s390/mm/
H A Dcmm.c363 static void cmm_smsg_target(const char *from, char *msg) cmm_smsg_target() argument
369 if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg)) cmm_smsg_target()
371 if (strncmp(msg, "SHRINK", 6) == 0) { cmm_smsg_target()
372 if (!cmm_skip_blanks(msg + 6, &msg)) cmm_smsg_target()
374 nr = simple_strtoul(msg, &msg, 0); cmm_smsg_target()
375 cmm_skip_blanks(msg, &msg); cmm_smsg_target()
376 if (*msg == '\0') cmm_smsg_target()
378 } else if (strncmp(msg, "RELEASE", 7) == 0) { cmm_smsg_target()
379 if (!cmm_skip_blanks(msg + 7, &msg)) cmm_smsg_target()
381 nr = simple_strtoul(msg, &msg, 0); cmm_smsg_target()
382 cmm_skip_blanks(msg, &msg); cmm_smsg_target()
383 if (*msg == '\0') cmm_smsg_target()
385 } else if (strncmp(msg, "REUSE", 5) == 0) { cmm_smsg_target()
386 if (!cmm_skip_blanks(msg + 5, &msg)) cmm_smsg_target()
388 nr = simple_strtoul(msg, &msg, 0); cmm_smsg_target()
389 if (!cmm_skip_blanks(msg, &msg)) cmm_smsg_target()
391 seconds = simple_strtoul(msg, &msg, 0); cmm_smsg_target()
392 cmm_skip_blanks(msg, &msg); cmm_smsg_target()
393 if (*msg == '\0') cmm_smsg_target()
/linux-4.1.27/drivers/media/pci/pt1/
H A Dva1j5jf8007s.c179 struct i2c_msg msg; va1j5jf8007s_set_frequency_1() local
194 msg.addr = state->config->demod_address; va1j5jf8007s_set_frequency_1()
195 msg.flags = 0; va1j5jf8007s_set_frequency_1()
196 msg.len = sizeof(buf); va1j5jf8007s_set_frequency_1()
197 msg.buf = buf; va1j5jf8007s_set_frequency_1()
199 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007s_set_frequency_1()
208 struct i2c_msg msg; va1j5jf8007s_set_frequency_2() local
214 msg.addr = state->config->demod_address; va1j5jf8007s_set_frequency_2()
215 msg.flags = 0; va1j5jf8007s_set_frequency_2()
216 msg.len = sizeof(buf); va1j5jf8007s_set_frequency_2()
217 msg.buf = buf; va1j5jf8007s_set_frequency_2()
219 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007s_set_frequency_2()
229 struct i2c_msg msg; va1j5jf8007s_set_frequency_3() local
238 msg.addr = state->config->demod_address; va1j5jf8007s_set_frequency_3()
239 msg.flags = 0; va1j5jf8007s_set_frequency_3()
240 msg.len = sizeof(buf); va1j5jf8007s_set_frequency_3()
241 msg.buf = buf; va1j5jf8007s_set_frequency_3()
243 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007s_set_frequency_3()
281 struct i2c_msg msg; va1j5jf8007s_set_modulation() local
286 msg.addr = state->config->demod_address; va1j5jf8007s_set_modulation()
287 msg.flags = 0; va1j5jf8007s_set_modulation()
288 msg.len = sizeof(buf); va1j5jf8007s_set_modulation()
289 msg.buf = buf; va1j5jf8007s_set_modulation()
291 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007s_set_modulation()
330 struct i2c_msg msg; va1j5jf8007s_set_ts_id() local
340 msg.addr = state->config->demod_address; va1j5jf8007s_set_ts_id()
341 msg.flags = 0; va1j5jf8007s_set_ts_id()
342 msg.len = sizeof(buf); va1j5jf8007s_set_ts_id()
343 msg.buf = buf; va1j5jf8007s_set_ts_id()
345 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007s_set_ts_id()
513 struct i2c_msg msg; va1j5jf8007s_init_frequency() local
520 msg.addr = state->config->demod_address; va1j5jf8007s_init_frequency()
521 msg.flags = 0; va1j5jf8007s_init_frequency()
522 msg.len = sizeof(buf); va1j5jf8007s_init_frequency()
523 msg.buf = buf; va1j5jf8007s_init_frequency()
525 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007s_init_frequency()
534 struct i2c_msg msg; va1j5jf8007s_set_sleep() local
539 msg.addr = state->config->demod_address; va1j5jf8007s_set_sleep()
540 msg.flags = 0; va1j5jf8007s_set_sleep()
541 msg.len = sizeof(buf); va1j5jf8007s_set_sleep()
542 msg.buf = buf; va1j5jf8007s_set_sleep()
544 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007s_set_sleep()
652 struct i2c_msg msg; va1j5jf8007s_prepare_2() local
670 msg.addr = addr; va1j5jf8007s_prepare_2()
671 msg.flags = 0; va1j5jf8007s_prepare_2()
672 msg.len = 2; va1j5jf8007s_prepare_2()
673 msg.buf = buf; va1j5jf8007s_prepare_2()
676 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007s_prepare_2()
709 struct i2c_msg msg; va1j5jf8007s_attach() local
725 msg.addr = state->config->demod_address; va1j5jf8007s_attach()
726 msg.flags = 0; va1j5jf8007s_attach()
727 msg.len = sizeof(buf); va1j5jf8007s_attach()
728 msg.buf = buf; va1j5jf8007s_attach()
730 if (i2c_transfer(state->adap, &msg, 1) != 1) { va1j5jf8007s_attach()
H A Dva1j5jf8007t.c165 struct i2c_msg msg; va1j5jf8007t_set_frequency() local
177 msg.addr = state->config->demod_address; va1j5jf8007t_set_frequency()
178 msg.flags = 0; va1j5jf8007t_set_frequency()
179 msg.len = sizeof(buf); va1j5jf8007t_set_frequency()
180 msg.buf = buf; va1j5jf8007t_set_frequency()
182 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007t_set_frequency()
220 struct i2c_msg msg; va1j5jf8007t_set_modulation() local
225 msg.addr = state->config->demod_address; va1j5jf8007t_set_modulation()
226 msg.flags = 0; va1j5jf8007t_set_modulation()
227 msg.len = sizeof(buf); va1j5jf8007t_set_modulation()
228 msg.buf = buf; va1j5jf8007t_set_modulation()
230 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007t_set_modulation()
359 struct i2c_msg msg; va1j5jf8007t_init_frequency() local
369 msg.addr = state->config->demod_address; va1j5jf8007t_init_frequency()
370 msg.flags = 0; va1j5jf8007t_init_frequency()
371 msg.len = sizeof(buf); va1j5jf8007t_init_frequency()
372 msg.buf = buf; va1j5jf8007t_init_frequency()
374 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007t_init_frequency()
383 struct i2c_msg msg; va1j5jf8007t_set_sleep() local
388 msg.addr = state->config->demod_address; va1j5jf8007t_set_sleep()
389 msg.flags = 0; va1j5jf8007t_set_sleep()
390 msg.len = sizeof(buf); va1j5jf8007t_set_sleep()
391 msg.buf = buf; va1j5jf8007t_set_sleep()
393 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007t_set_sleep()
470 struct i2c_msg msg; va1j5jf8007t_prepare() local
488 msg.addr = state->config->demod_address; va1j5jf8007t_prepare()
489 msg.flags = 0; va1j5jf8007t_prepare()
490 msg.len = sizeof(buf); va1j5jf8007t_prepare()
491 msg.buf = buf; va1j5jf8007t_prepare()
495 if (i2c_transfer(state->adap, &msg, 1) != 1) va1j5jf8007t_prepare()
509 struct i2c_msg msg; va1j5jf8007t_attach() local
525 msg.addr = state->config->demod_address; va1j5jf8007t_attach()
526 msg.flags = 0; va1j5jf8007t_attach()
527 msg.len = sizeof(buf); va1j5jf8007t_attach()
528 msg.buf = buf; va1j5jf8007t_attach()
530 if (i2c_transfer(state->adap, &msg, 1) != 1) { va1j5jf8007t_attach()
/linux-4.1.27/drivers/staging/unisys/uislib/
H A Duislib.c122 init_msg_header(struct controlvm_message *msg, u32 id, uint rsp, uint svr) init_msg_header() argument
124 memset(msg, 0, sizeof(struct controlvm_message)); init_msg_header()
125 msg->hdr.id = id; init_msg_header()
126 msg->hdr.flags.response_expected = rsp; init_msg_header()
127 msg->hdr.flags.server = svr; init_msg_header()
145 create_bus(struct controlvm_message *msg, char *buf) create_bus() argument
157 bus_no = msg->cmd.create_bus.bus_no; create_bus()
158 dev_count = msg->cmd.create_bus.dev_count; create_bus()
176 if (msg->hdr.flags.test_message) { create_bus()
189 bus->bus_inst_uuid = msg->cmd.create_bus.bus_inst_uuid; create_bus()
209 if ((msg->cmd.create_bus.channel_addr != 0) && create_bus()
210 (msg->cmd.create_bus.channel_bytes != 0)) { create_bus()
211 bus->bus_channel_bytes = msg->cmd.create_bus.channel_bytes; create_bus()
213 init_vbus_channel(msg->cmd.create_bus.channel_addr, create_bus()
214 msg->cmd.create_bus.channel_bytes); create_bus()
216 /* the msg is bound for virtpci; send guest_msgs struct to callback */ create_bus()
217 if (!msg->hdr.flags.server) { create_bus()
224 cmd.add_vbus.bus_uuid = msg->cmd.create_bus.bus_data_type_uuid; create_bus()
225 cmd.add_vbus.instance_uuid = msg->cmd.create_bus.bus_inst_uuid; create_bus()
258 destroy_bus(struct controlvm_message *msg, char *buf) destroy_bus() argument
265 bus_no = msg->cmd.destroy_bus.bus_no; destroy_bus()
291 if (msg->hdr.flags.server) destroy_bus()
323 static int create_device(struct controlvm_message *msg, char *buf) create_device() argument
333 bus_no = msg->cmd.create_device.bus_no; create_device()
334 dev_no = msg->cmd.create_device.dev_no; create_device()
346 dev->channel_uuid = msg->cmd.create_device.data_type_uuid; create_device()
347 dev->intr = msg->cmd.create_device.intr; create_device()
348 dev->channel_addr = msg->cmd.create_device.channel_addr; create_device()
354 if (msg->hdr.flags.test_message) { create_device()
363 if (min_size > msg->cmd.create_device.channel_bytes) { create_device()
371 msg->cmd.create_device.channel_bytes); create_device()
379 dev->instance_uuid = msg->cmd.create_device.dev_inst_uuid; create_device()
380 dev->channel_bytes = msg->cmd.create_device.channel_bytes; create_device()
404 /* the msg is bound for virtpci; send create_device()
407 if (msg->hdr.flags.server) { create_device()
481 if (!msg->hdr.flags.test_message) { create_device()
490 static int pause_device(struct controlvm_message *msg) pause_device() argument
498 bus_no = msg->cmd.device_change_state.bus_no; pause_device()
499 dev_no = msg->cmd.device_change_state.dev_no; pause_device()
523 /* the msg is bound for virtpci; send pause_device()
547 static int resume_device(struct controlvm_message *msg) resume_device() argument
555 bus_no = msg->cmd.device_change_state.bus_no; resume_device()
556 dev_no = msg->cmd.device_change_state.dev_no; resume_device()
580 /* the msg is bound for virtpci; send resume_device()
605 static int destroy_device(struct controlvm_message *msg, char *buf) destroy_device() argument
613 bus_no = msg->cmd.destroy_device.bus_no; destroy_device()
614 dev_no = msg->cmd.destroy_device.dev_no; destroy_device()
638 /* the msg is bound for virtpci; send destroy_device()
669 if (!msg->hdr.flags.test_message) destroy_device()
678 init_chipset(struct controlvm_message *msg, char *buf) init_chipset() argument
682 max_bus_count = msg->cmd.init_chipset.bus_count; init_chipset()
683 platform_no = msg->cmd.init_chipset.platform_number; init_chipset()
693 if (!msg->hdr.flags.test_message) init_chipset()
704 struct controlvm_message msg; delete_bus_glue() local
706 init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0); delete_bus_glue()
707 msg.cmd.destroy_bus.bus_no = bus_no; delete_bus_glue()
708 if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) delete_bus_glue()
715 struct controlvm_message msg; delete_device_glue() local
717 init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0); delete_device_glue()
718 msg.cmd.destroy_device.bus_no = bus_no; delete_device_glue()
719 msg.cmd.destroy_device.dev_no = dev_no; delete_device_glue()
720 if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) delete_device_glue()
729 struct controlvm_message msg; uislib_client_inject_add_bus() local
736 init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0); uislib_client_inject_add_bus()
744 msg.cmd.init_chipset.bus_count = 23; uislib_client_inject_add_bus()
745 msg.cmd.init_chipset.switch_count = 0; uislib_client_inject_add_bus()
746 if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) uislib_client_inject_add_bus()
755 init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0); uislib_client_inject_add_bus()
756 msg.cmd.create_bus.bus_no = bus_no; uislib_client_inject_add_bus()
757 msg.cmd.create_bus.dev_count = 23; /* devNo+1; */ uislib_client_inject_add_bus()
758 msg.cmd.create_bus.channel_addr = channel_addr; uislib_client_inject_add_bus()
759 msg.cmd.create_bus.channel_bytes = n_channel_bytes; uislib_client_inject_add_bus()
760 if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) { uislib_client_inject_add_bus()
781 struct controlvm_message msg; uislib_client_inject_pause_vhba() local
784 init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0); uislib_client_inject_pause_vhba()
785 msg.cmd.device_change_state.bus_no = bus_no; uislib_client_inject_pause_vhba()
786 msg.cmd.device_change_state.dev_no = dev_no; uislib_client_inject_pause_vhba()
787 msg.cmd.device_change_state.state = segment_state_standby; uislib_client_inject_pause_vhba()
788 rc = pause_device(&msg); uislib_client_inject_pause_vhba()
798 struct controlvm_message msg; uislib_client_inject_resume_vhba() local
801 init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0); uislib_client_inject_resume_vhba()
802 msg.cmd.device_change_state.bus_no = bus_no; uislib_client_inject_resume_vhba()
803 msg.cmd.device_change_state.dev_no = dev_no; uislib_client_inject_resume_vhba()
804 msg.cmd.device_change_state.state = segment_state_running; uislib_client_inject_resume_vhba()
805 rc = resume_device(&msg); uislib_client_inject_resume_vhba()
818 struct controlvm_message msg; uislib_client_inject_add_vhba() local
827 init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0); uislib_client_inject_add_vhba()
832 msg.hdr.flags.test_message = 1; uislib_client_inject_add_vhba()
833 msg.cmd.create_device.bus_no = bus_no; uislib_client_inject_add_vhba()
834 msg.cmd.create_device.dev_no = dev_no; uislib_client_inject_add_vhba()
835 msg.cmd.create_device.dev_inst_uuid = inst_uuid; uislib_client_inject_add_vhba()
837 msg.cmd.create_device.intr = *intr; uislib_client_inject_add_vhba()
839 memset(&msg.cmd.create_device.intr, 0, uislib_client_inject_add_vhba()
841 msg.cmd.create_device.channel_addr = phys_chan_addr; uislib_client_inject_add_vhba()
847 msg.cmd.create_device.channel_bytes = chan_bytes; uislib_client_inject_add_vhba()
848 msg.cmd.create_device.data_type_uuid = spar_vhba_channel_protocol_uuid; uislib_client_inject_add_vhba()
849 if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) { uislib_client_inject_add_vhba()
873 struct controlvm_message msg; uislib_client_inject_add_vnic() local
882 init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0); uislib_client_inject_add_vnic()
887 msg.hdr.flags.test_message = 1; uislib_client_inject_add_vnic()
888 msg.cmd.create_device.bus_no = bus_no; uislib_client_inject_add_vnic()
889 msg.cmd.create_device.dev_no = dev_no; uislib_client_inject_add_vnic()
890 msg.cmd.create_device.dev_inst_uuid = inst_uuid; uislib_client_inject_add_vnic()
892 msg.cmd.create_device.intr = *intr; uislib_client_inject_add_vnic()
894 memset(&msg.cmd.create_device.intr, 0, uislib_client_inject_add_vnic()
896 msg.cmd.create_device.channel_addr = phys_chan_addr; uislib_client_inject_add_vnic()
902 msg.cmd.create_device.channel_bytes = chan_bytes; uislib_client_inject_add_vnic()
903 msg.cmd.create_device.data_type_uuid = spar_vnic_channel_protocol_uuid; uislib_client_inject_add_vnic()
904 if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) { uislib_client_inject_add_vnic()
919 struct controlvm_message msg; uislib_client_inject_pause_vnic() local
922 init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0); uislib_client_inject_pause_vnic()
923 msg.cmd.device_change_state.bus_no = bus_no; uislib_client_inject_pause_vnic()
924 msg.cmd.device_change_state.dev_no = dev_no; uislib_client_inject_pause_vnic()
925 msg.cmd.device_change_state.state = segment_state_standby; uislib_client_inject_pause_vnic()
926 rc = pause_device(&msg); uislib_client_inject_pause_vnic()
936 struct controlvm_message msg; uislib_client_inject_resume_vnic() local
939 init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0); uislib_client_inject_resume_vnic()
940 msg.cmd.device_change_state.bus_no = bus_no; uislib_client_inject_resume_vnic()
941 msg.cmd.device_change_state.dev_no = dev_no; uislib_client_inject_resume_vnic()
942 msg.cmd.device_change_state.state = segment_state_running; uislib_client_inject_resume_vnic()
943 rc = resume_device(&msg); uislib_client_inject_resume_vnic()
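Every helper in this file follows one shape: zero a struct controlvm_message through init_msg_header(), fill in the cmd union member that matches the chosen message id, then hand the message to the matching handler and compare the result against CONTROLVM_RESP_SUCCESS. A condensed sketch of that build-then-dispatch flow for a device pause, using only fields visible in the excerpts above; it relies on the file-local init_msg_header() and pause_device(), so it illustrates the in-file pattern rather than a standalone function:

/* Sketch of the controlvm build-then-dispatch pattern used throughout
 * uislib.c; callers check the return value against CONTROLVM_RESP_SUCCESS. */
static int inject_pause(u32 bus_no, u32 dev_no)
{
	struct controlvm_message msg;

	init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
	msg.cmd.device_change_state.bus_no = bus_no;
	msg.cmd.device_change_state.dev_no = dev_no;
	msg.cmd.device_change_state.state = segment_state_standby;

	return pause_device(&msg);
}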
/linux-4.1.27/drivers/gpu/drm/msm/edp/
H A Dedp_aux.c41 static int edp_msg_fifo_tx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) edp_msg_fifo_tx() argument
45 bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); edp_msg_fifo_tx()
46 bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); edp_msg_fifo_tx()
47 u8 *msgdata = msg->buffer; edp_msg_fifo_tx()
53 len = msg->size + 4; edp_msg_fifo_tx()
62 data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ edp_msg_fifo_tx()
66 data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */ edp_msg_fifo_tx()
67 data[2] = msg->address & 0xff; /* addr[7:0] */ edp_msg_fifo_tx()
68 data[3] = (msg->size - 1) & 0xff; /* len[7:0] */ edp_msg_fifo_tx()
88 static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) edp_msg_fifo_rx() argument
93 u32 len = msg->size; edp_msg_fifo_rx()
98 dp = msg->buffer; edp_msg_fifo_rx()
118 ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg) edp_aux_transfer() argument
122 bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); edp_aux_transfer()
123 bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); edp_aux_transfer()
126 if ((msg->size == 0) || (msg->buffer == NULL)) { edp_aux_transfer()
127 msg->reply = native ? edp_aux_transfer()
129 return msg->size; edp_aux_transfer()
132 /* msg sanity check */ edp_aux_transfer()
133 if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) || edp_aux_transfer()
134 (msg->size > AUX_CMD_I2C_MAX)) { edp_aux_transfer()
135 pr_err("%s: invalid msg: size(%zu), request(%x)\n", edp_aux_transfer()
136 __func__, msg->size, msg->request); edp_aux_transfer()
145 ret = edp_msg_fifo_tx(aux, msg); edp_aux_transfer()
165 ret = edp_msg_fifo_rx(aux, msg); edp_aux_transfer()
170 msg->reply = native ? edp_aux_transfer()
174 msg->reply = native ? edp_aux_transfer()
184 ret = msg->size; edp_aux_transfer()
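edp_aux_transfer() above is the .transfer hook of a struct drm_dp_aux: the caller describes one AUX transaction in a struct drm_dp_aux_msg (request type, 20-bit address, buffer, size) and gets back either the transferred size or a negative error, with the sink's ack/nack reported through msg->reply. A hedged sketch of driving such a hook directly for a one-byte native read; in practice the DRM DP helper functions build these messages for you:

#include <drm/drm_dp_helper.h>

/* Sketch: read one DPCD byte through a drm_dp_aux whose .transfer hook is
 * an implementation like edp_aux_transfer() above. */
static int dpcd_read_byte(struct drm_dp_aux *aux, u32 address, u8 *value)
{
	struct drm_dp_aux_msg msg = {
		.request = DP_AUX_NATIVE_READ,
		.address = address,	/* 20-bit DPCD address */
		.buffer	 = value,
		.size	 = 1,
	};
	ssize_t ret = aux->transfer(aux, &msg);

	if (ret < 0)
		return ret;
	/* msg.reply carries the sink's ack/nack status */
	return (msg.reply == DP_AUX_NATIVE_REPLY_ACK) ? 0 : -EIO;
}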
/linux-4.1.27/drivers/video/fbdev/
H A Dhyperv_fb.c241 struct synthvid_msg *msg) synthvid_send()
246 msg->pipe_hdr.type = PIPE_MSG_DATA; synthvid_send()
247 msg->pipe_hdr.size = msg->vid_hdr.size; synthvid_send()
249 ret = vmbus_sendpacket(hdev->channel, msg, synthvid_send()
250 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr), synthvid_send()
265 struct synthvid_msg msg; synthvid_send_situ() local
270 memset(&msg, 0, sizeof(struct synthvid_msg)); synthvid_send_situ()
272 msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE; synthvid_send_situ()
273 msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) + synthvid_send_situ()
275 msg.situ.user_ctx = 0; synthvid_send_situ()
276 msg.situ.video_output_count = 1; synthvid_send_situ()
277 msg.situ.video_output[0].active = 1; synthvid_send_situ()
278 msg.situ.video_output[0].vram_offset = 0; synthvid_send_situ()
279 msg.situ.video_output[0].depth_bits = info->var.bits_per_pixel; synthvid_send_situ()
280 msg.situ.video_output[0].width_pixels = info->var.xres; synthvid_send_situ()
281 msg.situ.video_output[0].height_pixels = info->var.yres; synthvid_send_situ()
282 msg.situ.video_output[0].pitch_bytes = info->fix.line_length; synthvid_send_situ()
284 synthvid_send(hdev, &msg); synthvid_send_situ()
292 struct synthvid_msg msg; synthvid_send_ptr() local
294 memset(&msg, 0, sizeof(struct synthvid_msg)); synthvid_send_ptr()
295 msg.vid_hdr.type = SYNTHVID_POINTER_POSITION; synthvid_send_ptr()
296 msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) + synthvid_send_ptr()
298 msg.ptr_pos.is_visible = 1; synthvid_send_ptr()
299 msg.ptr_pos.video_output = 0; synthvid_send_ptr()
300 msg.ptr_pos.image_x = 0; synthvid_send_ptr()
301 msg.ptr_pos.image_y = 0; synthvid_send_ptr()
302 synthvid_send(hdev, &msg); synthvid_send_ptr()
304 memset(&msg, 0, sizeof(struct synthvid_msg)); synthvid_send_ptr()
305 msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE; synthvid_send_ptr()
306 msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) + synthvid_send_ptr()
308 msg.ptr_shape.part_idx = CURSOR_COMPLETE; synthvid_send_ptr()
309 msg.ptr_shape.is_argb = 1; synthvid_send_ptr()
310 msg.ptr_shape.width = 1; synthvid_send_ptr()
311 msg.ptr_shape.height = 1; synthvid_send_ptr()
312 msg.ptr_shape.hot_x = 0; synthvid_send_ptr()
313 msg.ptr_shape.hot_y = 0; synthvid_send_ptr()
314 msg.ptr_shape.data[0] = 0; synthvid_send_ptr()
315 msg.ptr_shape.data[1] = 1; synthvid_send_ptr()
316 msg.ptr_shape.data[2] = 1; synthvid_send_ptr()
317 msg.ptr_shape.data[3] = 1; synthvid_send_ptr()
318 synthvid_send(hdev, &msg); synthvid_send_ptr()
327 struct synthvid_msg msg; synthvid_update() local
329 memset(&msg, 0, sizeof(struct synthvid_msg)); synthvid_update()
331 msg.vid_hdr.type = SYNTHVID_DIRT; synthvid_update()
332 msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) + synthvid_update()
334 msg.dirt.video_output = 0; synthvid_update()
335 msg.dirt.dirt_count = 1; synthvid_update()
336 msg.dirt.rect[0].x1 = 0; synthvid_update()
337 msg.dirt.rect[0].y1 = 0; synthvid_update()
338 msg.dirt.rect[0].x2 = info->var.xres; synthvid_update()
339 msg.dirt.rect[0].y2 = info->var.yres; synthvid_update()
341 synthvid_send(hdev, &msg); synthvid_update()
356 struct synthvid_msg *msg; synthvid_recv_sub() local
362 msg = (struct synthvid_msg *)par->recv_buf; synthvid_recv_sub()
365 if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE || synthvid_recv_sub()
366 msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) { synthvid_recv_sub()
367 memcpy(par->init_buf, msg, MAX_VMBUS_PKT_SIZE); synthvid_recv_sub()
373 if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) { synthvid_recv_sub()
379 par->update = msg->feature_chg.is_dirt_needed; synthvid_recv_sub()
417 struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf; synthvid_negotiate_ver() local
421 memset(msg, 0, sizeof(struct synthvid_msg)); synthvid_negotiate_ver()
422 msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST; synthvid_negotiate_ver()
423 msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) + synthvid_negotiate_ver()
425 msg->ver_req.version = ver; synthvid_negotiate_ver()
426 synthvid_send(hdev, msg); synthvid_negotiate_ver()
434 if (!msg->ver_resp.is_accepted) { synthvid_negotiate_ver()
491 struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf; synthvid_send_config() local
496 memset(msg, 0, sizeof(struct synthvid_msg)); synthvid_send_config()
497 msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION; synthvid_send_config()
498 msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) + synthvid_send_config()
500 msg->vram.user_ctx = msg->vram.vram_gpa = info->fix.smem_start; synthvid_send_config()
501 msg->vram.is_vram_gpa_specified = 1; synthvid_send_config()
502 synthvid_send(hdev, msg); synthvid_send_config()
510 if (msg->vram_ack.user_ctx != info->fix.smem_start) { synthvid_send_config()
240 synthvid_send(struct hv_device *hdev, struct synthvid_msg *msg) synthvid_send() argument
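Each sender above builds a struct synthvid_msg the same way: zero it, set vid_hdr.type and vid_hdr.size (header size plus payload size), fill the payload member that matches the type, and pass the result to synthvid_send(), which prepends the PIPE_MSG_DATA pipe header and pushes the packet over the VMBus channel. A condensed sketch of that pattern for the dirty-rectangle update, built only from fields visible in the excerpts; it uses the file-local synthvid_send(), so it illustrates the in-file convention, not a standalone driver:

/* Sketch: mark the whole frame buffer dirty, mirroring synthvid_update().
 * x2/y2 are the exclusive bottom-right corner of the dirty rectangle. */
static void send_full_screen_dirt(struct hv_device *hdev, struct fb_info *info)
{
	struct synthvid_msg msg;

	memset(&msg, 0, sizeof(struct synthvid_msg));
	msg.vid_hdr.type = SYNTHVID_DIRT;
	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
			   sizeof(msg.dirt);	/* header + payload */
	msg.dirt.video_output = 0;
	msg.dirt.dirt_count = 1;
	msg.dirt.rect[0].x1 = 0;
	msg.dirt.rect[0].y1 = 0;
	msg.dirt.rect[0].x2 = info->var.xres;
	msg.dirt.rect[0].y2 = info->var.yres;

	synthvid_send(hdev, &msg);
}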
/linux-4.1.27/drivers/hsi/controllers/
H A Domap_ssi_port.c33 static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused) hsi_dummy_msg()
194 static int ssi_claim_lch(struct hsi_msg *msg) ssi_claim_lch() argument
197 struct hsi_port *port = hsi_get_port(msg->cl); ssi_claim_lch()
203 if (!omap_ssi->gdd_trn[lch].msg) { ssi_claim_lch()
204 omap_ssi->gdd_trn[lch].msg = msg; ssi_claim_lch()
205 omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl; ssi_claim_lch()
212 static int ssi_start_dma(struct hsi_msg *msg, int lch) ssi_start_dma() argument
214 struct hsi_port *port = hsi_get_port(msg->cl); ssi_start_dma()
226 if (msg->ttype == HSI_MSG_READ) { ssi_start_dma()
227 err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, ssi_start_dma()
236 ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */ ssi_start_dma()
240 SSI_SSR_BUFFER_CH_REG(msg->channel); ssi_start_dma()
241 d_addr = sg_dma_address(msg->sgt.sgl); ssi_start_dma()
243 err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, ssi_start_dma()
252 ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */ ssi_start_dma()
255 s_addr = sg_dma_address(msg->sgt.sgl); ssi_start_dma()
257 SSI_SST_BUFFER_CH_REG(msg->channel); ssi_start_dma()
269 writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length), ssi_start_dma()
278 msg->status = HSI_STATUS_PROCEEDING; ssi_start_dma()
283 static int ssi_start_pio(struct hsi_msg *msg) ssi_start_pio() argument
285 struct hsi_port *port = hsi_get_port(msg->cl); ssi_start_pio()
292 if (msg->ttype == HSI_MSG_WRITE) { ssi_start_pio()
293 val = SSI_DATAACCEPT(msg->channel); ssi_start_pio()
297 val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED; ssi_start_pio()
300 msg->ttype ? "write" : "read"); ssi_start_pio()
304 msg->actual_len = 0; ssi_start_pio()
305 msg->status = HSI_STATUS_PROCEEDING; ssi_start_pio()
312 struct hsi_msg *msg; ssi_start_transfer() local
317 msg = list_first_entry(queue, struct hsi_msg, link); ssi_start_transfer()
318 if (msg->status != HSI_STATUS_QUEUED) ssi_start_transfer()
320 if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32))) ssi_start_transfer()
321 lch = ssi_claim_lch(msg); ssi_start_transfer()
323 return ssi_start_dma(msg, lch); ssi_start_transfer()
325 return ssi_start_pio(msg); ssi_start_transfer()
328 static int ssi_async_break(struct hsi_msg *msg) ssi_async_break() argument
330 struct hsi_port *port = hsi_get_port(msg->cl); ssi_async_break()
338 if (msg->ttype == HSI_MSG_WRITE) { ssi_async_break()
344 msg->status = HSI_STATUS_COMPLETED; ssi_async_break()
345 msg->complete(msg); ssi_async_break()
356 msg->status = HSI_STATUS_PROCEEDING; ssi_async_break()
357 list_add_tail(&msg->link, &omap_port->brkqueue); ssi_async_break()
366 static int ssi_async(struct hsi_msg *msg) ssi_async() argument
368 struct hsi_port *port = hsi_get_port(msg->cl); ssi_async()
373 BUG_ON(!msg); ssi_async()
375 if (msg->sgt.nents > 1) ssi_async()
378 if (msg->break_frame) ssi_async()
379 return ssi_async_break(msg); ssi_async()
381 if (msg->ttype) { ssi_async()
382 BUG_ON(msg->channel >= omap_port->sst.channels); ssi_async()
383 queue = &omap_port->txqueue[msg->channel]; ssi_async()
385 BUG_ON(msg->channel >= omap_port->ssr.channels); ssi_async()
386 queue = &omap_port->rxqueue[msg->channel]; ssi_async()
388 msg->status = HSI_STATUS_QUEUED; ssi_async()
390 list_add_tail(&msg->link, queue); ssi_async()
393 list_del(&msg->link); ssi_async()
394 msg->status = HSI_STATUS_ERROR; ssi_async()
397 dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", ssi_async()
398 msg->status, msg->ttype, msg->channel); ssi_async()
422 struct hsi_msg *msg; ssi_flush_queue() local
425 msg = list_entry(node, struct hsi_msg, link); list_for_each_safe()
426 if ((cl) && (cl != msg->cl)) list_for_each_safe()
429 pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n", list_for_each_safe()
430 msg->channel, msg, msg->sgt.sgl->length, list_for_each_safe()
431 msg->ttype, msg->context); list_for_each_safe()
432 if (msg->destructor) list_for_each_safe()
433 msg->destructor(msg); list_for_each_safe()
435 hsi_free_msg(msg); list_for_each_safe()
509 struct hsi_msg *msg; ssi_flush() local
519 msg = omap_ssi->gdd_trn[i].msg; ssi_flush()
520 if (!msg || (port != hsi_get_port(msg->cl))) ssi_flush()
523 if (msg->ttype == HSI_MSG_READ) ssi_flush()
525 omap_ssi->gdd_trn[i].msg = NULL; ssi_flush()
605 struct hsi_msg *msg; ssi_transfer() local
612 msg = list_first_entry(queue, struct hsi_msg, link); ssi_transfer()
613 msg->status = HSI_STATUS_ERROR; ssi_transfer()
614 msg->actual_len = 0; ssi_transfer()
615 list_del(&msg->link); ssi_transfer()
617 msg->complete(msg); ssi_transfer()
630 struct hsi_msg *msg; ssi_cleanup_queues() local
644 msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg, ssi_cleanup_queues()
646 if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { ssi_cleanup_queues()
657 msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, ssi_cleanup_queues()
659 if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { ssi_cleanup_queues()
689 struct hsi_msg *msg; ssi_cleanup_gdd() local
695 msg = omap_ssi->gdd_trn[i].msg; ssi_cleanup_gdd()
696 if ((!msg) || (msg->cl != cl)) ssi_cleanup_gdd()
704 if (msg->ttype == HSI_MSG_READ) ssi_cleanup_gdd()
706 omap_ssi->gdd_trn[i].msg = NULL; ssi_cleanup_gdd()
767 struct hsi_msg *msg; ssi_error() local
783 msg = omap_ssi->gdd_trn[i].msg; ssi_error()
784 if ((msg) && (msg->ttype == HSI_MSG_READ)) { ssi_error()
787 omap_ssi->gdd_trn[i].msg = NULL; ssi_error()
807 msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, ssi_error()
809 list_del(&msg->link); ssi_error()
810 msg->status = HSI_STATUS_ERROR; ssi_error()
812 msg->complete(msg); ssi_error()
825 struct hsi_msg *msg; ssi_break_complete() local
840 list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) { ssi_break_complete()
841 msg->status = HSI_STATUS_COMPLETED; ssi_break_complete()
843 list_del(&msg->link); ssi_break_complete()
845 msg->complete(msg); ssi_break_complete()
855 struct hsi_msg *msg; ssi_pio_complete() local
861 msg = list_first_entry(queue, struct hsi_msg, link); ssi_pio_complete()
862 if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { ssi_pio_complete()
863 msg->actual_len = 0; ssi_pio_complete()
864 msg->status = HSI_STATUS_PENDING; ssi_pio_complete()
866 if (msg->ttype == HSI_MSG_WRITE) ssi_pio_complete()
867 val = SSI_DATAACCEPT(msg->channel); ssi_pio_complete()
869 val = SSI_DATAAVAILABLE(msg->channel); ssi_pio_complete()
870 if (msg->status == HSI_STATUS_PROCEEDING) { ssi_pio_complete()
871 buf = sg_virt(msg->sgt.sgl) + msg->actual_len; ssi_pio_complete()
872 if (msg->ttype == HSI_MSG_WRITE) ssi_pio_complete()
874 SSI_SST_BUFFER_CH_REG(msg->channel)); ssi_pio_complete()
877 SSI_SSR_BUFFER_CH_REG(msg->channel)); ssi_pio_complete()
878 dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel, ssi_pio_complete()
879 msg->ttype, *buf); ssi_pio_complete()
880 msg->actual_len += sizeof(*buf); ssi_pio_complete()
881 if (msg->actual_len >= msg->sgt.sgl->length) ssi_pio_complete()
882 msg->status = HSI_STATUS_COMPLETED; ssi_pio_complete()
887 if ((msg->status == HSI_STATUS_PROCEEDING) || ssi_pio_complete()
888 ((msg->status == HSI_STATUS_COMPLETED) && ssi_pio_complete()
889 (msg->ttype == HSI_MSG_WRITE))) { ssi_pio_complete()
900 if (msg->ttype == HSI_MSG_WRITE) { ssi_pio_complete()
907 list_del(&msg->link); ssi_pio_complete()
909 msg->complete(msg); ssi_pio_complete()
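ssi_async() above is this controller's implementation of the HSI core's asynchronous transfer entry point: a client hands in a struct hsi_msg whose scatterlist describes the buffer, and the port either queues it for PIO/DMA or fails it and calls msg->complete() with an error status. A hedged sketch of the client side of that contract, assuming the generic hsi_alloc_msg()/hsi_async_write() helpers from <linux/hsi/hsi.h>:

#include <linux/hsi/hsi.h>
#include <linux/scatterlist.h>

/* Sketch: queue an asynchronous HSI write on one channel. The completion
 * callback runs once the port marks the message completed (or errored). */
static int queue_write(struct hsi_client *cl, void *buf, size_t len,
		       unsigned int channel,
		       void (*done)(struct hsi_msg *msg))
{
	struct hsi_msg *msg;

	msg = hsi_alloc_msg(1, GFP_KERNEL);	/* one scatterlist entry */
	if (!msg)
		return -ENOMEM;

	sg_init_one(msg->sgt.sgl, buf, len);
	msg->channel = channel;
	msg->complete = done;			/* completion callback */
	msg->destructor = hsi_free_msg;		/* used if the queue is flushed */

	return hsi_async_write(cl, msg);	/* sets ttype = HSI_MSG_WRITE */
}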
/linux-4.1.27/drivers/mfd/
H A Dcros_ec.c31 struct cros_ec_command *msg) cros_ec_prepare_tx()
36 BUG_ON(msg->outsize > EC_PROTO2_MAX_PARAM_SIZE); cros_ec_prepare_tx()
38 out[0] = EC_CMD_VERSION0 + msg->version; cros_ec_prepare_tx()
39 out[1] = msg->command; cros_ec_prepare_tx()
40 out[2] = msg->outsize; cros_ec_prepare_tx()
42 for (i = 0; i < msg->outsize; i++) cros_ec_prepare_tx()
43 csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->outdata[i]; cros_ec_prepare_tx()
44 out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = (uint8_t)(csum & 0xff); cros_ec_prepare_tx()
46 return EC_MSG_TX_PROTO_BYTES + msg->outsize; cros_ec_prepare_tx()
51 struct cros_ec_command *msg) cros_ec_check_result()
53 switch (msg->result) { cros_ec_check_result()
58 msg->command); cros_ec_check_result()
62 msg->command, msg->result); cros_ec_check_result()
69 struct cros_ec_command *msg) cros_ec_cmd_xfer()
74 ret = ec_dev->cmd_xfer(ec_dev, msg); cros_ec_cmd_xfer()
75 if (msg->result == EC_RES_IN_PROGRESS) { cros_ec_cmd_xfer()
94 msg->result = status_msg.result; cros_ec_cmd_xfer()
30 cros_ec_prepare_tx(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) cros_ec_prepare_tx() argument
50 cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) cros_ec_check_result() argument
68 cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) cros_ec_cmd_xfer() argument
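cros_ec_cmd_xfer() above is the common entry point for talking to the embedded controller: the caller fills a struct cros_ec_command with the command number, protocol version and outgoing payload, the transport-specific cmd_xfer hook does the wire work, and msg.result carries the EC's status code (EC_RES_IN_PROGRESS being handled by polling for completion). A hedged sketch of a caller; EC_CMD_EXAMPLE is a placeholder for a real command id from cros_ec_commands.h, and the sketch assumes outdata[] is the in-struct parameter buffer used by this kernel's struct cros_ec_command:

#include <linux/mfd/cros_ec.h>

/* Sketch: issue one parameterised command to the EC and check its status. */
static int ec_send_example(struct cros_ec_device *ec_dev, u8 arg)
{
	struct cros_ec_command msg = {
		.version = 0,
		.command = EC_CMD_EXAMPLE,	/* placeholder command id */
		.outsize = 1,
		.insize	 = 0,
	};
	int ret;

	msg.outdata[0] = arg;			/* single parameter byte */

	ret = cros_ec_cmd_xfer(ec_dev, &msg);
	if (ret < 0)
		return ret;			/* transport-level failure */
	if (msg.result != EC_RES_SUCCESS)
		return -EIO;			/* EC rejected the command */
	return 0;
}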
H A Dipaq-micro.c37 struct ipaq_micro_msg *msg = micro->msg; ipaq_micro_trigger_tx() local
45 checksum = ((msg->id & 0x0f) << 4) | (msg->tx_len & 0x0f); ipaq_micro_trigger_tx()
48 for (i = 0; i < msg->tx_len; i++) { ipaq_micro_trigger_tx()
49 tx->buf[bp++] = msg->tx_data[i]; ipaq_micro_trigger_tx()
50 checksum += msg->tx_data[i]; ipaq_micro_trigger_tx()
65 int ipaq_micro_tx_msg(struct ipaq_micro *micro, struct ipaq_micro_msg *msg) ipaq_micro_tx_msg() argument
69 dev_dbg(micro->dev, "TX msg: %02x, %d bytes\n", msg->id, msg->tx_len); ipaq_micro_tx_msg()
72 if (micro->msg) { ipaq_micro_tx_msg()
73 list_add_tail(&msg->node, &micro->queue); ipaq_micro_tx_msg()
77 micro->msg = msg; ipaq_micro_tx_msg()
88 dev_dbg(micro->dev, "RX msg: %02x, %d bytes\n", id, len); micro_rx_msg()
100 if (micro->msg && micro->msg->id == id) { micro_rx_msg()
101 struct ipaq_micro_msg *msg = micro->msg; micro_rx_msg() local
103 memcpy(msg->rx_data, data, len); micro_rx_msg()
104 msg->rx_len = len; micro_rx_msg()
105 complete(&micro->msg->ack); micro_rx_msg()
107 micro->msg = list_entry(micro->queue.next, micro_rx_msg()
110 list_del_init(&micro->msg->node); micro_rx_msg()
113 micro->msg = NULL; micro_rx_msg()
118 if (!micro->msg) micro_rx_msg()
122 micro->msg->id); micro_rx_msg()
139 "unknown msg %d [%d] ", id, len); micro_rx_msg()
195 struct ipaq_micro_msg msg = { ipaq_micro_get_version() local
199 ipaq_micro_tx_msg_sync(micro, &msg); ipaq_micro_get_version()
200 if (msg.rx_len == 4) { ipaq_micro_get_version()
201 memcpy(micro->version, msg.rx_data, 4); ipaq_micro_get_version()
203 } else if (msg.rx_len == 9) { ipaq_micro_get_version()
204 memcpy(micro->version, msg.rx_data, 4); ipaq_micro_get_version()
209 "illegal version message %d bytes\n", msg.rx_len); ipaq_micro_get_version()
216 struct ipaq_micro_msg msg = { ipaq_micro_eeprom_read() local
222 msg.tx_data[0] = address + i; ipaq_micro_eeprom_read()
223 msg.tx_data[1] = 1; ipaq_micro_eeprom_read()
224 msg.tx_len = 2; ipaq_micro_eeprom_read()
225 ipaq_micro_tx_msg_sync(micro, &msg); ipaq_micro_eeprom_read()
226 memcpy(data + (i * 2), msg.rx_data, 2); ipaq_micro_eeprom_read()
311 if (micro->msg) micro_reset_comm()
312 complete(&micro->msg->ack); micro_reset_comm()
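The micro's protocol above is a small TX queue: ipaq_micro_tx_msg() either starts the transfer right away or queues the struct ipaq_micro_msg, and micro_rx_msg() matches the reply by id, copies it into rx_data/rx_len and completes the message's ack. ipaq_micro_get_version() and ipaq_micro_eeprom_read() use the synchronous wrapper built on top of that. A condensed sketch of one synchronous round trip; MSG_EXAMPLE_ID is a placeholder for a real message id from the ipaq-micro headers:

#include <linux/mfd/ipaq-micro.h>

/* Sketch: one synchronous request/response exchange, mirroring
 * ipaq_micro_eeprom_read() above. */
static int micro_query(struct ipaq_micro *micro, u8 arg, u8 *reply, int len)
{
	struct ipaq_micro_msg msg = {
		.id = MSG_EXAMPLE_ID,		/* placeholder message id */
	};

	init_completion(&msg.ack);		/* completed by micro_rx_msg() */
	msg.tx_data[0] = arg;
	msg.tx_len = 1;

	ipaq_micro_tx_msg_sync(micro, &msg);	/* queues and waits for the ack */

	if (msg.rx_len < len)
		return -EIO;
	memcpy(reply, msg.rx_data, len);
	return 0;
}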
H A Dcros_ec_i2c.c33 struct cros_ec_command *msg) cros_ec_cmd_xfer_i2c()
54 packet_len = msg->insize + 3; cros_ec_cmd_xfer_i2c()
65 packet_len = msg->outsize + 4; cros_ec_cmd_xfer_i2c()
72 out_buf[0] = EC_CMD_VERSION0 + msg->version; cros_ec_cmd_xfer_i2c()
73 out_buf[1] = msg->command; cros_ec_cmd_xfer_i2c()
74 out_buf[2] = msg->outsize; cros_ec_cmd_xfer_i2c()
78 for (i = 0; i < msg->outsize; i++) { cros_ec_cmd_xfer_i2c()
79 out_buf[3 + i] = msg->outdata[i]; cros_ec_cmd_xfer_i2c()
82 out_buf[3 + msg->outsize] = sum; cros_ec_cmd_xfer_i2c()
96 msg->result = i2c_msg[1].buf[0]; cros_ec_cmd_xfer_i2c()
97 ret = cros_ec_check_result(ec_dev, msg); cros_ec_cmd_xfer_i2c()
102 if (len > msg->insize) { cros_ec_cmd_xfer_i2c()
104 len, msg->insize); cros_ec_cmd_xfer_i2c()
112 msg->indata[i] = in_buf[2 + i]; cros_ec_cmd_xfer_i2c()
32 cros_ec_cmd_xfer_i2c(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) cros_ec_cmd_xfer_i2c() argument
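cros_ec_cmd_xfer_i2c() above tunnels an EC command over plain I2C: a three-byte header (protocol version, command, payload length), the payload, and a trailing 8-bit additive checksum, answered by a status byte, a length byte, the response payload and its own checksum. A hedged sketch of just the request framing, with the checksum assumed to cover the header bytes as well as the payload (as cros_ec_prepare_tx() does for the shared transport path):

#include <linux/mfd/cros_ec.h>

/* Sketch: frame one outgoing EC command for the I2C tunnel; returns the
 * total frame length (header + payload + checksum byte). */
static int ec_i2c_frame_request(u8 *out_buf, const struct cros_ec_command *msg)
{
	u8 sum = 0;
	int i;

	out_buf[0] = EC_CMD_VERSION0 + msg->version;	/* protocol version */
	out_buf[1] = msg->command;
	out_buf[2] = msg->outsize;

	for (i = 0; i < 3; i++)
		sum += out_buf[i];
	for (i = 0; i < msg->outsize; i++) {
		out_buf[3 + i] = msg->outdata[i];
		sum += out_buf[3 + i];
	}
	out_buf[3 + msg->outsize] = sum;		/* trailing checksum */

	return msg->outsize + 4;
}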
H A Dtps65912-spi.c34 struct spi_message msg; tps65912_spi_write() local
44 spi_message_init(&msg); tps65912_spi_write()
45 spi_message_add_tail(&xfer, &msg); tps65912_spi_write()
47 ret = spi_sync(spi, &msg); tps65912_spi_write()
58 struct spi_message msg; tps65912_spi_read() local
71 spi_message_init(&msg); tps65912_spi_read()
72 spi_message_add_tail(&xfer, &msg); tps65912_spi_read()
77 ret = spi_sync(spi, &msg); tps65912_spi_read()
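Both helpers above use the standard synchronous SPI pattern: describe the buffers in a struct spi_transfer, chain it onto a struct spi_message with spi_message_init()/spi_message_add_tail(), and block in spi_sync(). A minimal self-contained sketch of that pattern:

#include <linux/spi/spi.h>

/* Sketch: synchronous full-duplex transfer of `len` bytes using the
 * message/transfer pattern shown above. */
static int spi_xfer_sync(struct spi_device *spi,
			 const void *tx, void *rx, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(spi, &msg);	/* 0 on success, negative errno on error */
}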
H A D88pm860x-i2c.c92 struct i2c_msg msg[2] = { read_device() local
110 msg[1].len = bytes; read_device()
115 ret = adap->algo->master_xfer(adap, msg, num); read_device()
127 struct i2c_msg msg; write_device() local
132 msg.addr = i2c->addr; write_device()
133 msg.flags = 0; write_device()
134 msg.len = bytes + 1; write_device()
135 msg.buf = buf; write_device()
137 ret = adap->algo->master_xfer(adap, &msg, 1); write_device()
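read_device() and write_device() above call the adapter's master_xfer hook directly; the more common way to express the same register read is a two-message write-then-read through i2c_transfer(), where the first message carries the register address and the second, flagged I2C_M_RD, receives the data under a repeated start. A sketch of that form:

#include <linux/i2c.h>

/* Sketch: register read as a write(reg)/read(data) message pair, the shape
 * that read_device() above assembles by hand. */
static int pmic_read_regs(struct i2c_client *i2c, u8 reg, u8 *data, int len)
{
	struct i2c_msg msgs[2] = {
		{
			.addr	= i2c->addr,
			.flags	= 0,		/* write: register address */
			.len	= 1,
			.buf	= &reg,
		}, {
			.addr	= i2c->addr,
			.flags	= I2C_M_RD,	/* read: register contents */
			.len	= len,
			.buf	= data,
		},
	};

	if (i2c_transfer(i2c->adapter, msgs, 2) != 2)
		return -EREMOTEIO;
	return 0;
}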
/linux-4.1.27/drivers/xen/xenbus/
H A Dxenbus_xs.c168 struct xs_stored_msg *msg; read_reply() local
192 msg = list_entry(xs_state.reply_list.next, read_reply()
194 list_del(&msg->list); read_reply()
198 *type = msg->hdr.type; read_reply()
200 *len = msg->hdr.len; read_reply()
201 body = msg->u.reply.body; read_reply()
203 kfree(msg); read_reply()
233 void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) xenbus_dev_request_and_reply() argument
236 struct xsd_sockmsg req_msg = *msg; xenbus_dev_request_and_reply()
244 err = xb_write(msg, sizeof(*msg) + msg->len); xenbus_dev_request_and_reply()
246 msg->type = XS_ERROR; xenbus_dev_request_and_reply()
249 ret = read_reply(&msg->type, &msg->len); xenbus_dev_request_and_reply()
256 if ((msg->type == XS_TRANSACTION_END) || xenbus_dev_request_and_reply()
258 (msg->type == XS_ERROR))) xenbus_dev_request_and_reply()
272 struct xsd_sockmsg msg; xs_talkv() local
277 msg.tx_id = t.id; xs_talkv()
278 msg.req_id = 0; xs_talkv()
279 msg.type = type; xs_talkv()
280 msg.len = 0; xs_talkv()
282 msg.len += iovec[i].iov_len; xs_talkv()
286 err = xb_write(&msg, sizeof(msg)); xs_talkv()
300 ret = read_reply(&msg.type, len); xs_talkv()
307 if (msg.type == XS_ERROR) { xs_talkv()
313 if (msg.type != type) { xs_talkv()
315 msg.type, type); xs_talkv()
730 struct xs_stored_msg *msg, *tmp; unregister_xenbus_watch() local
756 list_for_each_entry_safe(msg, tmp, &watch_events, list) { unregister_xenbus_watch()
757 if (msg->u.watch.handle != watch) unregister_xenbus_watch()
759 list_del(&msg->list); unregister_xenbus_watch()
760 kfree(msg->u.watch.vec); unregister_xenbus_watch()
761 kfree(msg); unregister_xenbus_watch()
809 struct xs_stored_msg *msg; xenwatch_thread() local
827 msg = list_entry(ent, struct xs_stored_msg, list); xenwatch_thread()
828 msg->u.watch.handle->callback( xenwatch_thread()
829 msg->u.watch.handle, xenwatch_thread()
830 (const char **)msg->u.watch.vec, xenwatch_thread()
831 msg->u.watch.vec_size); xenwatch_thread()
832 kfree(msg->u.watch.vec); xenwatch_thread()
833 kfree(msg); xenwatch_thread()
844 struct xs_stored_msg *msg; process_msg() local
864 msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); process_msg()
865 if (msg == NULL) { process_msg()
870 err = xb_read(&msg->hdr, sizeof(msg->hdr)); process_msg()
872 kfree(msg); process_msg()
876 if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { process_msg()
877 kfree(msg); process_msg()
882 body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); process_msg()
884 kfree(msg); process_msg()
889 err = xb_read(body, msg->hdr.len); process_msg()
892 kfree(msg); process_msg()
895 body[msg->hdr.len] = '\0'; process_msg()
897 if (msg->hdr.type == XS_WATCH_EVENT) { process_msg()
898 msg->u.watch.vec = split(body, msg->hdr.len, process_msg()
899 &msg->u.watch.vec_size); process_msg()
900 if (IS_ERR(msg->u.watch.vec)) { process_msg()
901 err = PTR_ERR(msg->u.watch.vec); process_msg()
902 kfree(msg); process_msg()
907 msg->u.watch.handle = find_watch( process_msg()
908 msg->u.watch.vec[XS_WATCH_TOKEN]); process_msg()
909 if (msg->u.watch.handle != NULL) { process_msg()
911 list_add_tail(&msg->list, &watch_events); process_msg()
915 kfree(msg->u.watch.vec); process_msg()
916 kfree(msg); process_msg()
920 msg->u.reply.body = body; process_msg()
922 list_add_tail(&msg->list, &xs_state.reply_list); process_msg()
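The functions above are the low-level xenstore machinery: xs_talkv() writes an xsd_sockmsg header plus iovecs, read_reply() waits for the matching entry on xs_state.reply_list, and process_msg()/xenwatch_thread() route incoming watch events to their handlers. Drivers normally stay above this layer and use the accessors built on it. A hedged sketch of that typical usage, assuming the standard xenbus_read()/xenbus_printf() helpers and with the node names chosen purely for illustration:

#include <xen/xenbus.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Sketch: read a backend property and publish a frontend value; both calls
 * ultimately funnel through xs_talkv() above. */
static int example_publish_ready(struct xenbus_device *dev)
{
	char *val;

	val = xenbus_read(XBT_NIL, dev->otherend, "feature-example", NULL);
	if (IS_ERR(val))
		return PTR_ERR(val);	/* node missing or store error */
	kfree(val);

	return xenbus_printf(XBT_NIL, dev->nodename, "ready", "%d", 1);
}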
/linux-4.1.27/drivers/char/ipmi/
H A Dipmi_powernv.c49 struct ipmi_smi_msg *msg, u8 completion_code) send_error_reply()
51 msg->rsp[0] = msg->data[0] | 0x4; send_error_reply()
52 msg->rsp[1] = msg->data[1]; send_error_reply()
53 msg->rsp[2] = completion_code; send_error_reply()
54 msg->rsp_size = 3; send_error_reply()
55 ipmi_smi_msg_received(smi->intf, msg); send_error_reply()
58 static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) ipmi_powernv_send() argument
67 if (msg->data_size > IPMI_MAX_MSG_LENGTH) { ipmi_powernv_send()
73 if (msg->data_size < 2) { ipmi_powernv_send()
88 opal_msg->netfn = msg->data[0]; ipmi_powernv_send()
89 opal_msg->cmd = msg->data[1]; ipmi_powernv_send()
90 if (msg->data_size > 2) ipmi_powernv_send()
91 memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2); ipmi_powernv_send()
94 size = sizeof(*opal_msg) + msg->data_size - 2; ipmi_powernv_send()
102 smi->cur_msg = msg; ipmi_powernv_send()
111 send_error_reply(smi, msg, comp); ipmi_powernv_send()
117 struct ipmi_smi_msg *msg; ipmi_powernv_recv() local
122 pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__, ipmi_powernv_recv()
133 msg = smi->cur_msg; ipmi_powernv_recv()
146 ipmi_free_smi_msg(msg); ipmi_powernv_recv()
163 msg->rsp[0] = opal_msg->netfn; ipmi_powernv_recv()
164 msg->rsp[1] = opal_msg->cmd; ipmi_powernv_recv()
166 memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg)); ipmi_powernv_recv()
167 msg->rsp_size = 2 + size - sizeof(*opal_msg); ipmi_powernv_recv()
171 ipmi_smi_msg_received(smi->intf, msg); ipmi_powernv_recv()
48 send_error_reply(struct ipmi_smi_powernv *smi, struct ipmi_smi_msg *msg, u8 completion_code) send_error_reply() argument
H A Dipmi_msghandler.c60 struct ipmi_smi_msg *msg);
424 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
504 struct ipmi_recv_msg *msg, *msg2; free_recv_msg_list() local
506 list_for_each_entry_safe(msg, msg2, q, link) { list_for_each_entry_safe()
507 list_del(&msg->link); list_for_each_entry_safe()
508 ipmi_free_recv_msg(msg); list_for_each_entry_safe()
514 struct ipmi_smi_msg *msg, *msg2; free_smi_msg_list() local
516 list_for_each_entry_safe(msg, msg2, q, link) { list_for_each_entry_safe()
517 list_del(&msg->link); list_for_each_entry_safe()
518 ipmi_free_smi_msg(msg); list_for_each_entry_safe()
733 static void deliver_response(struct ipmi_recv_msg *msg) deliver_response() argument
735 if (!msg->user) { deliver_response()
736 ipmi_smi_t intf = msg->user_msg_data; deliver_response()
740 intf->null_user_handler(intf, msg); deliver_response()
746 ipmi_free_recv_msg(msg); deliver_response()
748 ipmi_user_t user = msg->user; deliver_response()
749 user->handler->ipmi_recv_hndl(msg, user->handler_data); deliver_response()
754 deliver_err_response(struct ipmi_recv_msg *msg, int err) deliver_err_response() argument
756 msg->recv_type = IPMI_RESPONSE_RECV_TYPE; deliver_err_response()
757 msg->msg_data[0] = err; deliver_err_response()
758 msg->msg.netfn |= 1; /* Convert to a response. */ deliver_err_response()
759 msg->msg.data_len = 1; deliver_err_response()
760 msg->msg.data = msg->msg_data; deliver_err_response()
761 deliver_response(msg); deliver_err_response()
833 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; intf_find_seq() local
835 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) intf_find_seq()
836 && (msg->msg.netfn == netfn) intf_find_seq()
837 && (ipmi_addr_equal(addr, &(msg->addr)))) { intf_find_seq()
838 *recv_msg = msg; intf_find_seq()
886 struct ipmi_recv_msg *msg = NULL; intf_err_seq() local
901 msg = ent->recv_msg; intf_err_seq()
906 if (msg) intf_err_seq()
907 deliver_err_response(msg, err); intf_err_seq()
1228 struct ipmi_recv_msg *msg, *msg2; ipmi_set_gets_events() local
1255 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) ipmi_set_gets_events()
1256 list_move_tail(&msg->link, &msgs); ipmi_set_gets_events()
1267 list_for_each_entry_safe(msg, msg2, &msgs, link) { ipmi_set_gets_events()
1268 msg->user = user; ipmi_set_gets_events()
1270 deliver_response(msg); ipmi_set_gets_events()
1404 struct kernel_ipmi_msg *msg, format_ipmb_msg()
1421 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3); format_ipmb_msg()
1425 smi_msg->data[i+8] = msg->cmd; format_ipmb_msg()
1428 if (msg->data_len > 0) format_ipmb_msg()
1429 memcpy(&(smi_msg->data[i+9]), msg->data, format_ipmb_msg()
1430 msg->data_len); format_ipmb_msg()
1431 smi_msg->data_size = msg->data_len + 9; format_ipmb_msg()
1448 struct kernel_ipmi_msg *msg, format_lan_msg()
1460 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3); format_lan_msg()
1464 smi_msg->data[9] = msg->cmd; format_lan_msg()
1467 if (msg->data_len > 0) format_lan_msg()
1468 memcpy(&(smi_msg->data[10]), msg->data, format_lan_msg()
1469 msg->data_len); format_lan_msg()
1470 smi_msg->data_size = msg->data_len + 10; format_lan_msg()
1533 struct kernel_ipmi_msg *msg, i_ipmi_request()
1582 recv_msg->msg = *msg; i_ipmi_request()
1587 if (msg->netfn & 1) { i_ipmi_request()
1602 if ((msg->netfn == IPMI_NETFN_APP_REQUEST) i_ipmi_request()
1603 && ((msg->cmd == IPMI_SEND_MSG_CMD) i_ipmi_request()
1604 || (msg->cmd == IPMI_GET_MSG_CMD) i_ipmi_request()
1605 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { i_ipmi_request()
1615 if (((msg->netfn == IPMI_NETFN_APP_REQUEST) i_ipmi_request()
1616 && ((msg->cmd == IPMI_COLD_RESET_CMD) i_ipmi_request()
1617 || (msg->cmd == IPMI_WARM_RESET_CMD))) i_ipmi_request()
1618 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) { i_ipmi_request()
1631 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { i_ipmi_request()
1637 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); i_ipmi_request()
1638 smi_msg->data[1] = msg->cmd; i_ipmi_request()
1641 if (msg->data_len > 0) i_ipmi_request()
1642 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); i_ipmi_request()
1643 smi_msg->data_size = msg->data_len + 2; i_ipmi_request()
1689 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { i_ipmi_request()
1704 if (recv_msg->msg.netfn & 0x1) { i_ipmi_request()
1710 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, i_ipmi_request()
1752 format_ipmb_msg(smi_msg, msg, ipmb_addr, i_ipmi_request()
1763 recv_msg->msg.data = recv_msg->msg_data; i_ipmi_request()
1764 recv_msg->msg.data_len = smi_msg->data_size; i_ipmi_request()
1803 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { i_ipmi_request()
1818 if (recv_msg->msg.netfn & 0x1) { i_ipmi_request()
1824 format_lan_msg(smi_msg, msg, lan_addr, msgid, i_ipmi_request()
1865 format_lan_msg(smi_msg, msg, lan_addr, i_ipmi_request()
1875 recv_msg->msg.data = recv_msg->msg_data; i_ipmi_request()
1876 recv_msg->msg.data_len = smi_msg->data_size; i_ipmi_request()
1931 struct kernel_ipmi_msg *msg, ipmi_request_settime()
1949 msg, ipmi_request_settime()
1963 struct kernel_ipmi_msg *msg, ipmi_request_supply_msgs()
1981 msg, ipmi_request_supply_msgs()
2575 struct kernel_ipmi_msg msg; send_guid_cmd() local
2582 msg.netfn = IPMI_NETFN_APP_REQUEST; send_guid_cmd()
2583 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; send_guid_cmd()
2584 msg.data = NULL; send_guid_cmd()
2585 msg.data_len = 0; send_guid_cmd()
2590 &msg, send_guid_cmd()
2601 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) guid_handler() argument
2603 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) guid_handler()
2604 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) guid_handler()
2605 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) guid_handler()
2609 if (msg->msg.data[0] != 0) { guid_handler()
2615 if (msg->msg.data_len < 17) { guid_handler()
2621 msg->msg.data_len); guid_handler()
2625 memcpy(intf->bmc->guid, msg->msg.data, 16); guid_handler()
2649 struct kernel_ipmi_msg msg; send_channel_info_cmd() local
2657 msg.netfn = IPMI_NETFN_APP_REQUEST; send_channel_info_cmd()
2658 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; send_channel_info_cmd()
2659 msg.data = data; send_channel_info_cmd()
2660 msg.data_len = 1; send_channel_info_cmd()
2666 &msg, send_channel_info_cmd()
2677 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) channel_handler() argument
2682 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) channel_handler()
2683 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) channel_handler()
2684 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { channel_handler()
2686 if (msg->msg.data[0] != 0) { channel_handler()
2689 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { channel_handler()
2707 if (msg->msg.data_len < 4) { channel_handler()
2712 intf->channels[chan].medium = msg->msg.data[2] & 0x7f; channel_handler()
2713 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; channel_handler()
2921 struct ipmi_smi_msg *msg, deliver_smi_err_response()
2924 msg->rsp[0] = msg->data[0] | 4; deliver_smi_err_response()
2925 msg->rsp[1] = msg->data[1]; deliver_smi_err_response()
2926 msg->rsp[2] = err; deliver_smi_err_response()
2927 msg->rsp_size = 3; deliver_smi_err_response()
2929 handle_one_recv_msg(intf, msg); deliver_smi_err_response()
2936 struct ipmi_smi_msg *msg; cleanup_smi_msgs() local
2960 msg = list_entry(entry, struct ipmi_smi_msg, link); cleanup_smi_msgs()
2961 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); cleanup_smi_msgs()
3018 struct ipmi_smi_msg *msg) handle_ipmb_get_msg_rsp()
3027 if (msg->rsp_size < 11) { handle_ipmb_get_msg_rsp()
3033 if (msg->rsp[2] != 0) { handle_ipmb_get_msg_rsp()
3039 ipmb_addr.slave_addr = msg->rsp[6]; handle_ipmb_get_msg_rsp()
3040 ipmb_addr.channel = msg->rsp[3] & 0x0f; handle_ipmb_get_msg_rsp()
3041 ipmb_addr.lun = msg->rsp[7] & 3; handle_ipmb_get_msg_rsp()
3048 msg->rsp[7] >> 2, handle_ipmb_get_msg_rsp()
3049 msg->rsp[3] & 0x0f, handle_ipmb_get_msg_rsp()
3050 msg->rsp[8], handle_ipmb_get_msg_rsp()
3051 (msg->rsp[4] >> 2) & (~1), handle_ipmb_get_msg_rsp()
3063 &(msg->rsp[9]), handle_ipmb_get_msg_rsp()
3064 msg->rsp_size - 9); handle_ipmb_get_msg_rsp()
3070 recv_msg->msg.netfn = msg->rsp[4] >> 2; handle_ipmb_get_msg_rsp()
3071 recv_msg->msg.data = recv_msg->msg_data; handle_ipmb_get_msg_rsp()
3072 recv_msg->msg.data_len = msg->rsp_size - 10; handle_ipmb_get_msg_rsp()
3081 struct ipmi_smi_msg *msg) handle_ipmb_get_msg_cmd()
3092 if (msg->rsp_size < 10) { handle_ipmb_get_msg_cmd()
3098 if (msg->rsp[2] != 0) { handle_ipmb_get_msg_cmd()
3103 netfn = msg->rsp[4] >> 2; handle_ipmb_get_msg_cmd()
3104 cmd = msg->rsp[8]; handle_ipmb_get_msg_cmd()
3105 chan = msg->rsp[3] & 0xf; handle_ipmb_get_msg_cmd()
3120 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); handle_ipmb_get_msg_cmd()
3121 msg->data[1] = IPMI_SEND_MSG_CMD; handle_ipmb_get_msg_cmd()
3122 msg->data[2] = msg->rsp[3]; handle_ipmb_get_msg_cmd()
3123 msg->data[3] = msg->rsp[6]; handle_ipmb_get_msg_cmd()
3124 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); handle_ipmb_get_msg_cmd()
3125 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); handle_ipmb_get_msg_cmd()
3126 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; handle_ipmb_get_msg_cmd()
3128 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); handle_ipmb_get_msg_cmd()
3129 msg->data[8] = msg->rsp[8]; /* cmd */ handle_ipmb_get_msg_cmd()
3130 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; handle_ipmb_get_msg_cmd()
3131 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); handle_ipmb_get_msg_cmd()
3132 msg->data_size = 11; handle_ipmb_get_msg_cmd()
3138 for (m = 0; m < msg->data_size; m++) handle_ipmb_get_msg_cmd()
3139 printk(" %2.2x", msg->data[m]); handle_ipmb_get_msg_cmd()
3145 smi_send(intf, intf->handlers, msg, 0); handle_ipmb_get_msg_cmd()
3171 ipmb_addr->slave_addr = msg->rsp[6]; handle_ipmb_get_msg_cmd()
3172 ipmb_addr->lun = msg->rsp[7] & 3; handle_ipmb_get_msg_cmd()
3173 ipmb_addr->channel = msg->rsp[3] & 0xf; handle_ipmb_get_msg_cmd()
3181 recv_msg->msgid = msg->rsp[7] >> 2; handle_ipmb_get_msg_cmd()
3182 recv_msg->msg.netfn = msg->rsp[4] >> 2; handle_ipmb_get_msg_cmd()
3183 recv_msg->msg.cmd = msg->rsp[8]; handle_ipmb_get_msg_cmd()
3184 recv_msg->msg.data = recv_msg->msg_data; handle_ipmb_get_msg_cmd()
3190 recv_msg->msg.data_len = msg->rsp_size - 10; handle_ipmb_get_msg_cmd()
3192 &(msg->rsp[9]), handle_ipmb_get_msg_cmd()
3193 msg->rsp_size - 10); handle_ipmb_get_msg_cmd()
3202 struct ipmi_smi_msg *msg) handle_lan_get_msg_rsp()
3212 if (msg->rsp_size < 13) { handle_lan_get_msg_rsp()
3218 if (msg->rsp[2] != 0) { handle_lan_get_msg_rsp()
3224 lan_addr.session_handle = msg->rsp[4]; handle_lan_get_msg_rsp()
3225 lan_addr.remote_SWID = msg->rsp[8]; handle_lan_get_msg_rsp()
3226 lan_addr.local_SWID = msg->rsp[5]; handle_lan_get_msg_rsp()
3227 lan_addr.channel = msg->rsp[3] & 0x0f; handle_lan_get_msg_rsp()
3228 lan_addr.privilege = msg->rsp[3] >> 4; handle_lan_get_msg_rsp()
3229 lan_addr.lun = msg->rsp[9] & 3; handle_lan_get_msg_rsp()
3236 msg->rsp[9] >> 2, handle_lan_get_msg_rsp()
3237 msg->rsp[3] & 0x0f, handle_lan_get_msg_rsp()
3238 msg->rsp[10], handle_lan_get_msg_rsp()
3239 (msg->rsp[6] >> 2) & (~1), handle_lan_get_msg_rsp()
3251 &(msg->rsp[11]), handle_lan_get_msg_rsp()
3252 msg->rsp_size - 11); handle_lan_get_msg_rsp()
3258 recv_msg->msg.netfn = msg->rsp[6] >> 2; handle_lan_get_msg_rsp()
3259 recv_msg->msg.data = recv_msg->msg_data; handle_lan_get_msg_rsp()
3260 recv_msg->msg.data_len = msg->rsp_size - 12; handle_lan_get_msg_rsp()
3269 struct ipmi_smi_msg *msg) handle_lan_get_msg_cmd()
3280 if (msg->rsp_size < 12) { handle_lan_get_msg_cmd()
3286 if (msg->rsp[2] != 0) { handle_lan_get_msg_cmd()
3291 netfn = msg->rsp[6] >> 2; handle_lan_get_msg_cmd()
3292 cmd = msg->rsp[10]; handle_lan_get_msg_cmd()
3293 chan = msg->rsp[3] & 0xf; handle_lan_get_msg_cmd()
3329 lan_addr->session_handle = msg->rsp[4]; handle_lan_get_msg_cmd()
3330 lan_addr->remote_SWID = msg->rsp[8]; handle_lan_get_msg_cmd()
3331 lan_addr->local_SWID = msg->rsp[5]; handle_lan_get_msg_cmd()
3332 lan_addr->lun = msg->rsp[9] & 3; handle_lan_get_msg_cmd()
3333 lan_addr->channel = msg->rsp[3] & 0xf; handle_lan_get_msg_cmd()
3334 lan_addr->privilege = msg->rsp[3] >> 4; handle_lan_get_msg_cmd()
3342 recv_msg->msgid = msg->rsp[9] >> 2; handle_lan_get_msg_cmd()
3343 recv_msg->msg.netfn = msg->rsp[6] >> 2; handle_lan_get_msg_cmd()
3344 recv_msg->msg.cmd = msg->rsp[10]; handle_lan_get_msg_cmd()
3345 recv_msg->msg.data = recv_msg->msg_data; handle_lan_get_msg_cmd()
3351 recv_msg->msg.data_len = msg->rsp_size - 12; handle_lan_get_msg_cmd()
3353 &(msg->rsp[11]), handle_lan_get_msg_cmd()
3354 msg->rsp_size - 12); handle_lan_get_msg_cmd()
3369 struct ipmi_smi_msg *msg) handle_oem_get_msg_cmd()
3384 if (msg->rsp_size < 4) { handle_oem_get_msg_cmd()
3390 if (msg->rsp[2] != 0) { handle_oem_get_msg_cmd()
3399 netfn = msg->rsp[0] >> 2; handle_oem_get_msg_cmd()
3400 cmd = msg->rsp[1]; handle_oem_get_msg_cmd()
3401 chan = msg->rsp[3] & 0xf; handle_oem_get_msg_cmd()
3446 smi_addr->lun = msg->rsp[0] & 3; handle_oem_get_msg_cmd()
3451 recv_msg->msg.netfn = msg->rsp[0] >> 2; handle_oem_get_msg_cmd()
3452 recv_msg->msg.cmd = msg->rsp[1]; handle_oem_get_msg_cmd()
3453 recv_msg->msg.data = recv_msg->msg_data; handle_oem_get_msg_cmd()
3459 recv_msg->msg.data_len = msg->rsp_size - 4; handle_oem_get_msg_cmd()
3461 &(msg->rsp[4]), handle_oem_get_msg_cmd()
3462 msg->rsp_size - 4); handle_oem_get_msg_cmd()
3471 struct ipmi_smi_msg *msg) copy_event_into_recv_msg()
3479 smi_addr->lun = msg->rsp[0] & 3; copy_event_into_recv_msg()
3481 recv_msg->msg.netfn = msg->rsp[0] >> 2; copy_event_into_recv_msg()
3482 recv_msg->msg.cmd = msg->rsp[1]; copy_event_into_recv_msg()
3483 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3); copy_event_into_recv_msg()
3484 recv_msg->msg.data = recv_msg->msg_data; copy_event_into_recv_msg()
3485 recv_msg->msg.data_len = msg->rsp_size - 3; copy_event_into_recv_msg()
3489 struct ipmi_smi_msg *msg) handle_read_event_rsp()
3498 if (msg->rsp_size < 19) { handle_read_event_rsp()
3504 if (msg->rsp[2] != 0) { handle_read_event_rsp()
3543 copy_event_into_recv_msg(recv_msg, msg); handle_read_event_rsp()
3572 copy_event_into_recv_msg(recv_msg, msg); handle_read_event_rsp()
3592 struct ipmi_smi_msg *msg) handle_bmc_rsp()
3597 recv_msg = (struct ipmi_recv_msg *) msg->user_data; handle_bmc_rsp()
3618 recv_msg->msgid = msg->msgid; handle_bmc_rsp()
3623 smi_addr->lun = msg->rsp[0] & 3; handle_bmc_rsp()
3624 recv_msg->msg.netfn = msg->rsp[0] >> 2; handle_bmc_rsp()
3625 recv_msg->msg.cmd = msg->rsp[1]; handle_bmc_rsp()
3627 &(msg->rsp[2]), handle_bmc_rsp()
3628 msg->rsp_size - 2); handle_bmc_rsp()
3629 recv_msg->msg.data = recv_msg->msg_data; handle_bmc_rsp()
3630 recv_msg->msg.data_len = msg->rsp_size - 2; handle_bmc_rsp()
3643 struct ipmi_smi_msg *msg) handle_one_recv_msg()
3651 for (m = 0; m < msg->rsp_size; m++) handle_one_recv_msg()
3652 printk(" %2.2x", msg->rsp[m]); handle_one_recv_msg()
3655 if (msg->rsp_size < 2) { handle_one_recv_msg()
3659 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); handle_one_recv_msg()
3662 msg->rsp[0] = msg->data[0] | (1 << 2); handle_one_recv_msg()
3663 msg->rsp[1] = msg->data[1]; handle_one_recv_msg()
3664 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; handle_one_recv_msg()
3665 msg->rsp_size = 3; handle_one_recv_msg()
3666 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) handle_one_recv_msg()
3667 || (msg->rsp[1] != msg->data[1])) { handle_one_recv_msg()
3674 (msg->data[0] >> 2) | 1, msg->data[1], handle_one_recv_msg()
3675 msg->rsp[0] >> 2, msg->rsp[1]); handle_one_recv_msg()
3678 msg->rsp[0] = msg->data[0] | (1 << 2); handle_one_recv_msg()
3679 msg->rsp[1] = msg->data[1]; handle_one_recv_msg()
3680 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; handle_one_recv_msg()
3681 msg->rsp_size = 3; handle_one_recv_msg()
3684 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) handle_one_recv_msg()
3685 && (msg->rsp[1] == IPMI_SEND_MSG_CMD) handle_one_recv_msg()
3686 && (msg->user_data != NULL)) { handle_one_recv_msg()
3691 struct ipmi_recv_msg *recv_msg = msg->user_data; handle_one_recv_msg()
3694 if (msg->rsp_size < 2) handle_one_recv_msg()
3698 chan = msg->data[2] & 0x0f; handle_one_recv_msg()
3711 recv_msg->msg.data = recv_msg->msg_data; handle_one_recv_msg()
3712 recv_msg->msg.data_len = 1; handle_one_recv_msg()
3713 recv_msg->msg_data[0] = msg->rsp[2]; handle_one_recv_msg()
3715 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) handle_one_recv_msg()
3716 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { handle_one_recv_msg()
3718 chan = msg->rsp[3] & 0xf; handle_one_recv_msg()
3738 if (msg->rsp[4] & 0x04) { handle_one_recv_msg()
3743 requeue = handle_ipmb_get_msg_rsp(intf, msg); handle_one_recv_msg()
3749 requeue = handle_ipmb_get_msg_cmd(intf, msg); handle_one_recv_msg()
3755 if (msg->rsp[6] & 0x04) { handle_one_recv_msg()
3760 requeue = handle_lan_get_msg_rsp(intf, msg); handle_one_recv_msg()
3766 requeue = handle_lan_get_msg_cmd(intf, msg); handle_one_recv_msg()
3777 requeue = handle_oem_get_msg_cmd(intf, msg); handle_one_recv_msg()
3787 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) handle_one_recv_msg()
3788 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { handle_one_recv_msg()
3790 requeue = handle_read_event_rsp(intf, msg); handle_one_recv_msg()
3793 requeue = handle_bmc_rsp(intf, msg); handle_one_recv_msg()
3897 struct ipmi_smi_msg *msg) ipmi_smi_msg_received()
3902 if ((msg->data_size >= 2) ipmi_smi_msg_received()
3903 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) ipmi_smi_msg_received()
3904 && (msg->data[1] == IPMI_SEND_MSG_CMD) ipmi_smi_msg_received()
3905 && (msg->user_data == NULL)) { ipmi_smi_msg_received()
3923 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) ipmi_smi_msg_received()
3924 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) ipmi_smi_msg_received()
3925 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) ipmi_smi_msg_received()
3926 && (msg->rsp[2] != IPMI_BUS_ERR) ipmi_smi_msg_received()
3927 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { ipmi_smi_msg_received()
3928 int chan = msg->rsp[3] & 0xf; ipmi_smi_msg_received()
3940 intf_err_seq(intf, msg->msgid, msg->rsp[2]); ipmi_smi_msg_received()
3943 intf_start_seq_timer(intf, msg->msgid); ipmi_smi_msg_received()
3946 ipmi_free_smi_msg(msg); ipmi_smi_msg_received()
3954 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); ipmi_smi_msg_received()
3962 if (msg == intf->curr_msg) ipmi_smi_msg_received()
3996 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); smi_from_recv_msg()
3997 smi_msg->data_size = recv_msg->msg.data_len; smi_from_recv_msg()
4017 struct ipmi_recv_msg *msg; check_msg_timeout() local
4035 msg = ent->recv_msg; check_msg_timeout()
4036 list_add_tail(&msg->link, timeouts); check_msg_timeout()
4096 struct ipmi_recv_msg *msg, *msg2; ipmi_timeout_handler() local
4114 list_for_each_entry_safe(msg, msg2, &timeouts, link) ipmi_timeout_handler()
4115 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); ipmi_timeout_handler()
4206 static void free_smi_msg(struct ipmi_smi_msg *msg) free_smi_msg() argument
4209 kfree(msg); free_smi_msg()
4225 static void free_recv_msg(struct ipmi_recv_msg *msg) free_recv_msg() argument
4228 kfree(msg); free_recv_msg()
4244 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) ipmi_free_recv_msg() argument
4246 if (msg->user) ipmi_free_recv_msg()
4247 kref_put(&msg->user->refcount, free_user); ipmi_free_recv_msg()
4248 msg->done(msg); ipmi_free_recv_msg()
4256 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) dummy_smi_done_handler() argument
4261 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) dummy_recv_done_handler() argument
4271 struct kernel_ipmi_msg *msg) ipmi_panic_request_and_wait()
4284 msg, ipmi_panic_request_and_wait()
4299 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) event_receiver_fetcher() argument
4301 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) event_receiver_fetcher()
4302 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) event_receiver_fetcher()
4303 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) event_receiver_fetcher()
4304 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { event_receiver_fetcher()
4306 intf->event_receiver = msg->msg.data[1]; event_receiver_fetcher()
4307 intf->event_receiver_lun = msg->msg.data[2] & 0x3; event_receiver_fetcher()
4311 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) device_id_fetcher() argument
4313 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) device_id_fetcher()
4314 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) device_id_fetcher()
4315 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) device_id_fetcher()
4316 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { device_id_fetcher()
4321 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; device_id_fetcher()
4322 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; device_id_fetcher()
4329 struct kernel_ipmi_msg msg; send_panic_events() local
4341 msg.netfn = 0x04; /* Sensor or Event. */ send_panic_events()
4342 msg.cmd = 2; /* Platform event command. */ send_panic_events()
4343 msg.data = data; send_panic_events()
4344 msg.data_len = 8; send_panic_events()
4370 ipmi_panic_request_and_wait(intf, &addr, &msg); send_panic_events()
4413 msg.netfn = IPMI_NETFN_APP_REQUEST; send_panic_events()
4414 msg.cmd = IPMI_GET_DEVICE_ID_CMD; send_panic_events()
4415 msg.data = NULL; send_panic_events()
4416 msg.data_len = 0; send_panic_events()
4418 ipmi_panic_request_and_wait(intf, &addr, &msg); send_panic_events()
4422 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; send_panic_events()
4423 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; send_panic_events()
4424 msg.data = NULL; send_panic_events()
4425 msg.data_len = 0; send_panic_events()
4427 ipmi_panic_request_and_wait(intf, &addr, &msg); send_panic_events()
4461 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ send_panic_events()
4462 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; send_panic_events()
4463 msg.data = data; send_panic_events()
4464 msg.data_len = 16; send_panic_events()
4484 ipmi_panic_request_and_wait(intf, &addr, &msg); send_panic_events()
1403 format_ipmb_msg(struct ipmi_smi_msg *smi_msg, struct kernel_ipmi_msg *msg, struct ipmi_ipmb_addr *ipmb_addr, long msgid, unsigned char ipmb_seq, int broadcast, unsigned char source_address, unsigned char source_lun) format_ipmb_msg() argument
1447 format_lan_msg(struct ipmi_smi_msg *smi_msg, struct kernel_ipmi_msg *msg, struct ipmi_lan_addr *lan_addr, long msgid, unsigned char ipmb_seq, unsigned char source_lun) format_lan_msg() argument
1529 i_ipmi_request(ipmi_user_t user, ipmi_smi_t intf, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, void *supplied_smi, struct ipmi_recv_msg *supplied_recv, int priority, unsigned char source_address, unsigned char source_lun, int retries, unsigned int retry_time_ms) i_ipmi_request() argument
1928 ipmi_request_settime(ipmi_user_t user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, int priority, int retries, unsigned int retry_time_ms) ipmi_request_settime() argument
1960 ipmi_request_supply_msgs(ipmi_user_t user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, void *supplied_smi, struct ipmi_recv_msg *supplied_recv, int priority) ipmi_request_supply_msgs() argument
2920 deliver_smi_err_response(ipmi_smi_t intf, struct ipmi_smi_msg *msg, unsigned char err) deliver_smi_err_response() argument
3017 handle_ipmb_get_msg_rsp(ipmi_smi_t intf, struct ipmi_smi_msg *msg) handle_ipmb_get_msg_rsp() argument
3080 handle_ipmb_get_msg_cmd(ipmi_smi_t intf, struct ipmi_smi_msg *msg) handle_ipmb_get_msg_cmd() argument
3201 handle_lan_get_msg_rsp(ipmi_smi_t intf, struct ipmi_smi_msg *msg) handle_lan_get_msg_rsp() argument
3268 handle_lan_get_msg_cmd(ipmi_smi_t intf, struct ipmi_smi_msg *msg) handle_lan_get_msg_cmd() argument
3368 handle_oem_get_msg_cmd(ipmi_smi_t intf, struct ipmi_smi_msg *msg) handle_oem_get_msg_cmd() argument
3470 copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, struct ipmi_smi_msg *msg) copy_event_into_recv_msg() argument
3488 handle_read_event_rsp(ipmi_smi_t intf, struct ipmi_smi_msg *msg) handle_read_event_rsp() argument
3591 handle_bmc_rsp(ipmi_smi_t intf, struct ipmi_smi_msg *msg) handle_bmc_rsp() argument
3642 handle_one_recv_msg(ipmi_smi_t intf, struct ipmi_smi_msg *msg) handle_one_recv_msg() argument
3896 ipmi_smi_msg_received(ipmi_smi_t intf, struct ipmi_smi_msg *msg) ipmi_smi_msg_received() argument
4269 ipmi_panic_request_and_wait(ipmi_smi_t intf, struct ipmi_addr *addr, struct kernel_ipmi_msg *msg) ipmi_panic_request_and_wait() argument
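The message-handler core above routes struct kernel_ipmi_msg requests from in-kernel users down to the system interface, matches responses back by sequence number, and delivers them through each user's receive handler; send_guid_cmd() and send_panic_events() show the calling convention. A hedged sketch of an in-kernel user issuing one command to the local BMC through ipmi_request_settime(), whose full parameter list appears in the annotations above; the sketch assumes `user` was obtained earlier from ipmi_create_user():

#include <linux/ipmi.h>

/* Sketch: send IPMI "Get Device ID" to the BMC over the system interface.
 * The response arrives asynchronously via the user's receive handler. */
static int request_device_id(ipmi_user_t user)
{
	struct ipmi_system_interface_addr addr = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel   = IPMI_BMC_CHANNEL,
		.lun	   = 0,
	};
	struct kernel_ipmi_msg msg = {
		.netfn	  = IPMI_NETFN_APP_REQUEST,
		.cmd	  = IPMI_GET_DEVICE_ID_CMD,
		.data	  = NULL,
		.data_len = 0,
	};

	/* msgid 0, priority 0, default retries (-1) and retry time (0) */
	return ipmi_request_settime(user, (struct ipmi_addr *)&addr, 0,
				    &msg, NULL, 0, -1, 0);
}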
H A Dipmi_devintf.c61 static void file_receive_handler(struct ipmi_recv_msg *msg, file_receive_handler() argument
71 list_add_tail(&(msg->link), &(priv->recv_msgs)); file_receive_handler()
160 struct ipmi_recv_msg *msg, *next; ipmi_release() local
166 list_for_each_entry_safe(msg, next, &priv->recv_msgs, link) ipmi_release()
167 ipmi_free_recv_msg(msg); ipmi_release()
182 struct kernel_ipmi_msg msg; handle_send_req() local
190 msg.netfn = req->msg.netfn; handle_send_req()
191 msg.cmd = req->msg.cmd; handle_send_req()
192 msg.data_len = req->msg.data_len; handle_send_req()
193 msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); handle_send_req()
194 if (!msg.data) handle_send_req()
204 if (req->msg.data != NULL) { handle_send_req()
205 if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) { handle_send_req()
210 if (copy_from_user(msg.data, handle_send_req()
211 req->msg.data, handle_send_req()
212 req->msg.data_len)) handle_send_req()
218 msg.data_len = 0; handle_send_req()
224 &msg, handle_send_req()
230 kfree(msg.data); handle_send_req()
282 struct ipmi_recv_msg *msg; ipmi_ioctl() local
310 msg = list_entry(entry, struct ipmi_recv_msg, link); ipmi_ioctl()
314 addr_len = ipmi_addr_length(msg->addr.addr_type); ipmi_ioctl()
321 if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) { ipmi_ioctl()
327 rsp.recv_type = msg->recv_type; ipmi_ioctl()
328 rsp.msgid = msg->msgid; ipmi_ioctl()
329 rsp.msg.netfn = msg->msg.netfn; ipmi_ioctl()
330 rsp.msg.cmd = msg->msg.cmd; ipmi_ioctl()
332 if (msg->msg.data_len > 0) { ipmi_ioctl()
333 if (rsp.msg.data_len < msg->msg.data_len) { ipmi_ioctl()
336 msg->msg.data_len = rsp.msg.data_len; ipmi_ioctl()
342 if (copy_to_user(rsp.msg.data, ipmi_ioctl()
343 msg->msg.data, ipmi_ioctl()
344 msg->msg.data_len)) ipmi_ioctl()
349 rsp.msg.data_len = msg->msg.data_len; ipmi_ioctl()
351 rsp.msg.data_len = 0; ipmi_ioctl()
360 ipmi_free_recv_msg(msg); ipmi_ioctl()
679 struct compat_ipmi_msg msg; member in struct:compat_ipmi_req
687 struct compat_ipmi_msg msg; member in struct:compat_ipmi_recv
735 get_compat_ipmi_msg(&p64->msg, &p32->msg)) get_compat_ipmi_req()
762 get_compat_ipmi_msg(&p64->msg, &p32->msg)) get_compat_ipmi_recv()
775 put_compat_ipmi_msg(&p64->msg, &p32->msg)) put_compat_ipmi_recv()
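The handle_send_req() lines above show the usual ioctl copy-in pattern: bound-check the user-supplied length against IPMI_MAX_MSG_LENGTH, allocate a kernel buffer, then copy_from_user(). A minimal sketch of that pattern, assuming the ipmi_req / kernel_ipmi_msg layouts from the IPMI headers; the helper name and exact error codes are illustrative, not taken from the driver:

    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <linux/ipmi.h>
    #include <linux/ipmi_smi.h>     /* IPMI_MAX_MSG_LENGTH */

    /* req has already been copied into kernel space; req->msg.data is still
     * a user pointer, so it must go through copy_from_user(). */
    static int copy_req_data(struct kernel_ipmi_msg *msg,
                             const struct ipmi_req *req)
    {
            msg->netfn = req->msg.netfn;
            msg->cmd = req->msg.cmd;
            msg->data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
            if (!msg->data)
                    return -ENOMEM;

            if (!req->msg.data) {
                    msg->data_len = 0;
                    return 0;
            }
            if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
                    kfree(msg->data);
                    return -EMSGSIZE;
            }
            if (copy_from_user(msg->data, req->msg.data, req->msg.data_len)) {
                    kfree(msg->data);
                    return -EFAULT;
            }
            msg->data_len = req->msg.data_len;
            return 0;
    }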
ipmi_ssif.c
299 struct ipmi_smi_msg *msg);
319 struct ipmi_smi_msg *msg) deliver_recv_msg()
324 ipmi_free_smi_msg(msg); deliver_recv_msg()
325 } else if (msg->rsp_size < 0) { deliver_recv_msg()
326 return_hosed_msg(ssif_info, msg); deliver_recv_msg()
329 msg->rsp_size); deliver_recv_msg()
331 ipmi_smi_msg_received(intf, msg); deliver_recv_msg()
336 struct ipmi_smi_msg *msg) return_hosed_msg()
341 msg->rsp[0] = msg->data[0] | 4; return_hosed_msg()
342 msg->rsp[1] = msg->data[1]; return_hosed_msg()
343 msg->rsp[2] = 0xFF; /* Unknown error. */ return_hosed_msg()
344 msg->rsp_size = 3; return_hosed_msg()
346 deliver_recv_msg(ssif_info, msg); return_hosed_msg()
357 unsigned char msg[3]; start_clear_flags() local
364 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); start_clear_flags()
365 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; start_clear_flags()
366 msg[2] = WDT_PRE_TIMEOUT_INT; start_clear_flags()
368 if (start_send(ssif_info, msg, 3) != 0) { start_clear_flags()
389 struct ipmi_smi_msg *msg) check_start_send()
391 if (start_send(ssif_info, msg->data, msg->data_size) != 0) { check_start_send()
398 ipmi_free_smi_msg(msg); check_start_send()
404 struct ipmi_smi_msg *msg; start_event_fetch() local
408 msg = ipmi_alloc_smi_msg(); start_event_fetch()
409 if (!msg) { start_event_fetch()
414 ssif_info->curr_msg = msg; start_event_fetch()
418 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); start_event_fetch()
419 msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; start_event_fetch()
420 msg->data_size = 2; start_event_fetch()
422 check_start_send(ssif_info, flags, msg); start_event_fetch()
428 struct ipmi_smi_msg *msg; start_recv_msg_fetch() local
430 msg = ipmi_alloc_smi_msg(); start_recv_msg_fetch()
431 if (!msg) { start_recv_msg_fetch()
436 ssif_info->curr_msg = msg; start_recv_msg_fetch()
440 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); start_recv_msg_fetch()
441 msg->data[1] = IPMI_GET_MSG_CMD; start_recv_msg_fetch()
442 msg->data_size = 2; start_recv_msg_fetch()
444 check_start_send(ssif_info, flags, msg); start_recv_msg_fetch()
596 struct ipmi_smi_msg *msg; msg_done_handler() local
726 msg = ssif_info->curr_msg; msg_done_handler()
727 if (msg) { msg_done_handler()
728 msg->rsp_size = len; msg_done_handler()
729 if (msg->rsp_size > IPMI_MAX_MSG_LENGTH) msg_done_handler()
730 msg->rsp_size = IPMI_MAX_MSG_LENGTH; msg_done_handler()
731 memcpy(msg->rsp, data, msg->rsp_size); msg_done_handler()
738 if (!msg) msg_done_handler()
742 return_hosed_msg(ssif_info, msg); msg_done_handler()
744 deliver_recv_msg(ssif_info, msg); msg_done_handler()
785 if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { msg_done_handler()
787 msg->done(msg); msg_done_handler()
792 } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 msg_done_handler()
793 || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) { msg_done_handler()
795 msg->rsp[0], msg->rsp[1]); msg_done_handler()
796 msg->done(msg); msg_done_handler()
803 deliver_recv_msg(ssif_info, msg); msg_done_handler()
808 if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { msg_done_handler()
810 msg->done(msg); msg_done_handler()
812 /* Take off the msg flag. */ msg_done_handler()
815 } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 msg_done_handler()
816 || msg->rsp[1] != IPMI_GET_MSG_CMD) { msg_done_handler()
818 msg->rsp[0], msg->rsp[1]); msg_done_handler()
819 msg->done(msg); msg_done_handler()
821 /* Take off the msg flag. */ msg_done_handler()
827 deliver_recv_msg(ssif_info, msg); msg_done_handler()
1000 struct ipmi_smi_msg *msg; start_next_msg() local
1022 msg = ssif_info->curr_msg; start_next_msg()
1024 return_hosed_msg(ssif_info, msg); start_next_msg()
1032 struct ipmi_smi_msg *msg) sender()
1038 ssif_info->waiting_msg = msg; sender()
1048 msg->data[0], msg->data[1], sender()
1206 static int do_cmd(struct i2c_client *client, int len, unsigned char *msg, do_cmd() argument
1214 ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg); do_cmd()
1238 (resp[0] != (msg[0] | (1 << 2))) || do_cmd()
1239 (resp[1] != msg[1])) do_cmd()
1253 unsigned char msg[3]; ssif_detect() local
1262 msg[0] = IPMI_NETFN_APP_REQUEST << 2; ssif_detect()
1263 msg[1] = IPMI_GET_DEVICE_ID_CMD; ssif_detect()
1264 rv = do_cmd(client, 2, msg, &len, resp); ssif_detect()
1412 unsigned char msg[3]; ssif_probe() local
1454 msg[0] = IPMI_NETFN_APP_REQUEST << 2; ssif_probe()
1455 msg[1] = IPMI_GET_DEVICE_ID_CMD; ssif_probe()
1456 rv = do_cmd(client, 2, msg, &len, resp); ssif_probe()
1468 msg[0] = IPMI_NETFN_APP_REQUEST << 2; ssif_probe()
1469 msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD; ssif_probe()
1470 msg[2] = 0; /* SSIF */ ssif_probe()
1471 rv = do_cmd(client, 3, msg, &len, resp); ssif_probe()
1541 msg[0] = IPMI_NETFN_APP_REQUEST << 2; ssif_probe()
1542 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; ssif_probe()
1543 msg[2] = WDT_PRE_TIMEOUT_INT; ssif_probe()
1544 rv = do_cmd(client, 3, msg, &len, resp); ssif_probe()
1550 msg[0] = IPMI_NETFN_APP_REQUEST << 2; ssif_probe()
1551 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; ssif_probe()
1552 rv = do_cmd(client, 2, msg, &len, resp); ssif_probe()
1568 msg[0] = IPMI_NETFN_APP_REQUEST << 2; ssif_probe()
1569 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; ssif_probe()
1570 msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF; ssif_probe()
1571 rv = do_cmd(client, 3, msg, &len, resp); ssif_probe()
1585 msg[0] = IPMI_NETFN_APP_REQUEST << 2; ssif_probe()
1586 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; ssif_probe()
1587 msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR; ssif_probe()
1588 rv = do_cmd(client, 3, msg, &len, resp); ssif_probe()
318 deliver_recv_msg(struct ssif_info *ssif_info, struct ipmi_smi_msg *msg) deliver_recv_msg() argument
335 return_hosed_msg(struct ssif_info *ssif_info, struct ipmi_smi_msg *msg) return_hosed_msg() argument
388 check_start_send(struct ssif_info *ssif_info, unsigned long *flags, struct ipmi_smi_msg *msg) check_start_send() argument
1031 sender(void *send_info, struct ipmi_smi_msg *msg) sender() argument
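Every request the SSIF driver builds above has the same three-byte shape: (netfn << 2) | lun, then the command byte, then any data. A standalone sketch of that layout, modelled on the clear-message-flags request in start_clear_flags(); the numeric values are my recollection of the driver's defines and the IPMI spec, so treat them as assumptions:

    #include <stdint.h>
    #include <stddef.h>

    #define IPMI_NETFN_APP_REQUEST   0x06   /* App network function (assumed) */
    #define IPMI_CLEAR_MSG_FLAGS_CMD 0x30   /* Clear Message Flags (assumed) */
    #define WDT_PRE_TIMEOUT_INT      0x08   /* watchdog pre-timeout flag bit (assumed) */

    static size_t build_clear_flags_req(uint8_t buf[3])
    {
            buf[0] = IPMI_NETFN_APP_REQUEST << 2;   /* netfn in bits 7..2, LUN 0 in bits 1..0 */
            buf[1] = IPMI_CLEAR_MSG_FLAGS_CMD;      /* command */
            buf[2] = WDT_PRE_TIMEOUT_INT;           /* flags to clear */
            return 3;                               /* bytes handed to start_send() */
    }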
/linux-4.1.27/arch/powerpc/include/asm/
dbell.h
40 static inline void _ppc_msgsnd(u32 msg) _ppc_msgsnd() argument
43 __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); _ppc_msgsnd()
45 __asm__ __volatile__ (PPC_MSGSNDP(%0) : : "r" (msg)); _ppc_msgsnd()
54 static inline void _ppc_msgsnd(u32 msg) _ppc_msgsnd() argument
56 __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); _ppc_msgsnd()
67 u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) | ppc_msgsnd() local
70 _ppc_msgsnd(msg); ppc_msgsnd()
/linux-4.1.27/net/core/
scm.c
133 int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) __scm_send() argument
138 for_each_cmsghdr(cmsg, msg) { for_each_cmsghdr()
149 if (!CMSG_OK(msg, cmsg)) for_each_cmsghdr()
215 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) put_cmsg() argument
218 = (__force struct cmsghdr __user *)msg->msg_control; put_cmsg()
223 if (MSG_CMSG_COMPAT & msg->msg_flags) put_cmsg()
224 return put_cmsg_compat(msg, level, type, len, data); put_cmsg()
226 if (cm==NULL || msg->msg_controllen < sizeof(*cm)) { put_cmsg()
227 msg->msg_flags |= MSG_CTRUNC; put_cmsg()
230 if (msg->msg_controllen < cmlen) { put_cmsg()
231 msg->msg_flags |= MSG_CTRUNC; put_cmsg()
232 cmlen = msg->msg_controllen; put_cmsg()
244 if (msg->msg_controllen < cmlen) put_cmsg()
245 cmlen = msg->msg_controllen; put_cmsg()
246 msg->msg_control += cmlen; put_cmsg()
247 msg->msg_controllen -= cmlen; put_cmsg()
254 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) scm_detach_fds() argument
257 = (__force struct cmsghdr __user*)msg->msg_control; scm_detach_fds()
265 if (MSG_CMSG_COMPAT & msg->msg_flags) { scm_detach_fds()
266 scm_detach_fds_compat(msg, scm); scm_detach_fds()
270 if (msg->msg_controllen > sizeof(struct cmsghdr)) scm_detach_fds()
271 fdmax = ((msg->msg_controllen - sizeof(struct cmsghdr)) scm_detach_fds()
285 err = get_unused_fd_flags(MSG_CMSG_CLOEXEC & msg->msg_flags scm_detach_fds()
314 if (msg->msg_controllen < cmlen) scm_detach_fds()
315 cmlen = msg->msg_controllen; scm_detach_fds()
316 msg->msg_control += cmlen; scm_detach_fds()
317 msg->msg_controllen -= cmlen; scm_detach_fds()
321 msg->msg_flags |= MSG_CTRUNC; scm_detach_fds()
/linux-4.1.27/drivers/staging/i2o/
debug.c
8 static void i2o_report_fail_status(u8 req_status, u32 *msg);
19 u32 *msg = (u32 *) m; i2o_report_status() local
20 u8 cmd = (msg[1] >> 24) & 0xFF; i2o_report_status()
21 u8 req_status = (msg[4] >> 24) & 0xFF; i2o_report_status()
22 u16 detailed_status = msg[4] & 0xFFFF; i2o_report_status()
37 if (msg[0] & MSG_FAIL) { i2o_report_status()
38 i2o_report_fail_status(req_status, msg); i2o_report_status()
55 u32 *msg = (u32 *) m; i2o_dump_message() local
59 msg[0] >> 16 & 0xffff, msg); i2o_dump_message()
60 for (i = 0; i < ((msg[0] >> 16) & 0xffff); i++) i2o_dump_message()
61 printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]); i2o_dump_message()
70 static void i2o_report_fail_status(u8 req_status, u32 *msg) i2o_report_fail_status() argument
102 (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF); i2o_report_fail_status()
104 (msg[4] >> 8) & 0xFF, msg[4] & 0xFF); i2o_report_fail_status()
106 msg[5] >> 16, msg[5] & 0xFFF); i2o_report_fail_status()
108 printk(KERN_ERR " Severity: 0x%02X\n", (msg[4] >> 16) & 0xFF); i2o_report_fail_status()
109 if (msg[4] & (1 << 16)) i2o_report_fail_status()
111 "this msg can never be delivered/processed.\n"); i2o_report_fail_status()
112 if (msg[4] & (1 << 17)) i2o_report_fail_status()
114 "this msg can no longer be delivered/processed.\n"); i2o_report_fail_status()
115 if (msg[4] & (1 << 18)) i2o_report_fail_status()
118 if (msg[4] & (1 << 19)) i2o_report_fail_status()
iop.c
59 * set in msg. The returned message is the physical page frame offset
61 available returns I2O_QUEUE_EMPTY and msg is left untouched.
66 struct i2o_message *msg; i2o_msg_get_wait() local
68 while (IS_ERR(msg = i2o_msg_get(c))) { i2o_msg_get_wait()
77 return msg; i2o_msg_get_wait()
282 struct i2o_message *msg; i2o_iop_quiesce() local
293 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_iop_quiesce()
294 if (IS_ERR(msg)) i2o_iop_quiesce()
295 return PTR_ERR(msg); i2o_iop_quiesce()
297 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_iop_quiesce()
298 msg->u.head[1] = i2o_iop_quiesce()
303 if ((rc = i2o_msg_post_wait(c, msg, 240))) i2o_iop_quiesce()
323 struct i2o_message *msg; i2o_iop_enable() local
333 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_iop_enable()
334 if (IS_ERR(msg)) i2o_iop_enable()
335 return PTR_ERR(msg); i2o_iop_enable()
337 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_iop_enable()
338 msg->u.head[1] = i2o_iop_enable()
343 if ((rc = i2o_msg_post_wait(c, msg, 240))) i2o_iop_enable()
394 struct i2o_message *msg; i2o_iop_clear() local
397 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_iop_clear()
398 if (IS_ERR(msg)) i2o_iop_clear()
399 return PTR_ERR(msg); i2o_iop_clear()
404 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_iop_clear()
405 msg->u.head[1] = i2o_iop_clear()
409 if ((rc = i2o_msg_post_wait(c, msg, 30))) i2o_iop_clear()
433 struct i2o_message *msg; i2o_iop_init_outbound_queue() local
441 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_iop_init_outbound_queue()
442 if (IS_ERR(msg)) i2o_iop_init_outbound_queue()
443 return PTR_ERR(msg); i2o_iop_init_outbound_queue()
445 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); i2o_iop_init_outbound_queue()
446 msg->u.head[1] = i2o_iop_init_outbound_queue()
449 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); i2o_iop_init_outbound_queue()
450 msg->u.s.tcntxt = cpu_to_le32(0x00000000); i2o_iop_init_outbound_queue()
451 msg->body[0] = cpu_to_le32(PAGE_SIZE); i2o_iop_init_outbound_queue()
452 /* Outbound msg frame size in words and Initcode */ i2o_iop_init_outbound_queue()
453 msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80); i2o_iop_init_outbound_queue()
454 msg->body[2] = cpu_to_le32(0xd0000004); i2o_iop_init_outbound_queue()
455 msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys)); i2o_iop_init_outbound_queue()
456 msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys)); i2o_iop_init_outbound_queue()
458 i2o_msg_post(c, msg); i2o_iop_init_outbound_queue()
493 struct i2o_message *msg; i2o_iop_reset() local
500 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_iop_reset()
501 if (IS_ERR(msg)) i2o_iop_reset()
502 return PTR_ERR(msg); i2o_iop_reset()
509 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_iop_reset()
510 msg->u.head[1] = i2o_iop_reset()
513 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); i2o_iop_reset()
514 msg->u.s.tcntxt = cpu_to_le32(0x00000000); i2o_iop_reset()
515 msg->body[0] = cpu_to_le32(0x00000000); i2o_iop_reset()
516 msg->body[1] = cpu_to_le32(0x00000000); i2o_iop_reset()
517 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys)); i2o_iop_reset()
518 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys)); i2o_iop_reset()
520 i2o_msg_post(c, msg); i2o_iop_reset()
549 while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) { i2o_iop_reset()
552 rc = PTR_ERR(msg); i2o_iop_reset()
557 i2o_msg_nop(c, msg); i2o_iop_reset()
703 struct i2o_message *msg; i2o_iop_systab_set() local
714 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_iop_systab_set()
715 if (IS_ERR(msg)) i2o_iop_systab_set()
716 return PTR_ERR(msg); i2o_iop_systab_set()
721 i2o_msg_nop(c, msg); i2o_iop_systab_set()
725 msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6); i2o_iop_systab_set()
726 msg->u.head[1] = i2o_iop_systab_set()
736 msg->body[0] = cpu_to_le32(c->unit + 2); i2o_iop_systab_set()
737 msg->body[1] = cpu_to_le32(0x00000000); i2o_iop_systab_set()
738 msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len); i2o_iop_systab_set()
739 msg->body[3] = cpu_to_le32(i2o_systab.phys); i2o_iop_systab_set()
740 msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size); i2o_iop_systab_set()
741 msg->body[5] = cpu_to_le32(sb->current_mem_base); i2o_iop_systab_set()
742 msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size); i2o_iop_systab_set()
743 msg->body[6] = cpu_to_le32(sb->current_io_base); i2o_iop_systab_set()
745 rc = i2o_msg_post_wait(c, msg, 120); i2o_iop_systab_set()
922 struct i2o_message *msg; i2o_status_get() local
929 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_status_get()
930 if (IS_ERR(msg)) i2o_status_get()
931 return PTR_ERR(msg); i2o_status_get()
933 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_status_get()
934 msg->u.head[1] = i2o_status_get()
937 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); i2o_status_get()
938 msg->u.s.tcntxt = cpu_to_le32(0x00000000); i2o_status_get()
939 msg->body[0] = cpu_to_le32(0x00000000); i2o_status_get()
940 msg->body[1] = cpu_to_le32(0x00000000); i2o_status_get()
941 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys)); i2o_status_get()
942 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys)); i2o_status_get()
943 msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */ i2o_status_get()
945 i2o_msg_post(c, msg); i2o_status_get()
983 struct i2o_message *msg; i2o_hrt_get() local
985 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_hrt_get()
986 if (IS_ERR(msg)) i2o_hrt_get()
987 return PTR_ERR(msg); i2o_hrt_get()
989 msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4); i2o_hrt_get()
990 msg->u.head[1] = i2o_hrt_get()
993 msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len); i2o_hrt_get()
994 msg->body[1] = cpu_to_le32(c->hrt.phys); i2o_hrt_get()
996 rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt); i2o_hrt_get()
1167 struct i2o_message *msg; i2o_event_register() local
1169 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_event_register()
1170 if (IS_ERR(msg)) i2o_event_register()
1171 return PTR_ERR(msg); i2o_event_register()
1173 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_event_register()
1174 msg->u.head[1] = i2o_event_register()
1177 msg->u.s.icntxt = cpu_to_le32(drv->context); i2o_event_register()
1178 msg->u.s.tcntxt = cpu_to_le32(tcntxt); i2o_event_register()
1179 msg->body[0] = cpu_to_le32(evt_mask); i2o_event_register()
1181 i2o_msg_post(c, msg); i2o_event_register()
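Nearly every function in iop.c above repeats one pattern: i2o_msg_get_wait() to obtain a frame, fill msg->u.head[] (and msg->body[] if needed), then i2o_msg_post() or i2o_msg_post_wait(). A sketch of that pattern modelled on i2o_iop_quiesce(); I2O_CMD_SYS_QUIESCE, HOST_TID and ADAPTER_TID do not appear in the listing and are assumed from the staging driver's i2o.h:

    #include "i2o.h"        /* staging driver's local header (assumed) */

    static int i2o_send_quiesce(struct i2o_controller *c)
    {
            struct i2o_message *msg;

            msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
            if (IS_ERR(msg))
                    return PTR_ERR(msg);

            msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
            msg->u.head[1] = cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 |
                                         HOST_TID << 12 | ADAPTER_TID);

            /* 240-second timeout, matching the quiesce call in the listing */
            return i2o_msg_post_wait(c, msg, 240);
    }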
i2o_config.c
237 struct i2o_message *msg; i2o_cfg_swdl() local
263 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_cfg_swdl()
264 if (IS_ERR(msg)) i2o_cfg_swdl()
265 return PTR_ERR(msg); i2o_cfg_swdl()
268 i2o_msg_nop(c, msg); i2o_cfg_swdl()
273 i2o_msg_nop(c, msg); i2o_cfg_swdl()
278 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); i2o_cfg_swdl()
279 msg->u.head[1] = i2o_cfg_swdl()
282 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); i2o_cfg_swdl()
283 msg->u.head[3] = cpu_to_le32(0); i2o_cfg_swdl()
284 msg->body[0] = i2o_cfg_swdl()
288 msg->body[1] = cpu_to_le32(swlen); i2o_cfg_swdl()
289 msg->body[2] = cpu_to_le32(kxfer.sw_id); i2o_cfg_swdl()
290 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); i2o_cfg_swdl()
291 msg->body[4] = cpu_to_le32(buffer.phys); i2o_cfg_swdl()
294 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); i2o_cfg_swdl()
315 struct i2o_message *msg; i2o_cfg_swul() local
342 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_cfg_swul()
343 if (IS_ERR(msg)) i2o_cfg_swul()
344 return PTR_ERR(msg); i2o_cfg_swul()
347 i2o_msg_nop(c, msg); i2o_cfg_swul()
351 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); i2o_cfg_swul()
352 msg->u.head[1] = i2o_cfg_swul()
354 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); i2o_cfg_swul()
355 msg->u.head[3] = cpu_to_le32(0); i2o_cfg_swul()
356 msg->body[0] = i2o_cfg_swul()
359 msg->body[1] = cpu_to_le32(swlen); i2o_cfg_swul()
360 msg->body[2] = cpu_to_le32(kxfer.sw_id); i2o_cfg_swul()
361 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); i2o_cfg_swul()
362 msg->body[4] = cpu_to_le32(buffer.phys); i2o_cfg_swul()
365 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); i2o_cfg_swul()
388 struct i2o_message *msg; i2o_cfg_swdel() local
402 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_cfg_swdel()
403 if (IS_ERR(msg)) i2o_cfg_swdel()
404 return PTR_ERR(msg); i2o_cfg_swdel()
406 msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_cfg_swdel()
407 msg->u.head[1] = i2o_cfg_swdel()
409 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); i2o_cfg_swdel()
410 msg->u.head[3] = cpu_to_le32(0); i2o_cfg_swdel()
411 msg->body[0] = i2o_cfg_swdel()
413 msg->body[1] = cpu_to_le32(swlen); i2o_cfg_swdel()
414 msg->body[2] = cpu_to_le32(kxfer.sw_id); i2o_cfg_swdel()
416 token = i2o_msg_post_wait(c, msg, 10); i2o_cfg_swdel()
430 struct i2o_message *msg; i2o_cfg_validate() local
437 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_cfg_validate()
438 if (IS_ERR(msg)) i2o_cfg_validate()
439 return PTR_ERR(msg); i2o_cfg_validate()
441 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_cfg_validate()
442 msg->u.head[1] = i2o_cfg_validate()
444 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); i2o_cfg_validate()
445 msg->u.head[3] = cpu_to_le32(0); i2o_cfg_validate()
447 token = i2o_msg_post_wait(c, msg, 10); i2o_cfg_validate()
460 struct i2o_message *msg; i2o_cfg_evt_reg() local
479 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_cfg_evt_reg()
480 if (IS_ERR(msg)) i2o_cfg_evt_reg()
481 return PTR_ERR(msg); i2o_cfg_evt_reg()
483 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_cfg_evt_reg()
484 msg->u.head[1] = i2o_cfg_evt_reg()
487 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); i2o_cfg_evt_reg()
488 msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data)); i2o_cfg_evt_reg()
489 msg->body[0] = cpu_to_le32(kdesc.evt_mask); i2o_cfg_evt_reg()
491 i2o_msg_post(c, msg); i2o_cfg_evt_reg()
541 struct i2o_message *msg; i2o_cfg_passthru32() local
546 if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg)) i2o_cfg_passthru32()
574 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_cfg_passthru32()
575 if (IS_ERR(msg)) i2o_cfg_passthru32()
576 return PTR_ERR(msg); i2o_cfg_passthru32()
580 if (copy_from_user(msg, user_msg, size)) { i2o_cfg_passthru32()
584 i2o_dump_message(msg); i2o_cfg_passthru32()
600 sg_offset = (msg->u.head[0] >> 4) & 0x0f; i2o_cfg_passthru32()
611 sg = (struct sg_simple_element *)((&msg->u.head[0]) + i2o_cfg_passthru32()
665 rcode = i2o_msg_post_wait(c, msg, 60); i2o_cfg_passthru32()
666 msg = NULL; i2o_cfg_passthru32()
682 // get user msg size in u32s i2o_cfg_passthru32()
747 if (msg) i2o_cfg_passthru32()
748 i2o_msg_nop(c, msg); i2o_cfg_passthru32()
792 struct i2o_message *msg; i2o_cfg_passthru() local
795 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) i2o_cfg_passthru()
819 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_cfg_passthru()
820 if (IS_ERR(msg)) i2o_cfg_passthru()
821 return PTR_ERR(msg); i2o_cfg_passthru()
825 if (copy_from_user(msg, user_msg, size)) i2o_cfg_passthru()
842 sg_offset = (msg->u.head[0] >> 4) & 0x0f; i2o_cfg_passthru()
854 sg = (struct sg_simple_element *)((&msg->u.head[0]) + i2o_cfg_passthru()
905 rcode = i2o_msg_post_wait(c, msg, 60); i2o_cfg_passthru()
906 msg = NULL; i2o_cfg_passthru()
922 // get user msg size in u32s i2o_cfg_passthru()
988 if (msg) i2o_cfg_passthru()
989 i2o_msg_nop(c, msg); i2o_cfg_passthru()
exec-osm.c
54 struct i2o_message *msg; /* pointer to the reply message */ member in struct:i2o_exec_wait
106 * @msg: message to post
122 int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg, i2o_msg_post_wait_mem() argument
133 i2o_msg_nop(c, msg); i2o_msg_post_wait_mem()
148 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); i2o_msg_post_wait_mem()
150 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt); i2o_msg_post_wait_mem()
163 i2o_msg_post(c, msg); i2o_msg_post_wait_mem()
172 rc = le32_to_cpu(wait->msg->body[0]) >> 24; i2o_msg_post_wait_mem()
201 * @msg: pointer to the I2O reply message
217 struct i2o_message *msg, u32 context) i2o_msg_post_wait_complete()
237 wait->msg = msg; i2o_msg_post_wait_complete()
385 struct i2o_message *msg; i2o_exec_lct_notify() local
397 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_exec_lct_notify()
398 if (IS_ERR(msg)) { i2o_exec_lct_notify()
400 return PTR_ERR(msg); i2o_exec_lct_notify()
403 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); i2o_exec_lct_notify()
404 msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | i2o_exec_lct_notify()
406 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); i2o_exec_lct_notify()
407 msg->u.s.tcntxt = cpu_to_le32(0x00000000); i2o_exec_lct_notify()
408 msg->body[0] = cpu_to_le32(0xffffffff); i2o_exec_lct_notify()
409 msg->body[1] = cpu_to_le32(change_ind); i2o_exec_lct_notify()
410 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len); i2o_exec_lct_notify()
411 msg->body[3] = cpu_to_le32(c->dlct.phys); i2o_exec_lct_notify()
413 i2o_msg_post(c, msg); i2o_exec_lct_notify()
450 * @msg: pointer to the I2O reply message
461 struct i2o_message *msg) i2o_exec_reply()
465 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { i2o_exec_reply()
474 pm = le32_to_cpu(msg->body[3]); i2o_exec_reply()
478 i2o_report_status(KERN_INFO, "i2o_core", msg); i2o_exec_reply()
480 /* Release the preserved msg */ i2o_exec_reply()
483 context = le32_to_cpu(msg->u.s.tcntxt); i2o_exec_reply()
486 return i2o_msg_post_wait_complete(c, m, msg, context); i2o_exec_reply()
488 if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) { i2o_exec_reply()
512 i2o_dump_message(msg); i2o_exec_reply()
546 struct i2o_message *msg; i2o_exec_lct_get() local
551 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_exec_lct_get()
552 if (IS_ERR(msg)) i2o_exec_lct_get()
553 return PTR_ERR(msg); i2o_exec_lct_get()
555 msg->u.head[0] = i2o_exec_lct_get()
557 msg->u.head[1] = i2o_exec_lct_get()
560 msg->body[0] = cpu_to_le32(0xffffffff); i2o_exec_lct_get()
561 msg->body[1] = cpu_to_le32(0x00000000); i2o_exec_lct_get()
562 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len); i2o_exec_lct_get()
563 msg->body[3] = cpu_to_le32(c->dlct.phys); i2o_exec_lct_get()
565 rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET); i2o_exec_lct_get()
216 i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, struct i2o_message *msg, u32 context) i2o_msg_post_wait_complete() argument
460 i2o_exec_reply(struct i2o_controller *c, u32 m, struct i2o_message *msg) i2o_exec_reply() argument
i2o_block.c
138 struct i2o_message *msg; i2o_block_device_flush() local
140 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); i2o_block_device_flush()
141 if (IS_ERR(msg)) i2o_block_device_flush()
142 return PTR_ERR(msg); i2o_block_device_flush()
144 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_block_device_flush()
145 msg->u.head[1] = i2o_block_device_flush()
148 msg->body[0] = cpu_to_le32(60 << 16); i2o_block_device_flush()
151 return i2o_msg_post_wait(dev->iop, msg, 60); i2o_block_device_flush()
166 struct i2o_message *msg; i2o_block_device_mount() local
168 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); i2o_block_device_mount()
169 if (IS_ERR(msg)) i2o_block_device_mount()
170 return PTR_ERR(msg); i2o_block_device_mount()
172 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_block_device_mount()
173 msg->u.head[1] = i2o_block_device_mount()
176 msg->body[0] = cpu_to_le32(-1); i2o_block_device_mount()
177 msg->body[1] = cpu_to_le32(0x00000000); i2o_block_device_mount()
180 return i2o_msg_post_wait(dev->iop, msg, 2); i2o_block_device_mount()
195 struct i2o_message *msg; i2o_block_device_lock() local
197 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); i2o_block_device_lock()
198 if (IS_ERR(msg)) i2o_block_device_lock()
199 return PTR_ERR(msg); i2o_block_device_lock()
201 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_block_device_lock()
202 msg->u.head[1] = i2o_block_device_lock()
205 msg->body[0] = cpu_to_le32(-1); i2o_block_device_lock()
208 return i2o_msg_post_wait(dev->iop, msg, 2); i2o_block_device_lock()
223 struct i2o_message *msg; i2o_block_device_unlock() local
225 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); i2o_block_device_unlock()
226 if (IS_ERR(msg)) i2o_block_device_unlock()
227 return PTR_ERR(msg); i2o_block_device_unlock()
229 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_block_device_unlock()
230 msg->u.head[1] = i2o_block_device_unlock()
233 msg->body[0] = cpu_to_le32(media_id); i2o_block_device_unlock()
236 return i2o_msg_post_wait(dev->iop, msg, 2); i2o_block_device_unlock()
252 struct i2o_message *msg; i2o_block_device_power() local
255 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); i2o_block_device_power()
256 if (IS_ERR(msg)) i2o_block_device_power()
257 return PTR_ERR(msg); i2o_block_device_power()
259 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); i2o_block_device_power()
260 msg->u.head[1] = i2o_block_device_power()
263 msg->body[0] = cpu_to_le32(op << 24); i2o_block_device_power()
266 rc = i2o_msg_post_wait(c, msg, 60); i2o_block_device_power()
455 * @msg: the actual I2O message reply
461 struct i2o_message *msg) i2o_block_reply()
466 req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); i2o_block_reply()
477 if ((le32_to_cpu(msg->body[0]) >> 24) != 0) { i2o_block_reply()
478 u32 status = le32_to_cpu(msg->body[0]); i2o_block_reply()
492 "0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff), i2o_block_reply()
500 i2o_block_end_request(req, error, le32_to_cpu(msg->body[1])); i2o_block_reply()
730 struct i2o_message *msg; i2o_block_transfer() local
748 msg = i2o_msg_get(c); i2o_block_transfer()
749 if (IS_ERR(msg)) { i2o_block_transfer()
750 rc = PTR_ERR(msg); i2o_block_transfer()
760 msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context); i2o_block_transfer()
761 msg->u.s.tcntxt = cpu_to_le32(tcntxt); i2o_block_transfer()
763 mptr = &msg->body[0]; i2o_block_transfer()
820 msg->u.head[1] = i2o_block_transfer()
850 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); i2o_block_transfer()
864 msg->u.head[0] = i2o_block_transfer()
865 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset); i2o_block_transfer()
870 i2o_msg_post(c, msg); i2o_block_transfer()
878 i2o_msg_nop(c, msg); i2o_block_transfer()
460 i2o_block_reply(struct i2o_controller *c, u32 m, struct i2o_message *msg) i2o_block_reply() argument
/linux-4.1.27/fs/nfs/
nfs42proc.c
35 static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, _nfs42_proc_fallocate() argument
51 msg->rpc_argp = &args; _nfs42_proc_fallocate()
52 msg->rpc_resp = &res; _nfs42_proc_fallocate()
62 status = nfs4_call_sync(server->client, server, msg, _nfs42_proc_fallocate()
71 static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, nfs42_proc_fallocate() argument
79 err = _nfs42_proc_fallocate(msg, filep, offset, len); nfs42_proc_fallocate()
90 struct rpc_message msg = { nfs42_proc_allocate() local
101 err = nfs42_proc_fallocate(&msg, filep, offset, len); nfs42_proc_allocate()
111 struct rpc_message msg = { nfs42_proc_deallocate() local
123 err = nfs42_proc_fallocate(&msg, filep, offset, len); nfs42_proc_deallocate()
143 struct rpc_message msg = { nfs42_proc_llseek() local
159 status = nfs4_call_sync(server->client, server, &msg, nfs42_proc_llseek()
nfs3proc.c
31 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) nfs3_rpc_wrapper() argument
35 res = rpc_call_sync(clnt, msg, flags); nfs3_rpc_wrapper()
44 #define rpc_call_sync(clnt, msg, flags) nfs3_rpc_wrapper(clnt, msg, flags)
63 struct rpc_message msg = { do_proc_get_root() local
72 status = rpc_call_sync(client, &msg, 0); do_proc_get_root()
75 msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR]; do_proc_get_root()
76 msg.rpc_resp = info->fattr; do_proc_get_root()
77 status = rpc_call_sync(client, &msg, 0); do_proc_get_root()
105 struct rpc_message msg = { nfs3_proc_getattr() local
114 status = rpc_call_sync(server->client, &msg, 0); nfs3_proc_getattr()
128 struct rpc_message msg = { nfs3_proc_setattr() local
137 msg.rpc_cred = nfs_file_cred(sattr->ia_file); nfs3_proc_setattr()
139 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs3_proc_setattr()
160 struct rpc_message msg = { nfs3_proc_lookup() local
173 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs3_proc_lookup()
176 msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR]; nfs3_proc_lookup()
177 msg.rpc_argp = fhandle; nfs3_proc_lookup()
178 msg.rpc_resp = fattr; nfs3_proc_lookup()
179 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs3_proc_lookup()
192 struct rpc_message msg = { nfs3_proc_access() local
221 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs3_proc_access()
248 struct rpc_message msg = { nfs3_proc_readlink() local
258 msg.rpc_resp = fattr; nfs3_proc_readlink()
260 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs3_proc_readlink()
269 struct rpc_message msg; member in struct:nfs3_createdata
288 data->msg.rpc_argp = &data->arg; nfs3_alloc_createdata()
289 data->msg.rpc_resp = &data->res; nfs3_alloc_createdata()
303 status = rpc_call_sync(NFS_CLIENT(dir), &data->msg, 0); nfs3_do_create()
332 data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_CREATE]; nfs3_proc_create()
414 struct rpc_message msg = { nfs3_proc_remove() local
426 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs3_proc_remove()
435 nfs3_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) nfs3_proc_unlink_setup() argument
437 msg->rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE]; nfs3_proc_unlink_setup()
457 nfs3_proc_rename_setup(struct rpc_message *msg, struct inode *dir) nfs3_proc_rename_setup() argument
459 msg->rpc_proc = &nfs3_procedures[NFS3PROC_RENAME]; nfs3_proc_rename_setup()
492 struct rpc_message msg = { nfs3_proc_link() local
505 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs3_proc_link()
530 data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_SYMLINK]; nfs3_proc_symlink()
563 data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_MKDIR]; nfs3_proc_mkdir()
593 struct rpc_message msg = { nfs3_proc_rmdir() local
604 msg.rpc_resp = dir_attr; nfs3_proc_rmdir()
605 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs3_proc_rmdir()
640 struct rpc_message msg = { nfs3_proc_readdir() local
649 msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS]; nfs3_proc_readdir()
658 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs3_proc_readdir()
689 data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_MKNOD]; nfs3_proc_mknod()
733 struct rpc_message msg = { nfs3_proc_statfs() local
742 status = rpc_call_sync(server->client, &msg, 0); nfs3_proc_statfs()
751 struct rpc_message msg = { do_proc_fsinfo() local
760 status = rpc_call_sync(client, &msg, 0); do_proc_fsinfo()
785 struct rpc_message msg = { nfs3_proc_pathconf() local
794 status = rpc_call_sync(server->client, &msg, 0); nfs3_proc_pathconf()
815 struct rpc_message *msg) nfs3_proc_read_setup()
817 msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ]; nfs3_proc_read_setup()
842 struct rpc_message *msg) nfs3_proc_write_setup()
844 msg->rpc_proc = &nfs3_procedures[NFS3PROC_WRITE]; nfs3_proc_write_setup()
863 static void nfs3_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) nfs3_proc_commit_setup() argument
865 msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT]; nfs3_proc_commit_setup()
814 nfs3_proc_read_setup(struct nfs_pgio_header *hdr, struct rpc_message *msg) nfs3_proc_read_setup() argument
841 nfs3_proc_write_setup(struct nfs_pgio_header *hdr, struct rpc_message *msg) nfs3_proc_write_setup() argument
proc.c
58 struct rpc_message msg = { nfs_proc_get_root() local
67 status = rpc_call_sync(server->client, &msg, 0); nfs_proc_get_root()
70 status = rpc_call_sync(server->nfs_client->cl_rpcclient, &msg, 0); nfs_proc_get_root()
75 msg.rpc_proc = &nfs_procedures[NFSPROC_STATFS]; nfs_proc_get_root()
76 msg.rpc_resp = &fsinfo; nfs_proc_get_root()
77 status = rpc_call_sync(server->client, &msg, 0); nfs_proc_get_root()
80 status = rpc_call_sync(server->nfs_client->cl_rpcclient, &msg, 0); nfs_proc_get_root()
103 struct rpc_message msg = { nfs_proc_getattr() local
112 status = rpc_call_sync(server->client, &msg, 0); nfs_proc_getattr()
126 struct rpc_message msg = { nfs_proc_setattr() local
138 msg.rpc_cred = nfs_file_cred(sattr->ia_file); nfs_proc_setattr()
140 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs_proc_setattr()
161 struct rpc_message msg = { nfs_proc_lookup() local
170 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_proc_lookup()
184 struct rpc_message msg = { nfs_proc_readlink() local
191 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs_proc_readlink()
233 struct rpc_message msg = { nfs_proc_create() local
242 msg.rpc_argp = &data->arg; nfs_proc_create()
243 msg.rpc_resp = &data->res; nfs_proc_create()
244 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_proc_create()
262 struct rpc_message msg = { nfs_proc_mknod() local
282 msg.rpc_argp = &data->arg; nfs_proc_mknod()
283 msg.rpc_resp = &data->res; nfs_proc_mknod()
285 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_proc_mknod()
291 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_proc_mknod()
308 struct rpc_message msg = { nfs_proc_remove() local
315 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_proc_remove()
323 nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) nfs_proc_unlink_setup() argument
325 msg->rpc_proc = &nfs_procedures[NFSPROC_REMOVE]; nfs_proc_unlink_setup()
340 nfs_proc_rename_setup(struct rpc_message *msg, struct inode *dir) nfs_proc_rename_setup() argument
342 msg->rpc_proc = &nfs_procedures[NFSPROC_RENAME]; nfs_proc_rename_setup()
368 struct rpc_message msg = { nfs_proc_link() local
375 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs_proc_link()
396 struct rpc_message msg = { nfs_proc_symlink() local
413 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_proc_symlink()
436 struct rpc_message msg = { nfs_proc_mkdir() local
445 msg.rpc_argp = &data->arg; nfs_proc_mkdir()
446 msg.rpc_resp = &data->res; nfs_proc_mkdir()
448 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_proc_mkdir()
466 struct rpc_message msg = { nfs_proc_rmdir() local
473 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_proc_rmdir()
497 struct rpc_message msg = { nfs_proc_readdir() local
505 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_proc_readdir()
518 struct rpc_message msg = { nfs_proc_statfs() local
527 status = rpc_call_sync(server->client, &msg, 0); nfs_proc_statfs()
546 struct rpc_message msg = { nfs_proc_fsinfo() local
555 status = rpc_call_sync(server->client, &msg, 0); nfs_proc_fsinfo()
598 struct rpc_message *msg) nfs_proc_read_setup()
600 msg->rpc_proc = &nfs_procedures[NFSPROC_READ]; nfs_proc_read_setup()
618 struct rpc_message *msg) nfs_proc_write_setup()
622 msg->rpc_proc = &nfs_procedures[NFSPROC_WRITE]; nfs_proc_write_setup()
631 nfs_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) nfs_proc_commit_setup() argument
597 nfs_proc_read_setup(struct nfs_pgio_header *hdr, struct rpc_message *msg) nfs_proc_read_setup() argument
617 nfs_proc_write_setup(struct nfs_pgio_header *hdr, struct rpc_message *msg) nfs_proc_write_setup() argument
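Both proc.c and nfs3proc.c above reduce almost every NFS operation to the same shape: a stack-allocated struct rpc_message pointing at one entry of the procedure table plus its argument/result structs, handed to rpc_call_sync(). A minimal sketch, written as if it lived inside fs/nfs/proc.c (nfs_procedures is static there); the wrapper name is invented:

    static int nfs_getattr_sync(struct nfs_server *server,
                                struct nfs_fh *fhandle,
                                struct nfs_fattr *fattr)
    {
            struct rpc_message msg = {
                    .rpc_proc = &nfs_procedures[NFSPROC_GETATTR],
                    .rpc_argp = fhandle,    /* argument encoded by the XDR routine */
                    .rpc_resp = fattr,      /* result decoded into here */
            };

            return rpc_call_sync(server->client, &msg, 0);
    }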
/linux-4.1.27/net/mac80211/
trace_msg.h
21 __dynamic_array(char, msg, MAX_MSG_LEN)
25 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
30 TP_printk("%s", __get_str(msg))
/linux-4.1.27/tools/perf/ui/tui/
helpline.c
18 static void tui_helpline__push(const char *msg) tui_helpline__push() argument
24 SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols); tui_helpline__push()
26 strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; tui_helpline__push()
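The tui_helpline__push() line above relies on strncpy() returning its destination, so the copy and the forced NUL terminator fit in one statement. A standalone illustration of the idiom, with invented names:

    #include <string.h>

    #define HELPLINE_SZ 512
    static char helpline[HELPLINE_SZ];

    static void helpline_set(const char *msg)
    {
            /* copy at most HELPLINE_SZ bytes, then guarantee termination */
            strncpy(helpline, msg, HELPLINE_SZ)[HELPLINE_SZ - 1] = '\0';
    }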
/linux-4.1.27/include/linux/
htirq.h
10 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
11 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
21 struct ht_irq_msg *msg);
/linux-4.1.27/drivers/media/tuners/
tda827x.c
137 struct i2c_msg *msg, tuner_transfer()
145 rc = i2c_transfer(priv->i2c_adap, msg, size); tuner_transfer()
162 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, tda827xo_set_params() local
206 msg.len = 14; tda827xo_set_params()
207 rc = tuner_transfer(fe, &msg, 1); tda827xo_set_params()
215 msg.len = 2; tda827xo_set_params()
217 rc = tuner_transfer(fe, &msg, 1); tda827xo_set_params()
236 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, tda827xo_sleep() local
240 tuner_transfer(fe, &msg, 1); tda827xo_sleep()
258 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0 }; tda827xo_set_analog_params() local
288 msg.buf = tuner_reg; tda827xo_set_analog_params()
289 msg.len = 8; tda827xo_set_analog_params()
290 tuner_transfer(fe, &msg, 1); tda827xo_set_analog_params()
292 msg.buf = reg2; tda827xo_set_analog_params()
293 msg.len = 2; tda827xo_set_analog_params()
296 tuner_transfer(fe, &msg, 1); tda827xo_set_analog_params()
300 tuner_transfer(fe, &msg, 1); tda827xo_set_analog_params()
304 tuner_transfer(fe, &msg, 1); tda827xo_set_analog_params()
309 tuner_transfer(fe, &msg, 1); tda827xo_set_analog_params()
314 tuner_transfer(fe, &msg, 1); tda827xo_set_analog_params()
319 tuner_transfer(fe, &msg, 1); tda827xo_set_analog_params()
323 tuner_transfer(fe, &msg, 1); tda827xo_set_analog_params()
327 tuner_transfer(fe, &msg, 1); tda827xo_set_analog_params()
338 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, tda827xo_agcf() local
341 tuner_transfer(fe, &msg, 1); tda827xo_agcf()
448 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, tda827xa_sleep() local
453 tuner_transfer(fe, &msg, 1); tda827xa_sleep()
468 struct i2c_msg msg = { .flags = 0, .buf = buf, .len = sizeof(buf) }; tda827xa_lna_gain() local
474 msg.addr = priv->cfg->switch_addr; tda827xa_lna_gain()
504 tuner_transfer(fe, &msg, 1); tda827xa_lna_gain()
521 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, tda827xa_set_params() local
575 msg.len = 11; tda827xa_set_params()
576 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
585 msg.len = 5; tda827xa_set_params()
586 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
592 msg.len = 2; tda827xa_set_params()
593 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
598 msg.flags = I2C_M_RD; tda827xa_set_params()
599 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
602 msg.flags = 0; tda827xa_set_params()
610 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
617 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
623 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
630 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
637 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
645 rc = tuner_transfer(fe, &msg, 1); tda827xa_set_params()
668 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, tda827xa_set_analog_params() local
704 msg.len = 11; tda827xa_set_analog_params()
705 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
712 msg.len = 5; tda827xa_set_analog_params()
713 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
717 msg.len = 2; tda827xa_set_analog_params()
718 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
722 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
724 msg.flags = I2C_M_RD; tda827xa_set_analog_params()
725 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
726 msg.flags = 0; tda827xa_set_analog_params()
735 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
740 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
744 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
748 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
752 tuner_transfer(fe, &msg, 1); tda827xa_set_analog_params()
763 struct i2c_msg msg = {.addr = priv->i2c_addr, .flags = 0, tda827xa_agcf() local
765 tuner_transfer(fe, &msg, 1); tda827xa_agcf()
858 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = I2C_M_RD, tda827x_probe_version() local
861 rc = tuner_transfer(fe, &msg, 1); tda827x_probe_version()
865 __func__, msg.addr << 1); tda827x_probe_version()
136 tuner_transfer(struct dvb_frontend *fe, struct i2c_msg *msg, const int size) tuner_transfer() argument
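tuner_transfer() above is a thin wrapper around i2c_transfer(), and the tda827x code drives it with hand-filled struct i2c_msg entries (plain write, or I2C_M_RD for read). A kernel-context sketch of the generic write-then-read form of that API; the function name, register and addressing here are made up:

    #include <linux/i2c.h>

    static int read_tuner_reg(struct i2c_adapter *adap, u8 addr, u8 reg, u8 *val)
    {
            struct i2c_msg msgs[2] = {
                    { .addr = addr, .flags = 0,        .buf = &reg, .len = 1 },
                    { .addr = addr, .flags = I2C_M_RD, .buf = val,  .len = 1 },
            };
            int rc;

            rc = i2c_transfer(adap, msgs, 2);       /* returns #messages on success */
            if (rc < 0)
                    return rc;
            return rc == 2 ? 0 : -EIO;
    }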
/linux-4.1.27/net/wireless/
nl80211.c
581 static int nl80211_msg_put_channel(struct sk_buff *msg, nl80211_msg_put_channel() argument
592 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ, nl80211_msg_put_channel()
597 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED)) nl80211_msg_put_channel()
600 if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IR)) nl80211_msg_put_channel()
602 if (nla_put_flag(msg, __NL80211_FREQUENCY_ATTR_NO_IBSS)) nl80211_msg_put_channel()
606 if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) nl80211_msg_put_channel()
613 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE, nl80211_msg_put_channel()
616 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, nl80211_msg_put_channel()
619 if (nla_put_u32(msg, nl80211_msg_put_channel()
628 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) nl80211_msg_put_channel()
631 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) nl80211_msg_put_channel()
634 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) nl80211_msg_put_channel()
637 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) nl80211_msg_put_channel()
640 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY)) nl80211_msg_put_channel()
643 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_GO_CONCURRENT)) nl80211_msg_put_channel()
646 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ)) nl80211_msg_put_channel()
649 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ)) nl80211_msg_put_channel()
653 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, nl80211_msg_put_channel()
918 static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes) nl80211_put_iftypes() argument
920 struct nlattr *nl_modes = nla_nest_start(msg, attr); nl80211_put_iftypes()
928 if ((ifmodes & 1) && nla_put_flag(msg, i)) nl80211_put_iftypes()
934 nla_nest_end(msg, nl_modes); nl80211_put_iftypes()
942 struct sk_buff *msg, nl80211_put_iface_combinations()
948 nl_combis = nla_nest_start(msg, nl80211_put_iface_combinations()
959 nl_combi = nla_nest_start(msg, i + 1); nl80211_put_iface_combinations()
963 nl_limits = nla_nest_start(msg, NL80211_IFACE_COMB_LIMITS); nl80211_put_iface_combinations()
970 nl_limit = nla_nest_start(msg, j + 1); nl80211_put_iface_combinations()
973 if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, nl80211_put_iface_combinations()
976 if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, nl80211_put_iface_combinations()
979 nla_nest_end(msg, nl_limit); nl80211_put_iface_combinations()
982 nla_nest_end(msg, nl_limits); nl80211_put_iface_combinations()
985 nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH)) nl80211_put_iface_combinations()
987 if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, nl80211_put_iface_combinations()
989 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, nl80211_put_iface_combinations()
993 (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, nl80211_put_iface_combinations()
995 nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, nl80211_put_iface_combinations()
999 nla_nest_end(msg, nl_combi); nl80211_put_iface_combinations()
1002 nla_nest_end(msg, nl_combis); nl80211_put_iface_combinations()
1011 struct sk_buff *msg) nl80211_send_wowlan_tcp_caps()
1019 nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION); nl80211_send_wowlan_tcp_caps()
1023 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, nl80211_send_wowlan_tcp_caps()
1027 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, nl80211_send_wowlan_tcp_caps()
1031 if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ)) nl80211_send_wowlan_tcp_caps()
1034 if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN, nl80211_send_wowlan_tcp_caps()
1038 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL, nl80211_send_wowlan_tcp_caps()
1042 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD, nl80211_send_wowlan_tcp_caps()
1046 nla_nest_end(msg, nl_tcp); nl80211_send_wowlan_tcp_caps()
1050 static int nl80211_send_wowlan(struct sk_buff *msg, nl80211_send_wowlan() argument
1059 nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); nl80211_send_wowlan()
1064 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || nl80211_send_wowlan()
1066 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || nl80211_send_wowlan()
1068 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || nl80211_send_wowlan()
1070 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || nl80211_send_wowlan()
1072 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || nl80211_send_wowlan()
1074 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || nl80211_send_wowlan()
1076 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || nl80211_send_wowlan()
1078 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) nl80211_send_wowlan()
1089 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, nl80211_send_wowlan()
1095 nla_put_u32(msg, NL80211_WOWLAN_TRIG_NET_DETECT, nl80211_send_wowlan()
1099 if (large && nl80211_send_wowlan_tcp_caps(rdev, msg)) nl80211_send_wowlan()
1102 nla_nest_end(msg, nl_wowlan); nl80211_send_wowlan()
1108 static int nl80211_send_coalesce(struct sk_buff *msg, nl80211_send_coalesce() argument
1123 if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule)) nl80211_send_coalesce()
1129 static int nl80211_send_band_rateinfo(struct sk_buff *msg, nl80211_send_band_rateinfo() argument
1138 (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET, nl80211_send_band_rateinfo()
1141 nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA, nl80211_send_band_rateinfo()
1143 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, nl80211_send_band_rateinfo()
1145 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, nl80211_send_band_rateinfo()
1151 (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET, nl80211_send_band_rateinfo()
1154 nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA, nl80211_send_band_rateinfo()
1159 nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); nl80211_send_band_rateinfo()
1164 nl_rate = nla_nest_start(msg, i); nl80211_send_band_rateinfo()
1169 if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE, nl80211_send_band_rateinfo()
1173 nla_put_flag(msg, nl80211_send_band_rateinfo()
1177 nla_nest_end(msg, nl_rate); nl80211_send_band_rateinfo()
1180 nla_nest_end(msg, nl_rates); nl80211_send_band_rateinfo()
1186 nl80211_send_mgmt_stypes(struct sk_buff *msg, nl80211_send_mgmt_stypes() argument
1197 nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES); nl80211_send_mgmt_stypes()
1202 nl_ftypes = nla_nest_start(msg, ift); nl80211_send_mgmt_stypes()
1209 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, nl80211_send_mgmt_stypes()
1215 nla_nest_end(msg, nl_ftypes); nl80211_send_mgmt_stypes()
1218 nla_nest_end(msg, nl_ifs); nl80211_send_mgmt_stypes()
1220 nl_ifs = nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES); nl80211_send_mgmt_stypes()
1225 nl_ftypes = nla_nest_start(msg, ift); nl80211_send_mgmt_stypes()
1232 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, nl80211_send_mgmt_stypes()
1238 nla_nest_end(msg, nl_ftypes); nl80211_send_mgmt_stypes()
1240 nla_nest_end(msg, nl_ifs); nl80211_send_mgmt_stypes()
1254 struct sk_buff *msg, u32 portid, u32 seq, nl80211_send_wiphy()
1268 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); nl80211_send_wiphy()
1275 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_wiphy()
1276 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, nl80211_send_wiphy()
1278 nla_put_u32(msg, NL80211_ATTR_GENERATION, nl80211_send_wiphy()
1287 if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, nl80211_send_wiphy()
1289 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, nl80211_send_wiphy()
1291 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, nl80211_send_wiphy()
1293 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, nl80211_send_wiphy()
1295 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, nl80211_send_wiphy()
1297 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, nl80211_send_wiphy()
1299 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, nl80211_send_wiphy()
1301 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, nl80211_send_wiphy()
1303 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, nl80211_send_wiphy()
1305 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS, nl80211_send_wiphy()
1310 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN)) nl80211_send_wiphy()
1313 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH)) nl80211_send_wiphy()
1316 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD)) nl80211_send_wiphy()
1319 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT)) nl80211_send_wiphy()
1322 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT)) nl80211_send_wiphy()
1325 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP)) nl80211_send_wiphy()
1331 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, nl80211_send_wiphy()
1336 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, nl80211_send_wiphy()
1341 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE)) nl80211_send_wiphy()
1344 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, nl80211_send_wiphy()
1346 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, nl80211_send_wiphy()
1351 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, nl80211_send_wiphy()
1362 if (nla_put_u32(msg, nl80211_send_wiphy()
1365 nla_put_u32(msg, nl80211_send_wiphy()
1376 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES, nl80211_send_wiphy()
1383 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); nl80211_send_wiphy()
1396 nl_band = nla_nest_start(msg, band); nl80211_send_wiphy()
1402 if (nl80211_send_band_rateinfo(msg, sband)) nl80211_send_wiphy()
1410 msg, NL80211_BAND_ATTR_FREQS); nl80211_send_wiphy()
1417 nl_freq = nla_nest_start(msg, i); nl80211_send_wiphy()
1424 msg, chan, nl80211_send_wiphy()
1428 nla_nest_end(msg, nl_freq); nl80211_send_wiphy()
1436 nla_nest_end(msg, nl_freqs); nl80211_send_wiphy()
1439 nla_nest_end(msg, nl_band); nl80211_send_wiphy()
1448 nla_nest_end(msg, nl_bands); nl80211_send_wiphy()
1461 nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS); nl80211_send_wiphy()
1470 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ nl80211_send_wiphy()
1499 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS)) nl80211_send_wiphy()
1505 if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL)) nl80211_send_wiphy()
1519 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) nl80211_send_wiphy()
1542 if (nla_put_u32(msg, i, NL80211_CMD_CONNECT)) nl80211_send_wiphy()
1548 if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT)) nl80211_send_wiphy()
1552 nla_nest_end(msg, nl_cmds); nl80211_send_wiphy()
1559 nla_put_u32(msg, nl80211_send_wiphy()
1565 nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK)) nl80211_send_wiphy()
1568 if (nl80211_send_mgmt_stypes(msg, mgmt_stypes)) nl80211_send_wiphy()
1575 if (nl80211_send_wowlan(msg, rdev, state->split)) nl80211_send_wiphy()
1584 if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES, nl80211_send_wiphy()
1588 if (nl80211_put_iface_combinations(&rdev->wiphy, msg, nl80211_send_wiphy()
1597 nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, nl80211_send_wiphy()
1609 if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features)) nl80211_send_wiphy()
1613 nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK, nl80211_send_wiphy()
1620 nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX, nl80211_send_wiphy()
1638 (nla_put(msg, NL80211_ATTR_EXT_CAPA, nl80211_send_wiphy()
1641 nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, nl80211_send_wiphy()
1647 nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK, nl80211_send_wiphy()
1655 if (nl80211_send_coalesce(msg, rdev)) nl80211_send_wiphy()
1659 (nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) || nl80211_send_wiphy()
1660 nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ))) nl80211_send_wiphy()
1664 nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA, nl80211_send_wiphy()
1675 nested = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA); nl80211_send_wiphy()
1681 if (nla_put(msg, i + 1, sizeof(*info), info)) nl80211_send_wiphy()
1684 nla_nest_end(msg, nested); nl80211_send_wiphy()
1691 nested = nla_nest_start(msg, nl80211_send_wiphy()
1698 if (nla_put(msg, i + 1, sizeof(*info), info)) nl80211_send_wiphy()
1701 nla_nest_end(msg, nested); nl80211_send_wiphy()
1707 nla_put_u8(msg, NL80211_ATTR_MAX_CSA_COUNTERS, nl80211_send_wiphy()
1712 nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) nl80211_send_wiphy()
1715 if (nla_put(msg, NL80211_ATTR_EXT_FEATURES, nl80211_send_wiphy()
1725 genlmsg_end(msg, hdr); nl80211_send_wiphy()
1729 genlmsg_cancel(msg, hdr); nl80211_send_wiphy()
1848 struct sk_buff *msg; nl80211_get_wiphy() local
1852 msg = nlmsg_new(4096, GFP_KERNEL); nl80211_get_wiphy()
1853 if (!msg) nl80211_get_wiphy()
1856 if (nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY, msg, nl80211_get_wiphy()
1859 nlmsg_free(msg); nl80211_get_wiphy()
1863 return genlmsg_reply(msg, info); nl80211_get_wiphy()
2334 static int nl80211_send_chandef(struct sk_buff *msg, nl80211_send_chandef() argument
2340 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, nl80211_send_chandef()
2347 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, nl80211_send_chandef()
2354 if (nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, chandef->width)) nl80211_send_chandef()
2356 if (nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ1, chandef->center_freq1)) nl80211_send_chandef()
2359 nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2)) nl80211_send_chandef()
2364 static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, nl80211_send_iface() argument
2375 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); nl80211_send_iface()
2380 (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nl80211_send_iface()
2381 nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name))) nl80211_send_iface()
2384 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_iface()
2385 nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) || nl80211_send_iface()
2386 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || nl80211_send_iface()
2387 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) || nl80211_send_iface()
2388 nla_put_u32(msg, NL80211_ATTR_GENERATION, nl80211_send_iface()
2399 if (nl80211_send_chandef(msg, &chandef)) nl80211_send_iface()
2405 if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid)) nl80211_send_iface()
2409 genlmsg_end(msg, hdr); nl80211_send_iface()
2413 genlmsg_cancel(msg, hdr); nl80211_send_iface()
2462 struct sk_buff *msg; nl80211_get_interface() local
2466 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_interface()
2467 if (!msg) nl80211_get_interface()
2470 if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_interface()
2472 nlmsg_free(msg); nl80211_get_interface()
2476 return genlmsg_reply(msg, info); nl80211_get_interface()
2617 struct sk_buff *msg, *event; nl80211_new_interface() local
2664 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_new_interface()
2665 if (!msg) nl80211_new_interface()
2673 nlmsg_free(msg); nl80211_new_interface()
2676 nlmsg_free(msg); nl80211_new_interface()
2715 if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0, nl80211_new_interface()
2717 nlmsg_free(msg); nl80211_new_interface()
2735 return genlmsg_reply(msg, info); nl80211_new_interface()
2742 struct sk_buff *msg; nl80211_del_interface() local
2748 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_del_interface()
2749 if (msg && nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, true) < 0) { nl80211_del_interface()
2750 nlmsg_free(msg); nl80211_del_interface()
2751 msg = NULL; nl80211_del_interface()
2765 if (status >= 0 && msg) nl80211_del_interface()
2767 msg, 0, NL80211_MCGRP_CONFIG, nl80211_del_interface()
2770 nlmsg_free(msg); nl80211_del_interface()
2793 struct sk_buff *msg; member in struct:get_key_cookie
2804 nla_put(cookie->msg, NL80211_ATTR_KEY_DATA, get_key_callback()
2807 nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ, get_key_callback()
2810 nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER, get_key_callback()
2814 key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); get_key_callback()
2819 nla_put(cookie->msg, NL80211_KEY_DATA, get_key_callback()
2822 nla_put(cookie->msg, NL80211_KEY_SEQ, get_key_callback()
2825 nla_put_u32(cookie->msg, NL80211_KEY_CIPHER, get_key_callback()
2829 if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx)) get_key_callback()
2832 nla_nest_end(cookie->msg, key); get_key_callback()
2851 struct sk_buff *msg; nl80211_get_key() local
2879 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_key()
2880 if (!msg) nl80211_get_key()
2883 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_key()
2888 cookie.msg = msg; nl80211_get_key()
2891 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nl80211_get_key()
2892 nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx)) nl80211_get_key()
2895 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) nl80211_get_key()
2907 genlmsg_end(msg, hdr); nl80211_get_key()
2908 return genlmsg_reply(msg, info); nl80211_get_key()
2913 nlmsg_free(msg); nl80211_get_key()
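get_key_callback() and nl80211_get_key() above show the nested-attribute idiom that recurs through the rest of these hits: nla_nest_start() opens a container attribute, nla_put*() calls add its members, and nla_nest_end() closes it; other helpers in this file (for example cfg80211_net_detect_results() further down) use nla_nest_cancel() to roll back a partially written nest. A short sketch with invented attribute ids:

#include <net/netlink.h>

#define EXAMPLE_ATTR_KEY        1               /* invented container id */
#define EXAMPLE_KEY_IDX         1               /* invented member id */
#define EXAMPLE_KEY_DATA        2               /* invented member id */

static int example_put_key(struct sk_buff *msg, u8 idx,
                           const u8 *data, int len)
{
        struct nlattr *key;

        key = nla_nest_start(msg, EXAMPLE_ATTR_KEY);
        if (!key)
                return -ENOBUFS;

        if (nla_put_u8(msg, EXAMPLE_KEY_IDX, idx) ||
            nla_put(msg, EXAMPLE_KEY_DATA, len, data)) {
                nla_nest_cancel(msg, key);      /* trim the partial nest */
                return -ENOBUFS;
        }

        nla_nest_end(msg, key);
        return 0;
}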
3578 static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, nl80211_put_sta_rate() argument
3586 rate = nla_nest_start(msg, attr); nl80211_put_sta_rate()
3595 nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate)) nl80211_put_sta_rate()
3598 nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat)) nl80211_put_sta_rate()
3625 if (rate_flg && nla_put_flag(msg, rate_flg)) nl80211_put_sta_rate()
3629 if (nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) nl80211_put_sta_rate()
3632 nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) nl80211_put_sta_rate()
3635 if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_MCS, info->mcs)) nl80211_put_sta_rate()
3637 if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_NSS, info->nss)) nl80211_put_sta_rate()
3640 nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) nl80211_put_sta_rate()
3644 nla_nest_end(msg, rate); nl80211_put_sta_rate()
3648 static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal, nl80211_put_signal() argument
3657 attr = nla_nest_start(msg, id); nl80211_put_signal()
3665 if (nla_put_u8(msg, i, signal[i])) nl80211_put_signal()
3669 nla_nest_end(msg, attr); nl80211_put_signal()
3674 static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, nl80211_send_station() argument
3683 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); nl80211_send_station()
3687 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nl80211_send_station()
3688 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || nl80211_send_station()
3689 nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation)) nl80211_send_station()
3692 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); nl80211_send_station()
3698 nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \ nl80211_send_station()
3708 nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, nl80211_send_station()
3714 nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, nl80211_send_station()
3733 if (!nl80211_put_signal(msg, sinfo->chains, nl80211_send_station()
3739 if (!nl80211_put_signal(msg, sinfo->chains, nl80211_send_station()
3745 if (!nl80211_put_sta_rate(msg, &sinfo->txrate, nl80211_send_station()
3750 if (!nl80211_put_sta_rate(msg, &sinfo->rxrate, nl80211_send_station()
3766 bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); nl80211_send_station()
3771 nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) || nl80211_send_station()
3773 nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) || nl80211_send_station()
3775 nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) || nl80211_send_station()
3776 nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, nl80211_send_station()
3778 nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, nl80211_send_station()
3782 nla_nest_end(msg, bss_param); nl80211_send_station()
3785 nla_put(msg, NL80211_STA_INFO_STA_FLAGS, nl80211_send_station()
3801 tidsattr = nla_nest_start(msg, NL80211_STA_INFO_TID_STATS); nl80211_send_station()
3814 tidattr = nla_nest_start(msg, tid + 1); nl80211_send_station()
3820 nla_put_ ## type(msg, NL80211_TID_STATS_ ## attr, \ nl80211_send_station()
3831 nla_nest_end(msg, tidattr); nl80211_send_station()
3834 nla_nest_end(msg, tidsattr); nl80211_send_station()
3837 nla_nest_end(msg, sinfoattr); nl80211_send_station()
3840 nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, nl80211_send_station()
3844 genlmsg_end(msg, hdr); nl80211_send_station()
3848 genlmsg_cancel(msg, hdr); nl80211_send_station()
3910 struct sk_buff *msg; nl80211_get_station() local
3928 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_station()
3929 if (!msg) nl80211_get_station()
3932 if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION, nl80211_get_station()
3935 nlmsg_free(msg); nl80211_get_station()
3939 return genlmsg_reply(msg, info); nl80211_get_station()
4539 static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq, nl80211_send_mpath() argument
4547 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_MPATH); nl80211_send_mpath()
4551 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nl80211_send_mpath()
4552 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) || nl80211_send_mpath()
4553 nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) || nl80211_send_mpath()
4554 nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation)) nl80211_send_mpath()
4557 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); nl80211_send_mpath()
4561 nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN, nl80211_send_mpath()
4565 nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) || nl80211_send_mpath()
4567 nla_put_u32(msg, NL80211_MPATH_INFO_METRIC, nl80211_send_mpath()
4570 nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME, nl80211_send_mpath()
4573 nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS, nl80211_send_mpath()
4576 nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, nl80211_send_mpath()
4579 nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, nl80211_send_mpath()
4583 nla_nest_end(msg, pinfoattr); nl80211_send_mpath()
4585 genlmsg_end(msg, hdr); nl80211_send_mpath()
4589 genlmsg_cancel(msg, hdr); nl80211_send_mpath()
4650 struct sk_buff *msg; nl80211_get_mpath() local
4671 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_mpath()
4672 if (!msg) nl80211_get_mpath()
4675 if (nl80211_send_mpath(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_mpath()
4677 nlmsg_free(msg); nl80211_get_mpath()
4681 return genlmsg_reply(msg, info); nl80211_get_mpath()
4755 struct sk_buff *msg; nl80211_get_mpp() local
4776 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_mpp()
4777 if (!msg) nl80211_get_mpp()
4780 if (nl80211_send_mpath(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_mpp()
4782 nlmsg_free(msg); nl80211_get_mpp()
4786 return genlmsg_reply(msg, info); nl80211_get_mpp()
5024 struct sk_buff *msg; nl80211_get_mesh_config() local
5044 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_mesh_config()
5045 if (!msg) nl80211_get_mesh_config()
5047 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_mesh_config()
5051 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG); nl80211_get_mesh_config()
5054 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nl80211_get_mesh_config()
5055 nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, nl80211_get_mesh_config()
5057 nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, nl80211_get_mesh_config()
5059 nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, nl80211_get_mesh_config()
5061 nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, nl80211_get_mesh_config()
5063 nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES, nl80211_get_mesh_config()
5065 nla_put_u8(msg, NL80211_MESHCONF_TTL, nl80211_get_mesh_config()
5067 nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL, nl80211_get_mesh_config()
5069 nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nl80211_get_mesh_config()
5071 nla_put_u32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, nl80211_get_mesh_config()
5073 nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, nl80211_get_mesh_config()
5075 nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, nl80211_get_mesh_config()
5077 nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, nl80211_get_mesh_config()
5079 nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, nl80211_get_mesh_config()
5081 nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, nl80211_get_mesh_config()
5083 nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, nl80211_get_mesh_config()
5085 nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, nl80211_get_mesh_config()
5087 nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, nl80211_get_mesh_config()
5089 nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL, nl80211_get_mesh_config()
5091 nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, nl80211_get_mesh_config()
5093 nla_put_u8(msg, NL80211_MESHCONF_FORWARDING, nl80211_get_mesh_config()
5095 nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, nl80211_get_mesh_config()
5097 nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE, nl80211_get_mesh_config()
5099 nla_put_u32(msg, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, nl80211_get_mesh_config()
5101 nla_put_u16(msg, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, nl80211_get_mesh_config()
5103 nla_put_u16(msg, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, nl80211_get_mesh_config()
5105 nla_put_u32(msg, NL80211_MESHCONF_POWER_MODE, nl80211_get_mesh_config()
5107 nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW, nl80211_get_mesh_config()
5109 nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT, nl80211_get_mesh_config()
5112 nla_nest_end(msg, pinfoattr); nl80211_get_mesh_config()
5113 genlmsg_end(msg, hdr); nl80211_get_mesh_config()
5114 return genlmsg_reply(msg, info); nl80211_get_mesh_config()
5117 genlmsg_cancel(msg, hdr); nl80211_get_mesh_config()
5119 nlmsg_free(msg); nl80211_get_mesh_config()
5392 struct sk_buff *msg) nl80211_put_regdom()
5397 if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, regdom->alpha2) || nl80211_put_regdom()
5399 nla_put_u8(msg, NL80211_ATTR_DFS_REGION, regdom->dfs_region))) nl80211_put_regdom()
5402 nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES); nl80211_put_regdom()
5417 nl_reg_rule = nla_nest_start(msg, i); nl80211_put_regdom()
5426 if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS, nl80211_put_regdom()
5428 nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START, nl80211_put_regdom()
5430 nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END, nl80211_put_regdom()
5432 nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, nl80211_put_regdom()
5434 nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, nl80211_put_regdom()
5436 nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, nl80211_put_regdom()
5438 nla_put_u32(msg, NL80211_ATTR_DFS_CAC_TIME, nl80211_put_regdom()
5442 nla_nest_end(msg, nl_reg_rule); nl80211_put_regdom()
5445 nla_nest_end(msg, nl_reg_rules); nl80211_put_regdom()
5457 struct sk_buff *msg; nl80211_get_reg_do() local
5460 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_reg_do()
5461 if (!msg) nl80211_get_reg_do()
5464 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_reg_do()
5474 nlmsg_free(msg); nl80211_get_reg_do()
5485 nlmsg_free(msg); nl80211_get_reg_do()
5490 nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) nl80211_get_reg_do()
5495 nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE, nl80211_get_reg_do()
5504 if (nl80211_put_regdom(regdom, msg)) nl80211_get_reg_do()
5509 genlmsg_end(msg, hdr); nl80211_get_reg_do()
5510 return genlmsg_reply(msg, info); nl80211_get_reg_do()
5515 genlmsg_cancel(msg, hdr); nl80211_get_reg_do()
5517 nlmsg_free(msg); nl80211_get_reg_do()
5521 static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb, nl80211_send_regdom() argument
5525 void *hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags, nl80211_send_regdom()
5533 if (nl80211_put_regdom(regdom, msg)) nl80211_send_regdom()
5537 nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE, nl80211_send_regdom()
5542 nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) nl80211_send_regdom()
5546 nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) nl80211_send_regdom()
5549 genlmsg_end(msg, hdr); nl80211_send_regdom()
5553 genlmsg_cancel(msg, hdr); nl80211_send_regdom()
6517 static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, nl80211_send_bss() argument
6530 hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags, nl80211_send_bss()
6537 if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation)) nl80211_send_bss()
6540 nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) nl80211_send_bss()
6542 if (nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) nl80211_send_bss()
6545 bss = nla_nest_start(msg, NL80211_ATTR_BSS); nl80211_send_bss()
6549 nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid))) nl80211_send_bss()
6555 nla_put_flag(msg, NL80211_BSS_PRESP_DATA)) nl80211_send_bss()
6563 if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf)) nl80211_send_bss()
6565 if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS, nl80211_send_bss()
6573 if (nla_put_u64(msg, NL80211_BSS_BEACON_TSF, ies->tsf)) nl80211_send_bss()
6575 if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES, nl80211_send_bss()
6582 nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval)) nl80211_send_bss()
6584 if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) || nl80211_send_bss()
6585 nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) || nl80211_send_bss()
6586 nla_put_u32(msg, NL80211_BSS_CHAN_WIDTH, res->scan_width) || nl80211_send_bss()
6587 nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO, nl80211_send_bss()
6593 if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal)) nl80211_send_bss()
6597 if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal)) nl80211_send_bss()
6608 nla_put_u32(msg, NL80211_BSS_STATUS, nl80211_send_bss()
6614 nla_put_u32(msg, NL80211_BSS_STATUS, nl80211_send_bss()
6622 nla_nest_end(msg, bss); nl80211_send_bss()
6624 genlmsg_end(msg, hdr); nl80211_send_bss()
6630 genlmsg_cancel(msg, hdr); nl80211_send_bss()
6672 static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq, nl80211_send_survey() argument
6684 hdr = nl80211hdr_put(msg, portid, seq, flags, nl80211_send_survey()
6689 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) nl80211_send_survey()
6692 infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO); nl80211_send_survey()
6697 nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY, nl80211_send_survey()
6702 nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise)) nl80211_send_survey()
6705 nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE)) nl80211_send_survey()
6708 nla_put_u64(msg, NL80211_SURVEY_INFO_TIME, nl80211_send_survey()
6712 nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_BUSY, nl80211_send_survey()
6716 nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY, nl80211_send_survey()
6720 nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_RX, nl80211_send_survey()
6724 nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_TX, nl80211_send_survey()
6728 nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_SCAN, nl80211_send_survey()
6732 nla_nest_end(msg, infoattr); nl80211_send_survey()
6734 genlmsg_end(msg, hdr); nl80211_send_survey()
6738 genlmsg_cancel(msg, hdr); nl80211_send_survey()
7965 struct sk_buff *msg; nl80211_remain_on_channel() local
7993 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_remain_on_channel()
7994 if (!msg) nl80211_remain_on_channel()
7997 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_remain_on_channel()
8010 if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) nl80211_remain_on_channel()
8013 genlmsg_end(msg, hdr); nl80211_remain_on_channel()
8015 return genlmsg_reply(msg, info); nl80211_remain_on_channel()
8020 nlmsg_free(msg); nl80211_remain_on_channel()
8319 struct sk_buff *msg = NULL; nl80211_tx_mgmt() local
8404 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_tx_mgmt()
8405 if (!msg) nl80211_tx_mgmt()
8408 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_tx_mgmt()
8421 if (msg) { nl80211_tx_mgmt()
8422 if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) nl80211_tx_mgmt()
8425 genlmsg_end(msg, hdr); nl80211_tx_mgmt()
8426 return genlmsg_reply(msg, info); nl80211_tx_mgmt()
8434 nlmsg_free(msg); nl80211_tx_mgmt()
8507 struct sk_buff *msg; nl80211_get_power_save() local
8516 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_power_save()
8517 if (!msg) nl80211_get_power_save()
8520 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_power_save()
8532 if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state)) nl80211_get_power_save()
8535 genlmsg_end(msg, hdr); nl80211_get_power_save()
8536 return genlmsg_reply(msg, info); nl80211_get_power_save()
8541 nlmsg_free(msg); nl80211_get_power_save()
8751 static int nl80211_send_wowlan_patterns(struct sk_buff *msg, nl80211_send_wowlan_patterns() argument
8761 nl_pats = nla_nest_start(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN); nl80211_send_wowlan_patterns()
8766 nl_pat = nla_nest_start(msg, i + 1); nl80211_send_wowlan_patterns()
8770 if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8), nl80211_send_wowlan_patterns()
8772 nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len, nl80211_send_wowlan_patterns()
8774 nla_put_u32(msg, NL80211_PKTPAT_OFFSET, nl80211_send_wowlan_patterns()
8777 nla_nest_end(msg, nl_pat); nl80211_send_wowlan_patterns()
8779 nla_nest_end(msg, nl_pats); nl80211_send_wowlan_patterns()
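nl80211_send_wowlan_patterns() above encodes an array the usual netlink way: an outer container holds one numbered nest per entry, with nla_nest_start(msg, i + 1) supplying a 1-based index as the nest type. A hedged sketch of the same shape, with invented attribute ids and a trivial per-item payload:

#include <net/netlink.h>

#define EXAMPLE_ATTR_ITEMS      1               /* invented outer container */
#define EXAMPLE_ITEM_VALUE      1               /* invented per-item attribute */

static int example_put_items(struct sk_buff *msg, const u32 *vals, int n)
{
        struct nlattr *items, *item;
        int i;

        items = nla_nest_start(msg, EXAMPLE_ATTR_ITEMS);
        if (!items)
                return -ENOBUFS;

        for (i = 0; i < n; i++) {
                /* list convention: 1-based index used as the nest type */
                item = nla_nest_start(msg, i + 1);
                if (!item)
                        return -ENOBUFS;
                if (nla_put_u32(msg, EXAMPLE_ITEM_VALUE, vals[i]))
                        return -ENOBUFS;
                nla_nest_end(msg, item);
        }

        nla_nest_end(msg, items);
        return 0;
}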
8784 static int nl80211_send_wowlan_tcp(struct sk_buff *msg, nl80211_send_wowlan_tcp() argument
8792 nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION); nl80211_send_wowlan_tcp()
8796 if (nla_put_in_addr(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) || nl80211_send_wowlan_tcp()
8797 nla_put_in_addr(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) || nl80211_send_wowlan_tcp()
8798 nla_put(msg, NL80211_WOWLAN_TCP_DST_MAC, ETH_ALEN, tcp->dst_mac) || nl80211_send_wowlan_tcp()
8799 nla_put_u16(msg, NL80211_WOWLAN_TCP_SRC_PORT, tcp->src_port) || nl80211_send_wowlan_tcp()
8800 nla_put_u16(msg, NL80211_WOWLAN_TCP_DST_PORT, tcp->dst_port) || nl80211_send_wowlan_tcp()
8801 nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, nl80211_send_wowlan_tcp()
8803 nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL, nl80211_send_wowlan_tcp()
8805 nla_put(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD, nl80211_send_wowlan_tcp()
8807 nla_put(msg, NL80211_WOWLAN_TCP_WAKE_MASK, nl80211_send_wowlan_tcp()
8812 nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ, nl80211_send_wowlan_tcp()
8817 nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN, nl80211_send_wowlan_tcp()
8822 nla_nest_end(msg, nl_tcp); nl80211_send_wowlan_tcp()
8827 static int nl80211_send_wowlan_nd(struct sk_buff *msg, nl80211_send_wowlan_nd() argument
8836 nd = nla_nest_start(msg, NL80211_WOWLAN_TRIG_NET_DETECT); nl80211_send_wowlan_nd()
8840 if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->interval)) nl80211_send_wowlan_nd()
8843 if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay)) nl80211_send_wowlan_nd()
8846 freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); nl80211_send_wowlan_nd()
8851 nla_put_u32(msg, i, req->channels[i]->center_freq); nl80211_send_wowlan_nd()
8853 nla_nest_end(msg, freqs); nl80211_send_wowlan_nd()
8856 matches = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_MATCH); nl80211_send_wowlan_nd()
8858 match = nla_nest_start(msg, i); nl80211_send_wowlan_nd()
8859 nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID, nl80211_send_wowlan_nd()
8862 nla_nest_end(msg, match); nl80211_send_wowlan_nd()
8864 nla_nest_end(msg, matches); nl80211_send_wowlan_nd()
8867 nla_nest_end(msg, nd); nl80211_send_wowlan_nd()
8875 struct sk_buff *msg; nl80211_get_wowlan() local
8890 msg = nlmsg_new(size, GFP_KERNEL); nl80211_get_wowlan()
8891 if (!msg) nl80211_get_wowlan()
8894 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_wowlan()
8902 nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); nl80211_get_wowlan()
8907 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || nl80211_get_wowlan()
8909 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || nl80211_get_wowlan()
8911 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || nl80211_get_wowlan()
8913 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || nl80211_get_wowlan()
8915 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || nl80211_get_wowlan()
8917 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || nl80211_get_wowlan()
8919 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) nl80211_get_wowlan()
8922 if (nl80211_send_wowlan_patterns(msg, rdev)) nl80211_get_wowlan()
8925 if (nl80211_send_wowlan_tcp(msg, nl80211_get_wowlan()
8930 msg, nl80211_get_wowlan()
8934 nla_nest_end(msg, nl_wowlan); nl80211_get_wowlan()
8937 genlmsg_end(msg, hdr); nl80211_get_wowlan()
8938 return genlmsg_reply(msg, info); nl80211_get_wowlan()
8941 nlmsg_free(msg); nl80211_get_wowlan()
9323 static int nl80211_send_coalesce_rules(struct sk_buff *msg, nl80211_send_coalesce_rules() argument
9333 nl_rules = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE); nl80211_send_coalesce_rules()
9338 nl_rule = nla_nest_start(msg, i + 1); nl80211_send_coalesce_rules()
9343 if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY, nl80211_send_coalesce_rules()
9347 if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION, nl80211_send_coalesce_rules()
9351 nl_pats = nla_nest_start(msg, nl80211_send_coalesce_rules()
9357 nl_pat = nla_nest_start(msg, j + 1); nl80211_send_coalesce_rules()
9361 if (nla_put(msg, NL80211_PKTPAT_MASK, nl80211_send_coalesce_rules()
9364 nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len, nl80211_send_coalesce_rules()
9366 nla_put_u32(msg, NL80211_PKTPAT_OFFSET, nl80211_send_coalesce_rules()
9369 nla_nest_end(msg, nl_pat); nl80211_send_coalesce_rules()
9371 nla_nest_end(msg, nl_pats); nl80211_send_coalesce_rules()
9372 nla_nest_end(msg, nl_rule); nl80211_send_coalesce_rules()
9374 nla_nest_end(msg, nl_rules); nl80211_send_coalesce_rules()
9382 struct sk_buff *msg; nl80211_get_coalesce() local
9388 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_coalesce()
9389 if (!msg) nl80211_get_coalesce()
9392 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_coalesce()
9397 if (rdev->coalesce && nl80211_send_coalesce_rules(msg, rdev)) nl80211_get_coalesce()
9400 genlmsg_end(msg, hdr); nl80211_get_coalesce()
9401 return genlmsg_reply(msg, info); nl80211_get_coalesce()
9404 nlmsg_free(msg); nl80211_get_coalesce()
9655 struct sk_buff *msg; nl80211_probe_client() local
9671 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_probe_client()
9672 if (!msg) nl80211_probe_client()
9675 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_probe_client()
9688 if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) nl80211_probe_client()
9691 genlmsg_end(msg, hdr); nl80211_probe_client()
9693 return genlmsg_reply(msg, info); nl80211_probe_client()
9698 nlmsg_free(msg); nl80211_probe_client()
9784 struct sk_buff *msg; nl80211_get_protocol_features() local
9786 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_get_protocol_features()
9787 if (!msg) nl80211_get_protocol_features()
9790 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, nl80211_get_protocol_features()
9795 if (nla_put_u32(msg, NL80211_ATTR_PROTOCOL_FEATURES, nl80211_get_protocol_features()
9799 genlmsg_end(msg, hdr); nl80211_get_protocol_features()
9800 return genlmsg_reply(msg, info); nl80211_get_protocol_features()
9803 kfree_skb(msg); nl80211_get_protocol_features()
11044 struct sk_buff *msg; nl80211_notify_wiphy() local
11050 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_notify_wiphy()
11051 if (!msg) nl80211_notify_wiphy()
11054 if (nl80211_send_wiphy(rdev, cmd, msg, 0, 0, 0, &state) < 0) { nl80211_notify_wiphy()
11055 nlmsg_free(msg); nl80211_notify_wiphy()
11059 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_notify_wiphy()
11063 static int nl80211_add_scan_req(struct sk_buff *msg, nl80211_add_scan_req() argument
11073 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS); nl80211_add_scan_req()
11077 if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid)) nl80211_add_scan_req()
11080 nla_nest_end(msg, nest); nl80211_add_scan_req()
11082 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); nl80211_add_scan_req()
11086 if (nla_put_u32(msg, i, req->channels[i]->center_freq)) nl80211_add_scan_req()
11089 nla_nest_end(msg, nest); nl80211_add_scan_req()
11092 nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) nl80211_add_scan_req()
11096 nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags)) nl80211_add_scan_req()
11104 static int nl80211_send_scan_msg(struct sk_buff *msg, nl80211_send_scan_msg() argument
11112 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); nl80211_send_scan_msg()
11116 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_scan_msg()
11117 (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, nl80211_send_scan_msg()
11119 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) nl80211_send_scan_msg()
11123 nl80211_add_scan_req(msg, rdev); nl80211_send_scan_msg()
11125 genlmsg_end(msg, hdr); nl80211_send_scan_msg()
11129 genlmsg_cancel(msg, hdr); nl80211_send_scan_msg()
11134 nl80211_send_sched_scan_msg(struct sk_buff *msg, nl80211_send_sched_scan_msg() argument
11141 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); nl80211_send_sched_scan_msg()
11145 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_sched_scan_msg()
11146 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) nl80211_send_sched_scan_msg()
11149 genlmsg_end(msg, hdr); nl80211_send_sched_scan_msg()
11153 genlmsg_cancel(msg, hdr); nl80211_send_sched_scan_msg()
11160 struct sk_buff *msg; nl80211_send_scan_start() local
11162 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_send_scan_start()
11163 if (!msg) nl80211_send_scan_start()
11166 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0, nl80211_send_scan_start()
11168 nlmsg_free(msg); nl80211_send_scan_start()
11172 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_scan_start()
11179 struct sk_buff *msg; nl80211_build_scan_msg() local
11181 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_build_scan_msg()
11182 if (!msg) nl80211_build_scan_msg()
11185 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0, nl80211_build_scan_msg()
11188 nlmsg_free(msg); nl80211_build_scan_msg()
11192 return msg; nl80211_build_scan_msg()
11196 struct sk_buff *msg) nl80211_send_scan_result()
11198 if (!msg) nl80211_send_scan_result()
11201 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_scan_result()
11208 struct sk_buff *msg; nl80211_send_sched_scan_results() local
11210 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_send_sched_scan_results()
11211 if (!msg) nl80211_send_sched_scan_results()
11214 if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, nl80211_send_sched_scan_results()
11216 nlmsg_free(msg); nl80211_send_sched_scan_results()
11220 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_sched_scan_results()
11227 struct sk_buff *msg; nl80211_send_sched_scan() local
11229 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_send_sched_scan()
11230 if (!msg) nl80211_send_sched_scan()
11233 if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) { nl80211_send_sched_scan()
11234 nlmsg_free(msg); nl80211_send_sched_scan()
11238 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_sched_scan()
11242 static bool nl80211_reg_change_event_fill(struct sk_buff *msg, nl80211_reg_change_event_fill() argument
11246 if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator)) nl80211_reg_change_event_fill()
11250 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, nl80211_reg_change_event_fill()
11254 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, nl80211_reg_change_event_fill()
11259 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, nl80211_reg_change_event_fill()
11263 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, nl80211_reg_change_event_fill()
11265 nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, nl80211_reg_change_event_fill()
11274 nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx)) nl80211_reg_change_event_fill()
11279 nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) nl80211_reg_change_event_fill()
11296 struct sk_buff *msg; nl80211_common_reg_change_event() local
11299 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_common_reg_change_event()
11300 if (!msg) nl80211_common_reg_change_event()
11303 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd_id); nl80211_common_reg_change_event()
11305 nlmsg_free(msg); nl80211_common_reg_change_event()
11309 if (nl80211_reg_change_event_fill(msg, request) == false) nl80211_common_reg_change_event()
11312 genlmsg_end(msg, hdr); nl80211_common_reg_change_event()
11315 genlmsg_multicast_allns(&nl80211_fam, msg, 0, nl80211_common_reg_change_event()
11322 genlmsg_cancel(msg, hdr); nl80211_common_reg_change_event()
11323 nlmsg_free(msg); nl80211_common_reg_change_event()
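nl80211_common_reg_change_event() above is the event-side counterpart of the reply pattern: the header is written with portid and seq 0, and the finished message is pushed to a multicast group (genlmsg_multicast_allns() here; most other notifiers in these hits use genlmsg_multicast_netns() scoped to the wiphy's namespace). A minimal sketch, assuming an invented family, command, attribute and group index:

#include <net/genetlink.h>
#include <net/netlink.h>

static struct genl_family example_fam;          /* invented family */
#define EXAMPLE_CMD_NOTIFY      2               /* invented command */
#define EXAMPLE_ATTR_VALUE      1               /* invented attribute */

static void example_notify(struct net *net, u32 value)
{
        struct sk_buff *msg;
        void *hdr;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return;

        hdr = genlmsg_put(msg, 0, 0, &example_fam, 0, EXAMPLE_CMD_NOTIFY);
        if (!hdr)
                goto fail;

        if (nla_put_u32(msg, EXAMPLE_ATTR_VALUE, value))
                goto fail;

        genlmsg_end(msg, hdr);
        /* group 0 here is an assumed family-relative multicast group index */
        genlmsg_multicast_netns(&example_fam, net, msg, 0, 0, GFP_KERNEL);
        return;

fail:
        nlmsg_free(msg);
}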
11332 struct sk_buff *msg; nl80211_send_mlme_event() local
11335 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_send_mlme_event()
11336 if (!msg) nl80211_send_mlme_event()
11339 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); nl80211_send_mlme_event()
11341 nlmsg_free(msg); nl80211_send_mlme_event()
11345 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_mlme_event()
11346 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nl80211_send_mlme_event()
11347 nla_put(msg, NL80211_ATTR_FRAME, len, buf)) nl80211_send_mlme_event()
11352 nla_nest_start(msg, NL80211_ATTR_STA_WME); nl80211_send_mlme_event()
11356 if (nla_put_u8(msg, NL80211_STA_WME_UAPSD_QUEUES, nl80211_send_mlme_event()
11360 nla_nest_end(msg, nla_wmm); nl80211_send_mlme_event()
11363 genlmsg_end(msg, hdr); nl80211_send_mlme_event()
11365 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_mlme_event()
11370 genlmsg_cancel(msg, hdr); nl80211_send_mlme_event()
11371 nlmsg_free(msg); nl80211_send_mlme_event()
11432 struct sk_buff *msg; nl80211_send_mlme_timeout() local
11435 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_send_mlme_timeout()
11436 if (!msg) nl80211_send_mlme_timeout()
11439 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); nl80211_send_mlme_timeout()
11441 nlmsg_free(msg); nl80211_send_mlme_timeout()
11445 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_mlme_timeout()
11446 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nl80211_send_mlme_timeout()
11447 nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) || nl80211_send_mlme_timeout()
11448 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) nl80211_send_mlme_timeout()
11451 genlmsg_end(msg, hdr); nl80211_send_mlme_timeout()
11453 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_mlme_timeout()
11458 genlmsg_cancel(msg, hdr); nl80211_send_mlme_timeout()
11459 nlmsg_free(msg); nl80211_send_mlme_timeout()
11484 struct sk_buff *msg; nl80211_send_connect_result() local
11487 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_send_connect_result()
11488 if (!msg) nl80211_send_connect_result()
11491 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONNECT); nl80211_send_connect_result()
11493 nlmsg_free(msg); nl80211_send_connect_result()
11497 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_connect_result()
11498 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nl80211_send_connect_result()
11499 (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) || nl80211_send_connect_result()
11500 nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) || nl80211_send_connect_result()
11502 nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || nl80211_send_connect_result()
11504 nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie))) nl80211_send_connect_result()
11507 genlmsg_end(msg, hdr); nl80211_send_connect_result()
11509 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_connect_result()
11514 genlmsg_cancel(msg, hdr); nl80211_send_connect_result()
11515 nlmsg_free(msg); nl80211_send_connect_result()
11524 struct sk_buff *msg; nl80211_send_roamed() local
11527 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_send_roamed()
11528 if (!msg) nl80211_send_roamed()
11531 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ROAM); nl80211_send_roamed()
11533 nlmsg_free(msg); nl80211_send_roamed()
11537 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_roamed()
11538 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nl80211_send_roamed()
11539 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) || nl80211_send_roamed()
11541 nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || nl80211_send_roamed()
11543 nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie))) nl80211_send_roamed()
11546 genlmsg_end(msg, hdr); nl80211_send_roamed()
11548 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_roamed()
11553 genlmsg_cancel(msg, hdr); nl80211_send_roamed()
11554 nlmsg_free(msg); nl80211_send_roamed()
11562 struct sk_buff *msg; nl80211_send_disconnected() local
11565 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_send_disconnected()
11566 if (!msg) nl80211_send_disconnected()
11569 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT); nl80211_send_disconnected()
11571 nlmsg_free(msg); nl80211_send_disconnected()
11575 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_disconnected()
11576 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nl80211_send_disconnected()
11578 nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) || nl80211_send_disconnected()
11580 nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) || nl80211_send_disconnected()
11581 (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie))) nl80211_send_disconnected()
11584 genlmsg_end(msg, hdr); nl80211_send_disconnected()
11586 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_disconnected()
11591 genlmsg_cancel(msg, hdr); nl80211_send_disconnected()
11592 nlmsg_free(msg); nl80211_send_disconnected()
11600 struct sk_buff *msg; nl80211_send_ibss_bssid() local
11603 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_send_ibss_bssid()
11604 if (!msg) nl80211_send_ibss_bssid()
11607 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_JOIN_IBSS); nl80211_send_ibss_bssid()
11609 nlmsg_free(msg); nl80211_send_ibss_bssid()
11613 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_ibss_bssid()
11614 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nl80211_send_ibss_bssid()
11615 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) nl80211_send_ibss_bssid()
11618 genlmsg_end(msg, hdr); nl80211_send_ibss_bssid()
11620 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_ibss_bssid()
11625 genlmsg_cancel(msg, hdr); nl80211_send_ibss_bssid()
11626 nlmsg_free(msg); nl80211_send_ibss_bssid()
11634 struct sk_buff *msg; cfg80211_notify_new_peer_candidate() local
11642 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); cfg80211_notify_new_peer_candidate()
11643 if (!msg) cfg80211_notify_new_peer_candidate()
11646 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NEW_PEER_CANDIDATE); cfg80211_notify_new_peer_candidate()
11648 nlmsg_free(msg); cfg80211_notify_new_peer_candidate()
11652 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || cfg80211_notify_new_peer_candidate()
11653 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || cfg80211_notify_new_peer_candidate()
11654 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || cfg80211_notify_new_peer_candidate()
11656 nla_put(msg, NL80211_ATTR_IE, ie_len , ie))) cfg80211_notify_new_peer_candidate()
11659 genlmsg_end(msg, hdr); cfg80211_notify_new_peer_candidate()
11661 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_notify_new_peer_candidate()
11666 genlmsg_cancel(msg, hdr); cfg80211_notify_new_peer_candidate()
11667 nlmsg_free(msg); cfg80211_notify_new_peer_candidate()
11676 struct sk_buff *msg; nl80211_michael_mic_failure() local
11679 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_michael_mic_failure()
11680 if (!msg) nl80211_michael_mic_failure()
11683 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_MICHAEL_MIC_FAILURE); nl80211_michael_mic_failure()
11685 nlmsg_free(msg); nl80211_michael_mic_failure()
11689 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_michael_mic_failure()
11690 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nl80211_michael_mic_failure()
11691 (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) || nl80211_michael_mic_failure()
11692 nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) || nl80211_michael_mic_failure()
11694 nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) || nl80211_michael_mic_failure()
11695 (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc))) nl80211_michael_mic_failure()
11698 genlmsg_end(msg, hdr); nl80211_michael_mic_failure()
11700 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_michael_mic_failure()
11705 genlmsg_cancel(msg, hdr); nl80211_michael_mic_failure()
11706 nlmsg_free(msg); nl80211_michael_mic_failure()
11713 struct sk_buff *msg; nl80211_send_beacon_hint_event() local
11717 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); nl80211_send_beacon_hint_event()
11718 if (!msg) nl80211_send_beacon_hint_event()
11721 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_BEACON_HINT); nl80211_send_beacon_hint_event()
11723 nlmsg_free(msg); nl80211_send_beacon_hint_event()
11731 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) nl80211_send_beacon_hint_event()
11735 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); nl80211_send_beacon_hint_event()
11738 if (nl80211_msg_put_channel(msg, channel_before, false)) nl80211_send_beacon_hint_event()
11740 nla_nest_end(msg, nl_freq); nl80211_send_beacon_hint_event()
11743 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER); nl80211_send_beacon_hint_event()
11746 if (nl80211_msg_put_channel(msg, channel_after, false)) nl80211_send_beacon_hint_event()
11748 nla_nest_end(msg, nl_freq); nl80211_send_beacon_hint_event()
11750 genlmsg_end(msg, hdr); nl80211_send_beacon_hint_event()
11753 genlmsg_multicast_allns(&nl80211_fam, msg, 0, nl80211_send_beacon_hint_event()
11760 genlmsg_cancel(msg, hdr); nl80211_send_beacon_hint_event()
11761 nlmsg_free(msg); nl80211_send_beacon_hint_event()
11770 struct sk_buff *msg; nl80211_send_remain_on_chan_event() local
11773 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_send_remain_on_chan_event()
11774 if (!msg) nl80211_send_remain_on_chan_event()
11777 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); nl80211_send_remain_on_chan_event()
11779 nlmsg_free(msg); nl80211_send_remain_on_chan_event()
11783 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_remain_on_chan_event()
11784 (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, nl80211_send_remain_on_chan_event()
11786 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || nl80211_send_remain_on_chan_event()
11787 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || nl80211_send_remain_on_chan_event()
11788 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, nl80211_send_remain_on_chan_event()
11790 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) nl80211_send_remain_on_chan_event()
11794 nla_put_u32(msg, NL80211_ATTR_DURATION, duration)) nl80211_send_remain_on_chan_event()
11797 genlmsg_end(msg, hdr); nl80211_send_remain_on_chan_event()
11799 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_send_remain_on_chan_event()
11804 genlmsg_cancel(msg, hdr); nl80211_send_remain_on_chan_event()
11805 nlmsg_free(msg); nl80211_send_remain_on_chan_event()
11840 struct sk_buff *msg; cfg80211_new_sta() local
11844 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); cfg80211_new_sta()
11845 if (!msg) cfg80211_new_sta()
11848 if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION, 0, 0, 0, cfg80211_new_sta()
11850 nlmsg_free(msg); cfg80211_new_sta()
11854 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_new_sta()
11864 struct sk_buff *msg; cfg80211_del_sta_sinfo() local
11872 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); cfg80211_del_sta_sinfo()
11873 if (!msg) cfg80211_del_sta_sinfo()
11876 if (nl80211_send_station(msg, NL80211_CMD_DEL_STATION, 0, 0, 0, cfg80211_del_sta_sinfo()
11878 nlmsg_free(msg); cfg80211_del_sta_sinfo()
11882 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_del_sta_sinfo()
11893 struct sk_buff *msg; cfg80211_conn_failed() local
11896 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); cfg80211_conn_failed()
11897 if (!msg) cfg80211_conn_failed()
11900 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONN_FAILED); cfg80211_conn_failed()
11902 nlmsg_free(msg); cfg80211_conn_failed()
11906 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || cfg80211_conn_failed()
11907 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || cfg80211_conn_failed()
11908 nla_put_u32(msg, NL80211_ATTR_CONN_FAILED_REASON, reason)) cfg80211_conn_failed()
11911 genlmsg_end(msg, hdr); cfg80211_conn_failed()
11913 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_conn_failed()
11918 genlmsg_cancel(msg, hdr); cfg80211_conn_failed()
11919 nlmsg_free(msg); cfg80211_conn_failed()
11928 struct sk_buff *msg; __nl80211_unexpected_frame() local
11935 msg = nlmsg_new(100, gfp); __nl80211_unexpected_frame()
11936 if (!msg) __nl80211_unexpected_frame()
11939 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); __nl80211_unexpected_frame()
11941 nlmsg_free(msg); __nl80211_unexpected_frame()
11945 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || __nl80211_unexpected_frame()
11946 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || __nl80211_unexpected_frame()
11947 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) __nl80211_unexpected_frame()
11950 genlmsg_end(msg, hdr); __nl80211_unexpected_frame()
11951 genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); __nl80211_unexpected_frame()
11955 genlmsg_cancel(msg, hdr); __nl80211_unexpected_frame()
11956 nlmsg_free(msg); __nl80211_unexpected_frame()
12008 struct sk_buff *msg; nl80211_send_mgmt() local
12011 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_send_mgmt()
12012 if (!msg) nl80211_send_mgmt()
12015 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); nl80211_send_mgmt()
12017 nlmsg_free(msg); nl80211_send_mgmt()
12021 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_mgmt()
12022 (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, nl80211_send_mgmt()
12024 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || nl80211_send_mgmt()
12025 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || nl80211_send_mgmt()
12027 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || nl80211_send_mgmt()
12028 nla_put(msg, NL80211_ATTR_FRAME, len, buf) || nl80211_send_mgmt()
12030 nla_put_u32(msg, NL80211_ATTR_RXMGMT_FLAGS, flags))) nl80211_send_mgmt()
12033 genlmsg_end(msg, hdr); nl80211_send_mgmt()
12035 return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); nl80211_send_mgmt()
12038 genlmsg_cancel(msg, hdr); nl80211_send_mgmt()
12039 nlmsg_free(msg); nl80211_send_mgmt()
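nl80211_send_mgmt() and __nl80211_unexpected_frame() above deliver to a single registered listener instead of a group: the message is finished with genlmsg_end() and passed to genlmsg_unicast() together with the saved netlink portid. A sketch under the same kind of invented placeholders as the earlier examples:

#include <net/genetlink.h>
#include <net/netlink.h>

static struct genl_family example_fam;          /* invented family */
#define EXAMPLE_CMD_NOTIFY      2               /* invented command */
#define EXAMPLE_ATTR_VALUE      1               /* invented attribute */

static int example_notify_one(struct net *net, u32 portid, u32 value)
{
        struct sk_buff *msg;
        void *hdr;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!msg)
                return -ENOMEM;

        hdr = genlmsg_put(msg, 0, 0, &example_fam, 0, EXAMPLE_CMD_NOTIFY);
        if (!hdr)
                goto fail;

        if (nla_put_u32(msg, EXAMPLE_ATTR_VALUE, value))
                goto fail;

        genlmsg_end(msg, hdr);
        /* unicast to the one socket that registered for this event */
        return genlmsg_unicast(net, msg, portid);

fail:
        nlmsg_free(msg);
        return -ENOBUFS;
}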
12049 struct sk_buff *msg; cfg80211_mgmt_tx_status() local
12054 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); cfg80211_mgmt_tx_status()
12055 if (!msg) cfg80211_mgmt_tx_status()
12058 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME_TX_STATUS); cfg80211_mgmt_tx_status()
12060 nlmsg_free(msg); cfg80211_mgmt_tx_status()
12064 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || cfg80211_mgmt_tx_status()
12065 (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, cfg80211_mgmt_tx_status()
12067 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || cfg80211_mgmt_tx_status()
12068 nla_put(msg, NL80211_ATTR_FRAME, len, buf) || cfg80211_mgmt_tx_status()
12069 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || cfg80211_mgmt_tx_status()
12070 (ack && nla_put_flag(msg, NL80211_ATTR_ACK))) cfg80211_mgmt_tx_status()
12073 genlmsg_end(msg, hdr); cfg80211_mgmt_tx_status()
12075 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_mgmt_tx_status()
12080 genlmsg_cancel(msg, hdr); cfg80211_mgmt_tx_status()
12081 nlmsg_free(msg); cfg80211_mgmt_tx_status()
12090 struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); cfg80211_prepare_cqm() local
12093 if (!msg) cfg80211_prepare_cqm()
12096 cb = (void **)msg->cb; cfg80211_prepare_cqm()
12098 cb[0] = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM); cfg80211_prepare_cqm()
12100 nlmsg_free(msg); cfg80211_prepare_cqm()
12104 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || cfg80211_prepare_cqm()
12105 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) cfg80211_prepare_cqm()
12108 if (mac && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac)) cfg80211_prepare_cqm()
12111 cb[1] = nla_nest_start(msg, NL80211_ATTR_CQM); cfg80211_prepare_cqm()
12117 return msg; cfg80211_prepare_cqm()
12119 nlmsg_free(msg); cfg80211_prepare_cqm()
12123 static void cfg80211_send_cqm(struct sk_buff *msg, gfp_t gfp) cfg80211_send_cqm() argument
12125 void **cb = (void **)msg->cb; cfg80211_send_cqm()
12128 nla_nest_end(msg, cb[1]); cfg80211_send_cqm()
12129 genlmsg_end(msg, cb[0]); cfg80211_send_cqm()
12131 memset(msg->cb, 0, sizeof(msg->cb)); cfg80211_send_cqm()
12133 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_send_cqm()
12141 struct sk_buff *msg; cfg80211_cqm_rssi_notify() local
12149 msg = cfg80211_prepare_cqm(dev, NULL, gfp); cfg80211_cqm_rssi_notify()
12150 if (!msg) cfg80211_cqm_rssi_notify()
12153 if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, cfg80211_cqm_rssi_notify()
12157 cfg80211_send_cqm(msg, gfp); cfg80211_cqm_rssi_notify()
12162 nlmsg_free(msg); cfg80211_cqm_rssi_notify()
12170 struct sk_buff *msg; cfg80211_cqm_txe_notify() local
12172 msg = cfg80211_prepare_cqm(dev, peer, gfp); cfg80211_cqm_txe_notify()
12173 if (!msg) cfg80211_cqm_txe_notify()
12176 if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets)) cfg80211_cqm_txe_notify()
12179 if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate)) cfg80211_cqm_txe_notify()
12182 if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl)) cfg80211_cqm_txe_notify()
12185 cfg80211_send_cqm(msg, gfp); cfg80211_cqm_txe_notify()
12189 nlmsg_free(msg); cfg80211_cqm_txe_notify()
12196 struct sk_buff *msg; cfg80211_cqm_pktloss_notify() local
12200 msg = cfg80211_prepare_cqm(dev, peer, gfp); cfg80211_cqm_pktloss_notify()
12201 if (!msg) cfg80211_cqm_pktloss_notify()
12204 if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets)) cfg80211_cqm_pktloss_notify()
12207 cfg80211_send_cqm(msg, gfp); cfg80211_cqm_pktloss_notify()
12211 nlmsg_free(msg); cfg80211_cqm_pktloss_notify()
12217 struct sk_buff *msg; cfg80211_cqm_beacon_loss_notify() local
12219 msg = cfg80211_prepare_cqm(dev, NULL, gfp); cfg80211_cqm_beacon_loss_notify()
12220 if (!msg) cfg80211_cqm_beacon_loss_notify()
12223 if (nla_put_flag(msg, NL80211_ATTR_CQM_BEACON_LOSS_EVENT)) cfg80211_cqm_beacon_loss_notify()
12226 cfg80211_send_cqm(msg, gfp); cfg80211_cqm_beacon_loss_notify()
12230 nlmsg_free(msg); cfg80211_cqm_beacon_loss_notify()
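cfg80211_prepare_cqm() and cfg80211_send_cqm() above split construction across two helpers so that each CQM notifier only adds its specific attribute in between; the open genetlink header and nest pointers are parked in the skb control buffer (msg->cb) until the send step closes them and multicasts the result. A sketch of that split, assuming an invented family, command, nest attribute and multicast group rather than the real NL80211_CMD_NOTIFY_CQM plumbing:

#include <linux/skbuff.h>
#include <net/genetlink.h>
#include <net/netlink.h>

static struct genl_family example_fam;          /* invented family */
#define EXAMPLE_CMD_NOTIFY      2               /* invented command */
#define EXAMPLE_ATTR_NEST       3               /* invented nest attribute */

static struct sk_buff *example_prepare_event(gfp_t gfp)
{
        struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
        void **cb;

        if (!msg)
                return NULL;

        cb = (void **)msg->cb;
        cb[0] = genlmsg_put(msg, 0, 0, &example_fam, 0, EXAMPLE_CMD_NOTIFY);
        if (!cb[0])
                goto fail;

        cb[1] = nla_nest_start(msg, EXAMPLE_ATTR_NEST);
        if (!cb[1])
                goto fail;

        return msg;             /* caller adds its specific attributes next */

fail:
        nlmsg_free(msg);
        return NULL;
}

static void example_send_event(struct net *net, struct sk_buff *msg,
                               unsigned int group, gfp_t gfp)
{
        void **cb = (void **)msg->cb;

        nla_nest_end(msg, cb[1]);
        genlmsg_end(msg, cb[0]);
        memset(msg->cb, 0, sizeof(msg->cb));
        genlmsg_multicast_netns(&example_fam, net, msg, 0, group, gfp);
}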
12238 struct sk_buff *msg; nl80211_gtk_rekey_notify() local
12242 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_gtk_rekey_notify()
12243 if (!msg) nl80211_gtk_rekey_notify()
12246 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_REKEY_OFFLOAD); nl80211_gtk_rekey_notify()
12248 nlmsg_free(msg); nl80211_gtk_rekey_notify()
12252 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_gtk_rekey_notify()
12253 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nl80211_gtk_rekey_notify()
12254 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) nl80211_gtk_rekey_notify()
12257 rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA); nl80211_gtk_rekey_notify()
12261 if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR, nl80211_gtk_rekey_notify()
12265 nla_nest_end(msg, rekey_attr); nl80211_gtk_rekey_notify()
12267 genlmsg_end(msg, hdr); nl80211_gtk_rekey_notify()
12269 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_gtk_rekey_notify()
12274 genlmsg_cancel(msg, hdr); nl80211_gtk_rekey_notify()
12275 nlmsg_free(msg); nl80211_gtk_rekey_notify()
12295 struct sk_buff *msg; nl80211_pmksa_candidate_notify() local
12299 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_pmksa_candidate_notify()
12300 if (!msg) nl80211_pmksa_candidate_notify()
12303 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PMKSA_CANDIDATE); nl80211_pmksa_candidate_notify()
12305 nlmsg_free(msg); nl80211_pmksa_candidate_notify()
12309 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_pmksa_candidate_notify()
12310 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) nl80211_pmksa_candidate_notify()
12313 attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE); nl80211_pmksa_candidate_notify()
12317 if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) || nl80211_pmksa_candidate_notify()
12318 nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) || nl80211_pmksa_candidate_notify()
12320 nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH))) nl80211_pmksa_candidate_notify()
12323 nla_nest_end(msg, attr); nl80211_pmksa_candidate_notify()
12325 genlmsg_end(msg, hdr); nl80211_pmksa_candidate_notify()
12327 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_pmksa_candidate_notify()
12332 genlmsg_cancel(msg, hdr); nl80211_pmksa_candidate_notify()
12333 nlmsg_free(msg); nl80211_pmksa_candidate_notify()
12355 struct sk_buff *msg; nl80211_ch_switch_notify() local
12358 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_ch_switch_notify()
12359 if (!msg) nl80211_ch_switch_notify()
12362 hdr = nl80211hdr_put(msg, 0, 0, 0, notif); nl80211_ch_switch_notify()
12364 nlmsg_free(msg); nl80211_ch_switch_notify()
12368 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) nl80211_ch_switch_notify()
12371 if (nl80211_send_chandef(msg, chandef)) nl80211_ch_switch_notify()
12375 (nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count))) nl80211_ch_switch_notify()
12378 genlmsg_end(msg, hdr); nl80211_ch_switch_notify()
12380 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_ch_switch_notify()
12385 genlmsg_cancel(msg, hdr); nl80211_ch_switch_notify()
12386 nlmsg_free(msg); nl80211_ch_switch_notify()
12428 struct sk_buff *msg; nl80211_radar_notify() local
12431 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); nl80211_radar_notify()
12432 if (!msg) nl80211_radar_notify()
12435 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_RADAR_DETECT); nl80211_radar_notify()
12437 nlmsg_free(msg); nl80211_radar_notify()
12441 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) nl80211_radar_notify()
12448 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nl80211_radar_notify()
12449 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) nl80211_radar_notify()
12453 if (nla_put_u32(msg, NL80211_ATTR_RADAR_EVENT, event)) nl80211_radar_notify()
12456 if (nl80211_send_chandef(msg, chandef)) nl80211_radar_notify()
12459 genlmsg_end(msg, hdr); nl80211_radar_notify()
12461 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, nl80211_radar_notify()
12466 genlmsg_cancel(msg, hdr); nl80211_radar_notify()
12467 nlmsg_free(msg); nl80211_radar_notify()
12475 struct sk_buff *msg; cfg80211_probe_status() local
12480 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); cfg80211_probe_status()
12482 if (!msg) cfg80211_probe_status()
12485 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT); cfg80211_probe_status()
12487 nlmsg_free(msg); cfg80211_probe_status()
12491 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || cfg80211_probe_status()
12492 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || cfg80211_probe_status()
12493 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || cfg80211_probe_status()
12494 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || cfg80211_probe_status()
12495 (acked && nla_put_flag(msg, NL80211_ATTR_ACK))) cfg80211_probe_status()
12498 genlmsg_end(msg, hdr); cfg80211_probe_status()
12500 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_probe_status()
12505 genlmsg_cancel(msg, hdr); cfg80211_probe_status()
12506 nlmsg_free(msg); cfg80211_probe_status()
12515 struct sk_buff *msg; cfg80211_report_obss_beacon() local
12523 msg = nlmsg_new(len + 100, GFP_ATOMIC); cfg80211_report_obss_beacon()
12524 if (!msg) { cfg80211_report_obss_beacon()
12529 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); cfg80211_report_obss_beacon()
12533 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || cfg80211_report_obss_beacon()
12535 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) || cfg80211_report_obss_beacon()
12537 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || cfg80211_report_obss_beacon()
12538 nla_put(msg, NL80211_ATTR_FRAME, len, frame)) cfg80211_report_obss_beacon()
12541 genlmsg_end(msg, hdr); cfg80211_report_obss_beacon()
12543 genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, reg->nlportid); cfg80211_report_obss_beacon()
12551 genlmsg_cancel(msg, hdr); cfg80211_report_obss_beacon()
12552 nlmsg_free(msg); cfg80211_report_obss_beacon()
12557 static int cfg80211_net_detect_results(struct sk_buff *msg, cfg80211_net_detect_results() argument
12565 msg, NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS); cfg80211_net_detect_results()
12572 nl_match = nla_nest_start(msg, i); cfg80211_net_detect_results()
12583 if (nla_put(msg, NL80211_ATTR_SSID, match->ssid.ssid_len, cfg80211_net_detect_results()
12585 nla_nest_cancel(msg, nl_match); cfg80211_net_detect_results()
12591 msg, NL80211_ATTR_SCAN_FREQUENCIES); cfg80211_net_detect_results()
12593 nla_nest_cancel(msg, nl_match); cfg80211_net_detect_results()
12598 if (nla_put_u32(msg, j, match->channels[j])) { cfg80211_net_detect_results()
12599 nla_nest_cancel(msg, nl_freqs); cfg80211_net_detect_results()
12600 nla_nest_cancel(msg, nl_match); cfg80211_net_detect_results()
12605 nla_nest_end(msg, nl_freqs); cfg80211_net_detect_results()
12608 nla_nest_end(msg, nl_match); cfg80211_net_detect_results()
12612 nla_nest_end(msg, nl_results); cfg80211_net_detect_results()
12621 struct sk_buff *msg; cfg80211_report_wowlan_wakeup() local
12630 msg = nlmsg_new(size, gfp); cfg80211_report_wowlan_wakeup()
12631 if (!msg) cfg80211_report_wowlan_wakeup()
12634 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_WOWLAN); cfg80211_report_wowlan_wakeup()
12638 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || cfg80211_report_wowlan_wakeup()
12639 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) cfg80211_report_wowlan_wakeup()
12642 if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, cfg80211_report_wowlan_wakeup()
12649 reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); cfg80211_report_wowlan_wakeup()
12654 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) cfg80211_report_wowlan_wakeup()
12657 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) cfg80211_report_wowlan_wakeup()
12660 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) cfg80211_report_wowlan_wakeup()
12663 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) cfg80211_report_wowlan_wakeup()
12666 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) cfg80211_report_wowlan_wakeup()
12669 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)) cfg80211_report_wowlan_wakeup()
12673 nla_put_u32(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, cfg80211_report_wowlan_wakeup()
12678 nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH)) cfg80211_report_wowlan_wakeup()
12682 nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST)) cfg80211_report_wowlan_wakeup()
12686 nla_put_flag(msg, cfg80211_report_wowlan_wakeup()
12702 nla_put_u32(msg, len_attr, wakeup->packet_len)) cfg80211_report_wowlan_wakeup()
12705 if (nla_put(msg, pkt_attr, wakeup->packet_present_len, cfg80211_report_wowlan_wakeup()
12711 cfg80211_net_detect_results(msg, wakeup)) cfg80211_report_wowlan_wakeup()
12714 nla_nest_end(msg, reasons); cfg80211_report_wowlan_wakeup()
12717 genlmsg_end(msg, hdr); cfg80211_report_wowlan_wakeup()
12719 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_report_wowlan_wakeup()
12724 nlmsg_free(msg); cfg80211_report_wowlan_wakeup()
12735 struct sk_buff *msg; cfg80211_tdls_oper_request() local
12741 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); cfg80211_tdls_oper_request()
12742 if (!msg) cfg80211_tdls_oper_request()
12745 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_TDLS_OPER); cfg80211_tdls_oper_request()
12747 nlmsg_free(msg); cfg80211_tdls_oper_request()
12751 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || cfg80211_tdls_oper_request()
12752 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || cfg80211_tdls_oper_request()
12753 nla_put_u8(msg, NL80211_ATTR_TDLS_OPERATION, oper) || cfg80211_tdls_oper_request()
12754 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer) || cfg80211_tdls_oper_request()
12756 nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code))) cfg80211_tdls_oper_request()
12759 genlmsg_end(msg, hdr); cfg80211_tdls_oper_request()
12761 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_tdls_oper_request()
12766 genlmsg_cancel(msg, hdr); cfg80211_tdls_oper_request()
12767 nlmsg_free(msg); cfg80211_tdls_oper_request()
12852 struct sk_buff *msg; cfg80211_ft_event() local
12860 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); cfg80211_ft_event()
12861 if (!msg) cfg80211_ft_event()
12864 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT); cfg80211_ft_event()
12868 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || cfg80211_ft_event()
12869 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || cfg80211_ft_event()
12870 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap)) cfg80211_ft_event()
12874 nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies)) cfg80211_ft_event()
12877 nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, cfg80211_ft_event()
12881 genlmsg_end(msg, hdr); cfg80211_ft_event()
12883 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, cfg80211_ft_event()
12887 nlmsg_free(msg); cfg80211_ft_event()
12894 struct sk_buff *msg; cfg80211_crit_proto_stopped() local
12905 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); cfg80211_crit_proto_stopped()
12906 if (!msg) cfg80211_crit_proto_stopped()
12909 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CRIT_PROTOCOL_STOP); cfg80211_crit_proto_stopped()
12913 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || cfg80211_crit_proto_stopped()
12914 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) cfg80211_crit_proto_stopped()
12917 genlmsg_end(msg, hdr); cfg80211_crit_proto_stopped()
12919 genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); cfg80211_crit_proto_stopped()
12924 genlmsg_cancel(msg, hdr); cfg80211_crit_proto_stopped()
12925 nlmsg_free(msg); cfg80211_crit_proto_stopped()
12934 struct sk_buff *msg; nl80211_send_ap_stopped() local
12937 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); nl80211_send_ap_stopped()
12938 if (!msg) nl80211_send_ap_stopped()
12941 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_STOP_AP); nl80211_send_ap_stopped()
12945 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nl80211_send_ap_stopped()
12946 nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) || nl80211_send_ap_stopped()
12947 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev))) nl80211_send_ap_stopped()
12950 genlmsg_end(msg, hdr); nl80211_send_ap_stopped()
12952 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(wiphy), msg, 0, nl80211_send_ap_stopped()
12956 nlmsg_free(msg); nl80211_send_ap_stopped()
941 nl80211_put_iface_combinations(struct wiphy *wiphy, struct sk_buff *msg, bool large) nl80211_put_iface_combinations() argument
1010 nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev, struct sk_buff *msg) nl80211_send_wowlan_tcp_caps() argument
1252 nl80211_send_wiphy(struct cfg80211_registered_device *rdev, enum nl80211_commands cmd, struct sk_buff *msg, u32 portid, u32 seq, int flags, struct nl80211_dump_wiphy_state *state) nl80211_send_wiphy() argument
5391 nl80211_put_regdom(const struct ieee80211_regdomain *regdom, struct sk_buff *msg) nl80211_put_regdom() argument
11195 nl80211_send_scan_result(struct cfg80211_registered_device *rdev, struct sk_buff *msg) nl80211_send_scan_result() argument
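The nl80211 notifiers above all follow the same generic-netlink pattern: allocate an skb, open a header, add attributes, finalize, then multicast (or cancel and free on any failure). A minimal hedged sketch of that pattern using only the standard genetlink helpers; the family argument, MY_CMD_EVENT, MY_ATTR_VALUE and MY_MCGRP_ID are placeholders for this sketch, not nl80211 symbols.

#include <net/genetlink.h>

#define MY_CMD_EVENT	1	/* placeholder command id */
#define MY_ATTR_VALUE	1	/* placeholder attribute id */
#define MY_MCGRP_ID	0	/* placeholder multicast group offset */

static void my_notify_event(struct genl_family *family, u32 value, gfp_t gfp)
{
	struct sk_buff *msg;
	void *hdr;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!msg)
		return;

	hdr = genlmsg_put(msg, 0, 0, family, 0, MY_CMD_EVENT);
	if (!hdr) {
		nlmsg_free(msg);
		return;
	}

	if (nla_put_u32(msg, MY_ATTR_VALUE, value))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);
	genlmsg_multicast(family, msg, 0, MY_MCGRP_ID, gfp);
	return;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	nlmsg_free(msg);
}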
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/
H A Dfm10k_tlv.h115 #define fm10k_tlv_attr_put_u8(msg, attr_id, val) \
116 fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
117 #define fm10k_tlv_attr_put_u16(msg, attr_id, val) \
118 fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
119 #define fm10k_tlv_attr_put_u32(msg, attr_id, val) \
120 fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
121 #define fm10k_tlv_attr_put_u64(msg, attr_id, val) \
122 fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
123 #define fm10k_tlv_attr_put_s8(msg, attr_id, val) \
124 fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
125 #define fm10k_tlv_attr_put_s16(msg, attr_id, val) \
126 fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
127 #define fm10k_tlv_attr_put_s32(msg, attr_id, val) \
128 fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
129 #define fm10k_tlv_attr_put_s64(msg, attr_id, val) \
130 fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
H A Dfm10k_tlv.c25 * @msg: Pointer to message block
30 s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id) fm10k_tlv_msg_init() argument
33 if (!msg) fm10k_tlv_msg_init()
36 *msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id; fm10k_tlv_msg_init()
43 * @msg: Pointer to message block
51 s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, fm10k_tlv_attr_put_null_string() argument
58 if (!string || !msg) fm10k_tlv_attr_put_null_string()
61 attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; fm10k_tlv_attr_put_null_string()
63 /* copy string into local variable and then write to msg */ fm10k_tlv_attr_put_null_string()
87 *msg += FM10K_TLV_LEN_ALIGN(len); fm10k_tlv_attr_put_null_string()
120 * @msg: Pointer to message block
128 s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id, fm10k_tlv_attr_put_mac_vlan() argument
135 if (!msg || !mac_addr) fm10k_tlv_attr_put_mac_vlan()
138 attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; fm10k_tlv_attr_put_mac_vlan()
143 /* copy value into local variable and then write to msg */ fm10k_tlv_attr_put_mac_vlan()
150 *msg += FM10K_TLV_LEN_ALIGN(len); fm10k_tlv_attr_put_mac_vlan()
180 * @msg: Pointer to message block
188 s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id) fm10k_tlv_attr_put_bool() argument
191 if (!msg) fm10k_tlv_attr_put_bool()
195 msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id; fm10k_tlv_attr_put_bool()
198 *msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; fm10k_tlv_attr_put_bool()
205 * @msg: Pointer to message block
212 * that msg is a valid pointer, and len is 1, 2, 4, or 8.
214 s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len) fm10k_tlv_attr_put_value() argument
218 /* verify non-null msg and len is 1, 2, 4, or 8 */ fm10k_tlv_attr_put_value()
219 if (!msg || !len || len > 8 || (len & (len - 1))) fm10k_tlv_attr_put_value()
222 attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; fm10k_tlv_attr_put_value()
238 *msg += FM10K_TLV_LEN_ALIGN(len); fm10k_tlv_attr_put_value()
277 * @msg: Pointer to message block
286 s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id, fm10k_tlv_attr_put_le_struct() argument
293 /* verify non-null msg and len is in 32 bit words */ fm10k_tlv_attr_put_le_struct()
294 if (!msg || !len || (len % 4)) fm10k_tlv_attr_put_le_struct()
297 attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; fm10k_tlv_attr_put_le_struct()
309 *msg += FM10K_TLV_LEN_ALIGN(len); fm10k_tlv_attr_put_le_struct()
347 * @msg: Pointer to message block
356 u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) fm10k_tlv_attr_nest_start() argument
361 if (!msg) fm10k_tlv_attr_nest_start()
364 attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; fm10k_tlv_attr_nest_start()
374 * @msg: Pointer to message block
381 s32 fm10k_tlv_attr_nest_stop(u32 *msg) fm10k_tlv_attr_nest_stop() argument
387 if (!msg) fm10k_tlv_attr_nest_stop()
391 attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; fm10k_tlv_attr_nest_stop()
397 *msg += len; fm10k_tlv_attr_nest_stop()
548 * @msg: Pointer to message
558 s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg, fm10k_tlv_msg_parse() argument
567 if (!msg || !data) fm10k_tlv_msg_parse()
571 if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))) fm10k_tlv_msg_parse()
575 msg_id = *msg & FM10K_TLV_ID_MASK; fm10k_tlv_msg_parse()
587 err = fm10k_tlv_attr_parse(msg, results, data->attr); fm10k_tlv_msg_parse()
648 * @msg: Pointer to message
653 static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags) fm10k_tlv_msg_test_generate_data() argument
656 fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING, fm10k_tlv_msg_test_generate_data()
659 fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR, fm10k_tlv_msg_test_generate_data()
662 fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8); fm10k_tlv_msg_test_generate_data()
664 fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16); fm10k_tlv_msg_test_generate_data()
666 fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32); fm10k_tlv_msg_test_generate_data()
668 fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64); fm10k_tlv_msg_test_generate_data()
670 fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8); fm10k_tlv_msg_test_generate_data()
672 fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16); fm10k_tlv_msg_test_generate_data()
674 fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32); fm10k_tlv_msg_test_generate_data()
676 fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64); fm10k_tlv_msg_test_generate_data()
678 fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT, fm10k_tlv_msg_test_generate_data()
684 * @msg: Pointer to message
690 void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) fm10k_tlv_msg_test_create() argument
694 fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST); fm10k_tlv_msg_test_create()
696 fm10k_tlv_msg_test_generate_data(msg, attr_flags); fm10k_tlv_msg_test_create()
702 nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED); fm10k_tlv_msg_test_create()
706 fm10k_tlv_attr_nest_stop(msg); fm10k_tlv_msg_test_create()
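The fm10k helpers above build messages out of 32-bit words, each attribute carrying an id/length header word followed by its value padded to a 32-bit boundary. A generic user-space sketch of that TLV-append idea; the exact id/length packing here is an illustrative assumption, not the fm10k wire format.

#include <stdint.h>
#include <string.h>

/* append one attribute: header word (id in low 16 bits, byte length in high
 * 16 bits), then the value padded to the next 32-bit boundary; returns the
 * new end-of-message position */
static uint32_t *tlv_put(uint32_t *pos, uint16_t id, const void *val, uint16_t len)
{
	*pos++ = (uint32_t)id | ((uint32_t)len << 16);
	memcpy(pos, val, len);
	return pos + ((len + 3) / 4);
}

int main(void)
{
	uint32_t buf[16];
	uint32_t val = 0xdeadbeef;
	uint32_t *end = tlv_put(buf, 7, &val, sizeof(val));

	return (int)(end - buf);	/* 2: one header word + one value word */
}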
/linux-4.1.27/drivers/media/pci/ngene/
H A Dngene-i2c.c119 struct i2c_msg msg[], int num) ngene_i2c_master_xfer()
128 if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD)) ngene_i2c_master_xfer()
129 if (!ngene_command_i2c_read(dev, msg[0].addr, ngene_i2c_master_xfer()
130 msg[0].buf, msg[0].len, ngene_i2c_master_xfer()
131 msg[1].buf, msg[1].len, 0)) ngene_i2c_master_xfer()
134 if (num == 1 && !(msg[0].flags & I2C_M_RD)) ngene_i2c_master_xfer()
135 if (!ngene_command_i2c_write(dev, msg[0].addr, ngene_i2c_master_xfer()
136 msg[0].buf, msg[0].len)) ngene_i2c_master_xfer()
138 if (num == 1 && (msg[0].flags & I2C_M_RD)) ngene_i2c_master_xfer()
139 if (!ngene_command_i2c_read(dev, msg[0].addr, NULL, 0, ngene_i2c_master_xfer()
140 msg[0].buf, msg[0].len, 0)) ngene_i2c_master_xfer()
118 ngene_i2c_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msg[], int num) ngene_i2c_master_xfer() argument
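ngene_i2c_master_xfer() above special-cases the common write-then-read message pair (the first message writes a register address, the second reads the data back). A hedged sketch of how a caller builds that pair with the standard i2c_msg/i2c_transfer() interface; my_read_reg() and the one-byte register layout are assumptions for illustration.

#include <linux/errno.h>
#include <linux/i2c.h>

static int my_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msg[2] = {
		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msg, 2);	/* one combined transaction */

	return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}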
/linux-4.1.27/drivers/net/wireless/rsi/
H A Drsi_91x_pkt.c133 __le16 *msg = NULL; rsi_send_mgmt_pkt() local
168 msg = (__le16 *)skb->data; rsi_send_mgmt_pkt()
175 msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | rsi_send_mgmt_pkt()
177 msg[1] = cpu_to_le16(TX_DOT11_MGMT); rsi_send_mgmt_pkt()
178 msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8); rsi_send_mgmt_pkt()
179 msg[3] = cpu_to_le16(RATE_INFO_ENABLE); rsi_send_mgmt_pkt()
180 msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4); rsi_send_mgmt_pkt()
183 msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT); rsi_send_mgmt_pkt()
186 msg[4] = cpu_to_le16(RSI_11B_MODE); rsi_send_mgmt_pkt()
188 msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE); rsi_send_mgmt_pkt()
191 msg[4] = cpu_to_le16(0xB | RSI_11G_MODE); rsi_send_mgmt_pkt()
192 msg[5] = cpu_to_le16(0x6); rsi_send_mgmt_pkt()
197 msg[1] |= cpu_to_le16(BIT(10)); rsi_send_mgmt_pkt()
198 msg[7] = cpu_to_le16(PROBEREQ_CONFIRM); rsi_send_mgmt_pkt()
202 msg[7] |= cpu_to_le16(vap_id << 8); rsi_send_mgmt_pkt()
205 (u8 *)msg, rsi_send_mgmt_pkt()
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dradeon_dp_auxch.c55 radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) radeon_dp_aux_transfer_native() argument
65 u8 *buf = msg->buffer; radeon_dp_aux_transfer_native()
71 if (WARN_ON(msg->size > 16)) radeon_dp_aux_transfer_native()
74 switch (msg->request & ~DP_AUX_I2C_MOT) { radeon_dp_aux_transfer_native()
89 if (msg->size) { radeon_dp_aux_transfer_native()
90 msize = msg->size - 1; radeon_dp_aux_transfer_native()
93 bytes += msg->size; radeon_dp_aux_transfer_native()
119 /* request, address, msg size */ radeon_dp_aux_transfer_native()
120 byte = (msg->request << 4) | ((msg->address >> 16) & 0xf); radeon_dp_aux_transfer_native()
124 byte = (msg->address >> 8) & 0xff; radeon_dp_aux_transfer_native()
128 byte = msg->address & 0xff; radeon_dp_aux_transfer_native()
136 /* if we are writing - write the msg buffer */ radeon_dp_aux_transfer_native()
138 for (i = 0; i < msg->size; i++) { radeon_dp_aux_transfer_native()
197 ret = msg->size; radeon_dp_aux_transfer_native()
202 msg->reply = ack >> 4; radeon_dp_aux_transfer_native()
H A Dradeon_uvd.c310 static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) radeon_uvd_cs_msg_decode() argument
312 unsigned stream_type = msg[4]; radeon_uvd_cs_msg_decode()
313 unsigned width = msg[6]; radeon_uvd_cs_msg_decode()
314 unsigned height = msg[7]; radeon_uvd_cs_msg_decode()
315 unsigned dpb_size = msg[9]; radeon_uvd_cs_msg_decode()
316 unsigned pitch = msg[28]; radeon_uvd_cs_msg_decode()
424 int32_t *msg, msg_type, handle; radeon_uvd_cs_msg() local
451 msg = ptr + offset; radeon_uvd_cs_msg()
453 msg_type = msg[1]; radeon_uvd_cs_msg()
454 handle = msg[2]; radeon_uvd_cs_msg()
463 /* it's a create msg, calc image size (width * height) */ radeon_uvd_cs_msg()
464 img_size = msg[7] * msg[8]; radeon_uvd_cs_msg()
466 r = radeon_uvd_validate_codec(p, msg[4]); radeon_uvd_cs_msg()
489 /* it's a decode msg, validate codec and calc buffer sizes */ radeon_uvd_cs_msg()
490 r = radeon_uvd_validate_codec(p, msg[4]); radeon_uvd_cs_msg()
492 r = radeon_uvd_cs_msg_decode(msg, buf_sizes); radeon_uvd_cs_msg()
512 /* it's a destroy msg, free the handle */ radeon_uvd_cs_msg()
582 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", radeon_uvd_cs_reloc()
644 /* does the IB have a msg command */ radeon_uvd_cs_parse()
688 DRM_ERROR("UVD-IBs need a msg command!\n"); radeon_uvd_cs_parse()
726 crash the vcpu so just try to emit a dummy create/destroy msg to
735 uint32_t *msg = rdev->uvd.cpu_addr + offs; radeon_uvd_get_create_msg() local
744 /* stitch together a UVD create msg */ radeon_uvd_get_create_msg()
745 msg[0] = cpu_to_le32(0x00000de4); radeon_uvd_get_create_msg()
746 msg[1] = cpu_to_le32(0x00000000); radeon_uvd_get_create_msg()
747 msg[2] = cpu_to_le32(handle); radeon_uvd_get_create_msg()
748 msg[3] = cpu_to_le32(0x00000000); radeon_uvd_get_create_msg()
749 msg[4] = cpu_to_le32(0x00000000); radeon_uvd_get_create_msg()
750 msg[5] = cpu_to_le32(0x00000000); radeon_uvd_get_create_msg()
751 msg[6] = cpu_to_le32(0x00000000); radeon_uvd_get_create_msg()
752 msg[7] = cpu_to_le32(0x00000780); radeon_uvd_get_create_msg()
753 msg[8] = cpu_to_le32(0x00000440); radeon_uvd_get_create_msg()
754 msg[9] = cpu_to_le32(0x00000000); radeon_uvd_get_create_msg()
755 msg[10] = cpu_to_le32(0x01b37000); radeon_uvd_get_create_msg()
757 msg[i] = cpu_to_le32(0x0); radeon_uvd_get_create_msg()
771 uint32_t *msg = rdev->uvd.cpu_addr + offs; radeon_uvd_get_destroy_msg() local
780 /* stitch together a UVD destroy msg */ radeon_uvd_get_destroy_msg()
781 msg[0] = cpu_to_le32(0x00000de4); radeon_uvd_get_destroy_msg()
782 msg[1] = cpu_to_le32(0x00000002); radeon_uvd_get_destroy_msg()
783 msg[2] = cpu_to_le32(handle); radeon_uvd_get_destroy_msg()
784 msg[3] = cpu_to_le32(0x00000000); radeon_uvd_get_destroy_msg()
786 msg[i] = cpu_to_le32(0x0); radeon_uvd_get_destroy_msg()
/linux-4.1.27/arch/parisc/include/asm/
H A Dtraps.h8 void parisc_terminate(char *msg, struct pt_regs *regs,
/linux-4.1.27/arch/arc/kernel/
H A Dsmp.c202 * In arches with IRQ for each msg type (above), receiver can use IRQ-id to
203 * figure out what msg was sent. For those which don't (ARC has dedicated IPI
204 * IRQ), the msg-type needs to be conveyed via per-cpu data
209 static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg) ipi_send_msg_one() argument
215 pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu); ipi_send_msg_one()
220 * Atomically write new msg bit (in case others are writing too), ipi_send_msg_one()
225 new |= 1U << msg; ipi_send_msg_one()
230 * Only do so if there's no pending msg from other concurrent sender(s). ipi_send_msg_one()
231 * Otherwise, receiver will see this msg as well when it takes the ipi_send_msg_one()
232 * IPI corresponding to that msg. This is true, even if it is already in ipi_send_msg_one()
233 * IPI handler, because !@old means it has not yet dequeued the msg(s) ipi_send_msg_one()
234 * so @new msg can be a free-loader ipi_send_msg_one()
242 static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg) ipi_send_msg() argument
247 ipi_send_msg_one(cpu, msg); ipi_send_msg()
281 static inline void __do_IPI(unsigned long msg) __do_IPI() argument
283 switch (msg) { __do_IPI()
297 pr_warn("IPI with unexpected msg %ld\n", msg); __do_IPI()
316 * "dequeue" the msg corresponding to this IPI (and possibly other do_IPI()
317 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above) do_IPI()
322 unsigned long msg = __ffs(pending); do_IPI() local
323 __do_IPI(msg); do_IPI()
324 pending &= ~(1U << msg); do_IPI()
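The comments in ipi_send_msg_one() above describe ORing the message type into a per-cpu pending word and raising the hardware IPI only when nothing was already pending, so concurrent senders piggy-back on a single interrupt. A hedged sketch of that idea; pending_msgs, raise_hw_ipi() and sketch_ipi_send_msg_one() are placeholder names, not the ARC implementation.

#include <linux/atomic.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, pending_msgs);

/* placeholder for the platform-specific way of raising the hardware IPI */
static void raise_hw_ipi(int cpu)
{
	(void)cpu;
}

static void sketch_ipi_send_msg_one(int cpu, unsigned int msg)
{
	unsigned long *p = &per_cpu(pending_msgs, cpu);
	unsigned long old, new;

	do {				/* atomically OR in the new msg bit */
		new = old = *p;
		new |= 1UL << msg;
	} while (cmpxchg(p, old, new) != old);

	if (!old)			/* nothing was pending: kick the receiver */
		raise_hw_ipi(cpu);
}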
/linux-4.1.27/drivers/misc/sgi-xp/
H A Dxpc_uv.c333 xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg, xpc_send_gru_msg() argument
340 ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size); xpc_send_gru_msg()
437 struct xpc_activate_mq_msg_activate_req_uv *msg; xpc_handle_activate_mq_msg_uv() local
444 msg = container_of(msg_hdr, struct xpc_handle_activate_mq_msg_uv()
451 part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ xpc_handle_activate_mq_msg_uv()
453 part_uv->heartbeat_gpa = msg->heartbeat_gpa; xpc_handle_activate_mq_msg_uv()
455 if (msg->activate_gru_mq_desc_gpa != xpc_handle_activate_mq_msg_uv()
461 msg->activate_gru_mq_desc_gpa; xpc_handle_activate_mq_msg_uv()
469 struct xpc_activate_mq_msg_deactivate_req_uv *msg; xpc_handle_activate_mq_msg_uv() local
471 msg = container_of(msg_hdr, struct xpc_handle_activate_mq_msg_uv()
478 part_uv->reason = msg->reason; xpc_handle_activate_mq_msg_uv()
485 struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; xpc_handle_activate_mq_msg_uv() local
490 msg = container_of(msg_hdr, struct xpc_handle_activate_mq_msg_uv()
493 args = &part->remote_openclose_args[msg->ch_number]; xpc_handle_activate_mq_msg_uv()
494 args->reason = msg->reason; xpc_handle_activate_mq_msg_uv()
497 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST; xpc_handle_activate_mq_msg_uv()
504 struct xpc_activate_mq_msg_chctl_closereply_uv *msg; xpc_handle_activate_mq_msg_uv() local
509 msg = container_of(msg_hdr, struct xpc_handle_activate_mq_msg_uv()
514 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY; xpc_handle_activate_mq_msg_uv()
521 struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; xpc_handle_activate_mq_msg_uv() local
526 msg = container_of(msg_hdr, struct xpc_handle_activate_mq_msg_uv()
529 args = &part->remote_openclose_args[msg->ch_number]; xpc_handle_activate_mq_msg_uv()
530 args->entry_size = msg->entry_size; xpc_handle_activate_mq_msg_uv()
531 args->local_nentries = msg->local_nentries; xpc_handle_activate_mq_msg_uv()
534 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST; xpc_handle_activate_mq_msg_uv()
541 struct xpc_activate_mq_msg_chctl_openreply_uv *msg; xpc_handle_activate_mq_msg_uv() local
546 msg = container_of(msg_hdr, struct xpc_handle_activate_mq_msg_uv()
548 args = &part->remote_openclose_args[msg->ch_number]; xpc_handle_activate_mq_msg_uv()
549 args->remote_nentries = msg->remote_nentries; xpc_handle_activate_mq_msg_uv()
550 args->local_nentries = msg->local_nentries; xpc_handle_activate_mq_msg_uv()
551 args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa; xpc_handle_activate_mq_msg_uv()
554 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY; xpc_handle_activate_mq_msg_uv()
561 struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg; xpc_handle_activate_mq_msg_uv() local
566 msg = container_of(msg_hdr, struct xpc_handle_activate_mq_msg_uv()
569 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE; xpc_handle_activate_mq_msg_uv()
587 dev_err(xpc_part, "received unknown activate_mq msg type=%d " xpc_handle_activate_mq_msg_uv()
672 xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size, xpc_send_activate_IRQ_uv() argument
675 struct xpc_activate_mq_msghdr_uv *msg_hdr = msg; xpc_send_activate_IRQ_uv()
715 ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg, xpc_send_activate_IRQ_uv()
728 xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg, xpc_send_activate_IRQ_part_uv() argument
733 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); xpc_send_activate_IRQ_part_uv()
740 void *msg, size_t msg_size, int msg_type) xpc_send_activate_IRQ_ch_uv()
745 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); xpc_send_activate_IRQ_ch_uv()
898 struct xpc_activate_mq_msg_activate_req_uv msg; xpc_request_partition_activation_uv() local
911 msg.rp_gpa = uv_gpa(xpc_rsvd_page); xpc_request_partition_activation_uv()
912 msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa; xpc_request_partition_activation_uv()
913 msg.activate_gru_mq_desc_gpa = xpc_request_partition_activation_uv()
915 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), xpc_request_partition_activation_uv()
932 struct xpc_activate_mq_msg_deactivate_req_uv msg; xpc_request_partition_deactivation_uv() local
941 msg.reason = part->reason; xpc_request_partition_deactivation_uv()
942 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), xpc_request_partition_deactivation_uv()
1040 struct xpc_activate_mq_msg_uv msg; xpc_make_first_contact_uv() local
1043 * We send a sync msg to get the remote partition's remote_act_state xpc_make_first_contact_uv()
1047 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), xpc_make_first_contact_uv()
1202 struct xpc_activate_mq_msg_chctl_closerequest_uv msg; xpc_send_chctl_closerequest_uv() local
1204 msg.ch_number = ch->number; xpc_send_chctl_closerequest_uv()
1205 msg.reason = ch->reason; xpc_send_chctl_closerequest_uv()
1206 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_closerequest_uv()
1213 struct xpc_activate_mq_msg_chctl_closereply_uv msg; xpc_send_chctl_closereply_uv() local
1215 msg.ch_number = ch->number; xpc_send_chctl_closereply_uv()
1216 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_closereply_uv()
1223 struct xpc_activate_mq_msg_chctl_openrequest_uv msg; xpc_send_chctl_openrequest_uv() local
1225 msg.ch_number = ch->number; xpc_send_chctl_openrequest_uv()
1226 msg.entry_size = ch->entry_size; xpc_send_chctl_openrequest_uv()
1227 msg.local_nentries = ch->local_nentries; xpc_send_chctl_openrequest_uv()
1228 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_openrequest_uv()
1235 struct xpc_activate_mq_msg_chctl_openreply_uv msg; xpc_send_chctl_openreply_uv() local
1237 msg.ch_number = ch->number; xpc_send_chctl_openreply_uv()
1238 msg.local_nentries = ch->local_nentries; xpc_send_chctl_openreply_uv()
1239 msg.remote_nentries = ch->remote_nentries; xpc_send_chctl_openreply_uv()
1240 msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc); xpc_send_chctl_openreply_uv()
1241 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_openreply_uv()
1248 struct xpc_activate_mq_msg_chctl_opencomplete_uv msg; xpc_send_chctl_opencomplete_uv() local
1250 msg.ch_number = ch->number; xpc_send_chctl_opencomplete_uv()
1251 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_opencomplete_uv()
1281 struct xpc_activate_mq_msg_uv msg; xpc_indicate_partition_engaged_uv() local
1283 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), xpc_indicate_partition_engaged_uv()
1290 struct xpc_activate_mq_msg_uv msg; xpc_indicate_partition_disengaged_uv() local
1292 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), xpc_indicate_partition_disengaged_uv()
1359 /* wakeup anyone waiting for a free msg slot */ xpc_free_msg_slot_uv()
1389 struct xpc_notify_mq_msg_uv *msg) xpc_handle_notify_mq_ack_uv()
1392 int entry = msg->hdr.msg_slot_number % ch->local_nentries; xpc_handle_notify_mq_ack_uv()
1396 BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number); xpc_handle_notify_mq_ack_uv()
1407 struct xpc_notify_mq_msg_uv *msg) xpc_handle_notify_mq_msg_uv()
1414 int ch_number = msg->hdr.ch_number; xpc_handle_notify_mq_msg_uv()
1441 /* see if we're really dealing with an ACK for a previously sent msg */ xpc_handle_notify_mq_msg_uv()
1442 if (msg->hdr.size == 0) { xpc_handle_notify_mq_msg_uv()
1443 xpc_handle_notify_mq_ack_uv(ch, msg); xpc_handle_notify_mq_msg_uv()
1452 (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; xpc_handle_notify_mq_msg_uv()
1456 memcpy(msg_slot, msg, msg->hdr.size); xpc_handle_notify_mq_msg_uv()
1477 struct xpc_notify_mq_msg_uv *msg; xpc_handle_notify_IRQ_uv() local
1481 while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) != xpc_handle_notify_IRQ_uv()
1484 partid = msg->hdr.partid; xpc_handle_notify_IRQ_uv()
1492 xpc_handle_notify_mq_msg_uv(part, msg); xpc_handle_notify_IRQ_uv()
1497 gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg); xpc_handle_notify_IRQ_uv()
1536 struct xpc_notify_mq_msg_uv *msg; xpc_send_payload_uv() local
1574 msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer; xpc_send_payload_uv()
1575 msg->hdr.partid = xp_partition_id; xpc_send_payload_uv()
1576 msg->hdr.ch_number = ch->number; xpc_send_payload_uv()
1577 msg->hdr.size = msg_size; xpc_send_payload_uv()
1578 msg->hdr.msg_slot_number = msg_slot->msg_slot_number; xpc_send_payload_uv()
1579 memcpy(&msg->payload, payload, payload_size); xpc_send_payload_uv()
1581 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, xpc_send_payload_uv()
1646 struct xpc_notify_mq_msg_uv *msg; xpc_get_deliverable_payload_uv() local
1652 msg = container_of(entry, struct xpc_notify_mq_msg_uv, xpc_get_deliverable_payload_uv()
1654 payload = &msg->payload; xpc_get_deliverable_payload_uv()
1663 struct xpc_notify_mq_msg_uv *msg; xpc_received_payload_uv() local
1666 msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload); xpc_received_payload_uv()
1670 msg->hdr.partid = xp_partition_id; xpc_received_payload_uv()
1671 msg->hdr.size = 0; /* size of zero indicates this is an ACK */ xpc_received_payload_uv()
1673 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, xpc_received_payload_uv()
739 xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags, void *msg, size_t msg_size, int msg_type) xpc_send_activate_IRQ_ch_uv() argument
1388 xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch, struct xpc_notify_mq_msg_uv *msg) xpc_handle_notify_mq_ack_uv() argument
1406 xpc_handle_notify_mq_msg_uv(struct xpc_partition *part, struct xpc_notify_mq_msg_uv *msg) xpc_handle_notify_mq_msg_uv() argument
H A Dxpnet.c83 && (msg->magic == XPNET_MAGIC))
147 xpnet_receive(short partid, int channel, struct xpnet_message *msg) xpnet_receive() argument
153 if (!XPNET_VALID_MSG(msg)) { xpnet_receive()
157 xpc_received(partid, channel, (void *)msg); xpnet_receive()
163 dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, xpnet_receive()
164 msg->leadin_ignore, msg->tailout_ignore); xpnet_receive()
167 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); xpnet_receive()
170 msg->size + L1_CACHE_BYTES); xpnet_receive()
172 xpc_received(partid, channel, (void *)msg); xpnet_receive()
186 msg->leadin_ignore)); xpnet_receive()
192 skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore)); xpnet_receive()
197 if ((XPNET_VERSION_MINOR(msg->version) == 1) && xpnet_receive()
198 (msg->embedded_bytes != 0)) { xpnet_receive()
200 "%lu)\n", skb->data, &msg->data, xpnet_receive()
201 (size_t)msg->embedded_bytes); xpnet_receive()
203 skb_copy_to_linear_data(skb, &msg->data, xpnet_receive()
204 (size_t)msg->embedded_bytes); xpnet_receive()
209 (void *)msg->buf_pa, msg->size); xpnet_receive()
211 ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size); xpnet_receive()
220 (void *)msg->buf_pa, msg->size, ret); xpnet_receive()
222 xpc_received(partid, channel, (void *)msg); xpnet_receive()
248 xpc_received(partid, channel, (void *)msg); xpnet_receive()
380 struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer; xpnet_send() local
384 msg->embedded_bytes = embedded_bytes; xpnet_send()
386 msg->version = XPNET_VERSION_EMBED; xpnet_send()
388 &msg->data, skb->data, (size_t)embedded_bytes); xpnet_send()
389 skb_copy_from_linear_data(skb, &msg->data, xpnet_send()
393 msg->version = XPNET_VERSION; xpnet_send()
395 msg->magic = XPNET_MAGIC; xpnet_send()
396 msg->size = end_addr - start_addr; xpnet_send()
397 msg->leadin_ignore = (u64)skb->data - start_addr; xpnet_send()
398 msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); xpnet_send()
399 msg->buf_pa = xp_pa((void *)start_addr); xpnet_send()
402 "msg->buf_pa=0x%lx, msg->size=%u, " xpnet_send()
403 "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", xpnet_send()
404 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, xpnet_send()
405 msg->leadin_ignore, msg->tailout_ignore); xpnet_send()
409 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, msg, xpnet_send()
/linux-4.1.27/sound/oss/
H A Dmidi_synth.c44 do_midi_msg(int synthno, unsigned char *msg, int mlen) do_midi_msg() argument
46 switch (msg[0] & 0xf0) do_midi_msg()
49 if (msg[2] != 0) do_midi_msg()
51 STORE(SEQ_START_NOTE(synthno, msg[0] & 0x0f, msg[1], msg[2])); do_midi_msg()
54 msg[2] = 64; do_midi_msg()
57 STORE(SEQ_STOP_NOTE(synthno, msg[0] & 0x0f, msg[1], msg[2])); do_midi_msg()
61 STORE(SEQ_KEY_PRESSURE(synthno, msg[0] & 0x0f, msg[1], msg[2])); do_midi_msg()
65 STORE(SEQ_CONTROL(synthno, msg[0] & 0x0f, do_midi_msg()
66 msg[1], msg[2])); do_midi_msg()
70 STORE(SEQ_SET_PATCH(synthno, msg[0] & 0x0f, msg[1])); do_midi_msg()
74 STORE(SEQ_CHN_PRESSURE(synthno, msg[0] & 0x0f, msg[1])); do_midi_msg()
78 STORE(SEQ_BENDER(synthno, msg[0] & 0x0f, do_midi_msg()
79 (msg[1] & 0x7f) | ((msg[2] & 0x7f) << 7))); do_midi_msg()
83 /* printk( "MPU: Unknown midi channel message %02x\n", msg[0]); */ do_midi_msg()
286 int msg, chn; midi_synth_kill_note() local
299 msg = prev_out_status[orig_dev] & 0xf0; midi_synth_kill_note()
302 if (chn == channel && ((msg == 0x90 && velocity == 64) || msg == 0x80)) midi_synth_kill_note()
311 if (msg == 0x90) /* midi_synth_kill_note()
376 int msg, chn; midi_synth_start_note() local
389 msg = prev_out_status[orig_dev] & 0xf0; midi_synth_start_note()
392 if (chn == channel && msg == 0x90) midi_synth_start_note()
562 int msg, chn; midi_synth_aftertouch() local
571 msg = prev_out_status[orig_dev] & 0xf0; midi_synth_aftertouch()
574 if (msg != 0xd0 || chn != channel) /* midi_synth_aftertouch()
594 int chn, msg; midi_synth_controller() local
603 msg = prev_out_status[orig_dev] & 0xf0; midi_synth_controller()
606 if (msg != 0xb0 || chn != channel) midi_synth_controller()
623 int msg, prev_chn; midi_synth_bender() local
633 msg = prev_out_status[orig_dev] & 0xf0; midi_synth_bender()
636 if (msg != 0xd0 || prev_chn != channel) /* midi_synth_bender()
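do_midi_msg() above dispatches on the upper nibble of the status byte, with the lower nibble carrying the channel. A small stand-alone C illustration of that decode (simplified: no running status and no system messages).

#include <stdio.h>

/* decode one 3-byte MIDI channel message */
static void decode_midi(const unsigned char *msg)
{
	unsigned int type = msg[0] & 0xf0;	/* upper nibble: message type */
	unsigned int chan = msg[0] & 0x0f;	/* lower nibble: channel 0..15 */
	unsigned int d1 = msg[1] & 0x7f;
	unsigned int d2 = msg[2] & 0x7f;

	switch (type) {
	case 0x90:	/* note on (velocity 0 is commonly treated as note off) */
		printf("ch%u note-on  key=%u vel=%u\n", chan, d1, d2);
		break;
	case 0x80:	/* note off */
		printf("ch%u note-off key=%u vel=%u\n", chan, d1, d2);
		break;
	case 0xe0:	/* pitch bend: 14-bit value, LSB first */
		printf("ch%u bend=%u\n", chan, d1 | (d2 << 7));
		break;
	default:
		printf("ch%u unhandled status 0x%02x\n", chan, (unsigned int)msg[0]);
	}
}

int main(void)
{
	const unsigned char note_on[] = { 0x93, 60, 100 };	/* channel 3, middle C */

	decode_midi(note_on);
	return 0;
}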
/linux-4.1.27/drivers/media/usb/dvb-usb-v2/
H A Dgl861.c53 static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], gl861_i2c_xfer() argument
67 if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { gl861_i2c_xfer()
68 if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf, gl861_i2c_xfer()
69 msg[i].len, msg[i+1].buf, msg[i+1].len) < 0) gl861_i2c_xfer()
73 if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf, gl861_i2c_xfer()
74 msg[i].len, NULL, 0) < 0) gl861_i2c_xfer()
H A Dmxl111sf-i2c.c239 struct i2c_msg *msg) mxl111sf_i2c_sw_xfer_msg()
245 if (msg->flags & I2C_M_RD) { mxl111sf_i2c_sw_xfer_msg()
252 (msg->addr << 1) | 0x01); mxl111sf_i2c_sw_xfer_msg()
258 for (i = 0; i < msg->len; i++) { mxl111sf_i2c_sw_xfer_msg()
260 &msg->buf[i]); mxl111sf_i2c_sw_xfer_msg()
266 if (i < msg->len - 1) mxl111sf_i2c_sw_xfer_msg()
283 (msg->addr << 1) & 0xfe); mxl111sf_i2c_sw_xfer_msg()
289 for (i = 0; i < msg->len; i++) { mxl111sf_i2c_sw_xfer_msg()
291 msg->buf[i]); mxl111sf_i2c_sw_xfer_msg()
441 struct i2c_msg *msg) mxl111sf_i2c_hw_xfer_msg()
454 msg->addr, (msg->flags & I2C_M_RD) ? msg->len : 0, mxl111sf_i2c_hw_xfer_msg()
455 (!(msg->flags & I2C_M_RD)) ? msg->len : 0); mxl111sf_i2c_hw_xfer_msg()
491 if (!(msg->flags & I2C_M_RD) && (msg->len > 0)) { mxl111sf_i2c_hw_xfer_msg()
492 mxl_i2c("%d\t%02x", msg->len, msg->buf[0]); mxl111sf_i2c_hw_xfer_msg()
501 buf[6] = (msg->addr); mxl111sf_i2c_hw_xfer_msg()
509 msg->addr); mxl111sf_i2c_hw_xfer_msg()
521 block_len = (msg->len / 8); mxl111sf_i2c_hw_xfer_msg()
522 left_over_len = (msg->len % 8); mxl111sf_i2c_hw_xfer_msg()
532 buf[3+(i*3)] = msg->buf[(index*8)+i]; mxl111sf_i2c_hw_xfer_msg()
541 msg->addr); mxl111sf_i2c_hw_xfer_msg()
562 buf[3+(i*3)] = msg->buf[(index*8)+i]; mxl111sf_i2c_hw_xfer_msg()
564 index, i, msg->buf[(index*8)+i]); mxl111sf_i2c_hw_xfer_msg()
572 msg->addr); mxl111sf_i2c_hw_xfer_msg()
592 if ((msg->flags & I2C_M_RD) && (msg->len > 0)) { mxl111sf_i2c_hw_xfer_msg()
593 mxl_i2c("read buf len %d", msg->len); mxl111sf_i2c_hw_xfer_msg()
603 buf[6] = (msg->len & 0xFF); mxl111sf_i2c_hw_xfer_msg()
608 buf[9] = msg->addr; mxl111sf_i2c_hw_xfer_msg()
616 msg->addr); mxl111sf_i2c_hw_xfer_msg()
629 block_len = ((msg->len) / 8); mxl111sf_i2c_hw_xfer_msg()
630 left_over_len = ((msg->len) % 8); mxl111sf_i2c_hw_xfer_msg()
653 msg->addr); mxl111sf_i2c_hw_xfer_msg()
671 msg->buf[(index*8)+i] = mxl111sf_i2c_hw_xfer_msg()
683 msg->buf[(index*8)+(k+i+1)] = mxl111sf_i2c_hw_xfer_msg()
686 msg->buf[(index*8)+(k+i)], mxl111sf_i2c_hw_xfer_msg()
689 msg->buf[(index*8)+(k+i+1)], mxl111sf_i2c_hw_xfer_msg()
699 msg->buf[(index*8)+i] = mxl111sf_i2c_hw_xfer_msg()
703 msg->buf[(index*8)+i] = mxl111sf_i2c_hw_xfer_msg()
730 msg->addr); mxl111sf_i2c_hw_xfer_msg()
741 msg->buf[(block_len*8)+i] = mxl111sf_i2c_hw_xfer_msg()
815 struct i2c_msg msg[], int num) mxl111sf_i2c_xfer()
827 mxl111sf_i2c_hw_xfer_msg(state, &msg[i]) : mxl111sf_i2c_xfer()
828 mxl111sf_i2c_sw_xfer_msg(state, &msg[i]); mxl111sf_i2c_xfer()
833 (msg[i].flags & I2C_M_RD) ? mxl111sf_i2c_xfer()
835 msg[i].len, msg[i].addr); mxl111sf_i2c_xfer()
238 mxl111sf_i2c_sw_xfer_msg(struct mxl111sf_state *state, struct i2c_msg *msg) mxl111sf_i2c_sw_xfer_msg() argument
440 mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state, struct i2c_msg *msg) mxl111sf_i2c_hw_xfer_msg() argument
814 mxl111sf_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) mxl111sf_i2c_xfer() argument
H A Dau6610.c96 static int au6610_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], au6610_i2c_xfer() argument
110 if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { au6610_i2c_xfer()
111 if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf, au6610_i2c_xfer()
112 msg[i].len, msg[i+1].buf, au6610_i2c_xfer()
113 msg[i+1].len) < 0) au6610_i2c_xfer()
116 } else if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf, au6610_i2c_xfer()
117 msg[i].len, NULL, 0) < 0) au6610_i2c_xfer()
H A Dce6230.c100 struct i2c_msg msg[], int num) ce6230_i2c_master_xfer()
115 if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { ce6230_i2c_master_xfer()
116 if (msg[i].addr == ce6230_i2c_master_xfer()
119 req.value = msg[i].addr >> 1; ce6230_i2c_master_xfer()
120 req.index = msg[i].buf[0]; ce6230_i2c_master_xfer()
121 req.data_len = msg[i+1].len; ce6230_i2c_master_xfer()
122 req.data = &msg[i+1].buf[0]; ce6230_i2c_master_xfer()
132 if (msg[i].addr == ce6230_i2c_master_xfer()
135 req.value = msg[i].addr >> 1; ce6230_i2c_master_xfer()
136 req.index = msg[i].buf[0]; ce6230_i2c_master_xfer()
137 req.data_len = msg[i].len-1; ce6230_i2c_master_xfer()
138 req.data = &msg[i].buf[1]; ce6230_i2c_master_xfer()
142 req.value = 0x2000 + (msg[i].addr >> 1); ce6230_i2c_master_xfer()
144 req.data_len = msg[i].len; ce6230_i2c_master_xfer()
145 req.data = &msg[i].buf[0]; ce6230_i2c_master_xfer()
99 ce6230_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) ce6230_i2c_master_xfer() argument
/linux-4.1.27/include/linux/mfd/
H A Dipaq-micro.h101 * @msg: current message
116 struct ipaq_micro_msg *msg; member in struct:ipaq_micro
125 ipaq_micro_tx_msg(struct ipaq_micro *micro, struct ipaq_micro_msg *msg);
129 struct ipaq_micro_msg *msg) ipaq_micro_tx_msg_sync()
133 init_completion(&msg->ack); ipaq_micro_tx_msg_sync()
134 ret = ipaq_micro_tx_msg(micro, msg); ipaq_micro_tx_msg_sync()
135 wait_for_completion(&msg->ack); ipaq_micro_tx_msg_sync()
142 struct ipaq_micro_msg *msg) ipaq_micro_tx_msg_async()
144 init_completion(&msg->ack); ipaq_micro_tx_msg_async()
145 return ipaq_micro_tx_msg(micro, msg); ipaq_micro_tx_msg_async()
128 ipaq_micro_tx_msg_sync(struct ipaq_micro *micro, struct ipaq_micro_msg *msg) ipaq_micro_tx_msg_sync() argument
141 ipaq_micro_tx_msg_async(struct ipaq_micro *micro, struct ipaq_micro_msg *msg) ipaq_micro_tx_msg_async() argument
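ipaq_micro_tx_msg_sync() above is the usual sync wrapper around an async send, built on a struct completion that the receive path completes. A generic hedged sketch of that pattern; my_request, my_start_async() and my_request_finished() are placeholders, and the stub completes immediately only so the sketch stays self-contained.

#include <linux/completion.h>

struct my_request {
	struct completion done;
	int status;
};

/* called from the async completion path (e.g. an interrupt handler) */
static void my_request_finished(struct my_request *req, int status)
{
	req->status = status;
	complete(&req->done);
}

/* stub for the sketch; a real driver would hand the request to hardware here */
static void my_start_async(struct my_request *req)
{
	my_request_finished(req, 0);
}

static int my_request_sync(struct my_request *req)
{
	init_completion(&req->done);
	my_start_async(req);
	wait_for_completion(&req->done);	/* sleep until my_request_finished() runs */
	return req->status;
}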
/linux-4.1.27/drivers/atm/
H A Datmtcp.c43 const struct atmtcp_control *msg,int flag) atmtcp_send_control()
54 skb = alloc_skb(sizeof(*msg),GFP_KERNEL); atmtcp_send_control()
64 *new_msg = *msg; atmtcp_send_control()
88 static int atmtcp_recv_control(const struct atmtcp_control *msg) atmtcp_recv_control() argument
90 struct atm_vcc *vcc = *(struct atm_vcc **) &msg->vcc; atmtcp_recv_control()
92 vcc->vpi = msg->addr.sap_addr.vpi; atmtcp_recv_control()
93 vcc->vci = msg->addr.sap_addr.vci; atmtcp_recv_control()
94 vcc->qos = msg->qos; atmtcp_recv_control()
95 sk_atm(vcc)->sk_err = -msg->result; atmtcp_recv_control()
96 switch (msg->type) { atmtcp_recv_control()
105 msg->type); atmtcp_recv_control()
121 struct atmtcp_control msg; atmtcp_v_open() local
126 memset(&msg,0,sizeof(msg)); atmtcp_v_open()
127 msg.addr.sap_family = AF_ATMPVC; atmtcp_v_open()
128 msg.hdr.vpi = htons(vpi); atmtcp_v_open()
129 msg.addr.sap_addr.vpi = vpi; atmtcp_v_open()
130 msg.hdr.vci = htons(vci); atmtcp_v_open()
131 msg.addr.sap_addr.vci = vci; atmtcp_v_open()
133 msg.type = ATMTCP_CTRL_OPEN; atmtcp_v_open()
134 msg.qos = vcc->qos; atmtcp_v_open()
137 error = atmtcp_send_control(vcc,ATMTCP_CTRL_OPEN,&msg,ATM_VF_READY); atmtcp_v_open()
145 struct atmtcp_control msg; atmtcp_v_close() local
147 memset(&msg,0,sizeof(msg)); atmtcp_v_close()
148 msg.addr.sap_family = AF_ATMPVC; atmtcp_v_close()
149 msg.addr.sap_addr.vpi = vcc->vpi; atmtcp_v_close()
150 msg.addr.sap_addr.vci = vcc->vci; atmtcp_v_close()
152 (void) atmtcp_send_control(vcc,ATMTCP_CTRL_CLOSE,&msg,ATM_VF_ADDR); atmtcp_v_close()
42 atmtcp_send_control(struct atm_vcc *vcc,int type, const struct atmtcp_control *msg,int flag) atmtcp_send_control() argument
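atmtcp_send_control() above sizes an skb for a fixed control message and copies the message into it. A hedged sketch of that alloc_skb()/skb_put()/copy step; my_ctrl_msg and my_deliver() are placeholders, not the atmtcp structures.

#include <linux/errno.h>
#include <linux/skbuff.h>

struct my_ctrl_msg {		/* placeholder control-message layout */
	int type;
	int result;
};

/* placeholder consumer; a real driver would queue the skb for the peer */
static void my_deliver(struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int my_send_ctrl(const struct my_ctrl_msg *msg)
{
	struct sk_buff *skb;
	struct my_ctrl_msg *new_msg;

	skb = alloc_skb(sizeof(*msg), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	new_msg = (struct my_ctrl_msg *)skb_put(skb, sizeof(*msg));
	*new_msg = *msg;	/* copy the control message into the skb data area */

	my_deliver(skb);
	return 0;
}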
/linux-4.1.27/drivers/isdn/pcbit/
H A Dlayer2.h93 #define SET_MSG_SCMD(msg, ch) (msg = (msg & 0xffffff00) | (((ch) & 0xff)))
94 #define SET_MSG_CMD(msg, ch) (msg = (msg & 0xffff00ff) | (((ch) & 0xff) << 8))
95 #define SET_MSG_PROC(msg, ch) (msg = (msg & 0xff00ffff) | (((ch) & 0xff) << 16))
96 #define SET_MSG_CPU(msg, ch) (msg = (msg & 0x00ffffff) | (((ch) & 0xff) << 24))
98 #define GET_MSG_SCMD(msg) ((msg) & 0xFF)
99 #define GET_MSG_CMD(msg) ((msg) >> 8 & 0xFF)
100 #define GET_MSG_PROC(msg) ((msg) >> 16 & 0xFF)
101 #define GET_MSG_CPU(msg) ((msg) >> 24)
115 ulong msg; member in struct:frame_buf
124 extern int pcbit_l2_write(struct pcbit_dev *dev, ulong msg, ushort refnum,
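The SET_MSG_*/GET_MSG_* macros above pack four 8-bit fields (scmd, cmd, proc, cpu) into a single 32-bit message word, lowest byte first. A quick user-space check of that layout with the macro bodies expanded inline.

#include <assert.h>

int main(void)
{
	unsigned long msg = 0;

	msg = (msg & 0xffffff00UL) | 0x11;		/* SET_MSG_SCMD */
	msg = (msg & 0xffff00ffUL) | (0x22UL << 8);	/* SET_MSG_CMD  */
	msg = (msg & 0xff00ffffUL) | (0x33UL << 16);	/* SET_MSG_PROC */
	msg = (msg & 0x00ffffffUL) | (0x44UL << 24);	/* SET_MSG_CPU  */

	assert((msg & 0xff) == 0x11);			/* GET_MSG_SCMD */
	assert(((msg >> 8) & 0xff) == 0x22);		/* GET_MSG_CMD  */
	assert(((msg >> 16) & 0xff) == 0x33);		/* GET_MSG_PROC */
	assert((msg >> 24) == 0x44);			/* GET_MSG_CPU  */
	return 0;
}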
/linux-4.1.27/drivers/md/
H A Ddm-log-userspace-transfer.c59 struct cn_msg *msg = prealloced_cn_msg; dm_ulog_sendto_server() local
61 memset(msg, 0, sizeof(struct cn_msg)); dm_ulog_sendto_server()
63 msg->id.idx = ulog_cn_id.idx; dm_ulog_sendto_server()
64 msg->id.val = ulog_cn_id.val; dm_ulog_sendto_server()
65 msg->ack = 0; dm_ulog_sendto_server()
66 msg->seq = tfr->seq; dm_ulog_sendto_server()
67 msg->len = sizeof(struct dm_ulog_request) + tfr->data_size; dm_ulog_sendto_server()
69 r = cn_netlink_send(msg, 0, 0, gfp_any()); dm_ulog_sendto_server()
75 * Parameters for this function can be either msg or tfr, but not
77 * If just msg is given, then the reply is simply an ACK from userspace
82 static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr) fill_pkg() argument
84 uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0; fill_pkg()
101 if (msg) { fill_pkg()
102 pkg->error = -msg->ack; fill_pkg()
133 static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) cn_ulog_callback() argument
135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); cn_ulog_callback()
141 if (msg->len == 0) cn_ulog_callback()
142 fill_pkg(msg, NULL); cn_ulog_callback()
143 else if (msg->len < sizeof(*tfr)) cn_ulog_callback()
145 (unsigned)sizeof(*tfr), msg->len, msg->seq); cn_ulog_callback()
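dm_ulog_sendto_server() above fills a struct cn_msg header (connector id, sequence number, payload length) and pushes it out with cn_netlink_send(). A hedged sketch of that step; MY_CN_IDX/MY_CN_VAL and my_cn_send() are placeholders for a registered connector id, not the dm-log-userspace values.

#include <linux/connector.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

#define MY_CN_IDX	0x10	/* placeholder connector index */
#define MY_CN_VAL	0x1	/* placeholder connector value */

static int my_cn_send(const void *payload, u16 len, u32 seq)
{
	struct cn_msg *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->id.idx = MY_CN_IDX;
	msg->id.val = MY_CN_VAL;
	msg->seq = seq;
	msg->len = len;
	memcpy(msg->data, payload, len);	/* payload follows the header */

	ret = cn_netlink_send(msg, 0, 0, GFP_KERNEL);
	kfree(msg);
	return ret;
}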
/linux-4.1.27/drivers/i2c/busses/
H A Di2c-axxia.c86 * @msg: pointer to current message
87 * @msg_xfrd: number of bytes transferred in msg
97 struct i2c_msg *msg; member in struct:axxia_i2c_dev
206 static int i2c_m_rd(const struct i2c_msg *msg) i2c_m_rd() argument
208 return (msg->flags & I2C_M_RD) != 0; i2c_m_rd()
211 static int i2c_m_ten(const struct i2c_msg *msg) i2c_m_ten() argument
213 return (msg->flags & I2C_M_TEN) != 0; i2c_m_ten()
216 static int i2c_m_recv_len(const struct i2c_msg *msg) i2c_m_recv_len() argument
218 return (msg->flags & I2C_M_RECV_LEN) != 0; i2c_m_recv_len()
227 struct i2c_msg *msg = idev->msg; axxia_i2c_empty_rx_fifo() local
229 int bytes_to_transfer = min(rx_fifo_avail, msg->len - idev->msg_xfrd); axxia_i2c_empty_rx_fifo()
234 if (idev->msg_xfrd == 0 && i2c_m_recv_len(msg)) { axxia_i2c_empty_rx_fifo()
244 msg->len = 1 + c; axxia_i2c_empty_rx_fifo()
245 writel(msg->len, idev->base + MST_RX_XFER); axxia_i2c_empty_rx_fifo()
247 msg->buf[idev->msg_xfrd++] = c; axxia_i2c_empty_rx_fifo()
259 struct i2c_msg *msg = idev->msg; axxia_i2c_fill_tx_fifo() local
261 int bytes_to_transfer = min(tx_fifo_avail, msg->len - idev->msg_xfrd); axxia_i2c_fill_tx_fifo()
262 int ret = msg->len - idev->msg_xfrd - bytes_to_transfer; axxia_i2c_fill_tx_fifo()
265 writel(msg->buf[idev->msg_xfrd++], idev->base + MST_DATA); axxia_i2c_fill_tx_fifo()
281 if (!idev->msg) { axxia_i2c_isr()
287 if (i2c_m_rd(idev->msg) && (status & MST_STATUS_RFL)) axxia_i2c_isr()
291 if (!i2c_m_rd(idev->msg) && (status & MST_STATUS_TFL)) { axxia_i2c_isr()
303 if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len) axxia_i2c_isr()
317 idev->msg->addr, axxia_i2c_isr()
332 static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) axxia_i2c_xfer_msg() argument
339 idev->msg = msg; axxia_i2c_xfer_msg()
344 if (i2c_m_ten(msg)) { axxia_i2c_xfer_msg()
349 addr_1 = 0xF0 | ((msg->addr >> 7) & 0x06); axxia_i2c_xfer_msg()
350 addr_2 = msg->addr & 0xFF; axxia_i2c_xfer_msg()
356 addr_1 = (msg->addr << 1) & 0xFF; axxia_i2c_xfer_msg()
360 if (i2c_m_rd(msg)) { axxia_i2c_xfer_msg()
362 rx_xfer = i2c_m_recv_len(msg) ? I2C_SMBUS_BLOCK_MAX : msg->len; axxia_i2c_xfer_msg()
368 tx_xfer = msg->len; axxia_i2c_xfer_msg()
376 if (i2c_m_rd(msg)) axxia_i2c_xfer_msg()
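The axxia driver above, like several bus drivers further down, keeps a "current msg plus bytes transferred" cursor and feeds the TX FIFO from it on every interrupt. A generic hedged sketch of that fill step; my_xfer, fifo_space() and fifo_write() are placeholders for the controller-specific state and register accessors.

#include <linux/i2c.h>
#include <linux/kernel.h>

struct my_xfer {
	struct i2c_msg *msg;	/* current message */
	unsigned int msg_xfrd;	/* bytes of msg already written */
};

/* placeholder register accessors, just for the sketch */
static unsigned int fifo_space(void)
{
	return 8;
}

static void fifo_write(u8 byte)
{
	(void)byte;
}

/* copy as much of the current message as fits; return bytes still to send */
static unsigned int my_fill_tx_fifo(struct my_xfer *x)
{
	struct i2c_msg *msg = x->msg;
	unsigned int chunk = min_t(unsigned int, fifo_space(),
				   msg->len - x->msg_xfrd);

	while (chunk--)
		fifo_write(msg->buf[x->msg_xfrd++]);

	return msg->len - x->msg_xfrd;
}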
H A Di2c-kempld.c67 struct i2c_msg *msg; member in struct:kempld_i2c_data
94 struct i2c_msg *msg = i2c->msg; kempld_i2c_process() local
125 if (i2c->msg->flags & I2C_M_TEN) { kempld_i2c_process()
126 addr = 0xf0 | ((i2c->msg->addr >> 7) & 0x6); kempld_i2c_process()
129 addr = (i2c->msg->addr << 1); kempld_i2c_process()
134 addr |= (i2c->msg->flags & I2C_M_RD) ? 1 : 0; kempld_i2c_process()
144 kempld_write8(pld, KEMPLD_I2C_DATA, i2c->msg->addr & 0xff); kempld_i2c_process()
152 i2c->state = (msg->flags & I2C_M_RD) ? STATE_READ : STATE_WRITE; kempld_i2c_process()
160 msg->buf[i2c->pos++] = kempld_read8(pld, KEMPLD_I2C_DATA); kempld_i2c_process()
163 if (i2c->pos >= msg->len) { kempld_i2c_process()
165 i2c->msg++; kempld_i2c_process()
167 msg = i2c->msg; kempld_i2c_process()
170 if (!(msg->flags & I2C_M_NOSTART)) { kempld_i2c_process()
174 i2c->state = (msg->flags & I2C_M_RD) kempld_i2c_process()
185 kempld_write8(pld, KEMPLD_I2C_CMD, i2c->pos == (msg->len - 1) ? kempld_i2c_process()
188 kempld_write8(pld, KEMPLD_I2C_DATA, msg->buf[i2c->pos++]); kempld_i2c_process()
203 i2c->msg = msgs; kempld_i2c_xfer()
H A Di2c-digicolor.c57 struct i2c_msg *msg; member in struct:dc_i2c
80 static u8 dc_i2c_addr_cmd(struct i2c_msg *msg) dc_i2c_addr_cmd() argument
82 u8 addr = (msg->addr & 0x7f) << 1; dc_i2c_addr_cmd()
84 if (msg->flags & I2C_M_RD) dc_i2c_addr_cmd()
103 dc_i2c_write_byte(i2c, i2c->msg->buf[i2c->msgbuf_ptr++]); dc_i2c_write_buf()
108 bool last = (i2c->msgbuf_ptr + 1 == i2c->msg->len); dc_i2c_next_read()
129 i2c->msg->buf[i2c->msgbuf_ptr++] = dc_i2c_read_byte(i2c); dc_i2c_read_buf()
149 struct i2c_msg *msg = i2c->msg; dc_i2c_start_msg() local
151 if (!(msg->flags & I2C_M_NOSTART)) { dc_i2c_start_msg()
154 } else if (msg->flags & I2C_M_RD) { dc_i2c_start_msg()
183 addr_cmd = dc_i2c_addr_cmd(i2c->msg); dc_i2c_irq()
188 if (i2c->msg->flags & I2C_M_RD) { dc_i2c_irq()
196 if (i2c->msgbuf_ptr < i2c->msg->len) dc_i2c_irq()
202 if (i2c->msgbuf_ptr < i2c->msg->len) dc_i2c_irq()
218 static int dc_i2c_xfer_msg(struct dc_i2c *i2c, struct i2c_msg *msg, int first, dc_i2c_xfer_msg() argument
225 i2c->msg = msg; dc_i2c_xfer_msg()
H A Di2c-sh7760.c86 struct i2c_msg *msg; member in struct:cami2c
115 struct i2c_msg *msg = id->msg; sh7760_i2c_irq() local
116 char *data = msg->buf; sh7760_i2c_irq()
165 if (msg->len <= len) { sh7760_i2c_irq()
181 while (msg->len && len) { sh7760_i2c_irq()
183 msg->len--; sh7760_i2c_irq()
187 if (msg->len) { sh7760_i2c_irq()
188 len = (msg->len >= FIFO_SIZE) ? FIFO_SIZE - 1 sh7760_i2c_irq()
189 : msg->len - 1; sh7760_i2c_irq()
195 if ((fsr & FSR_TEND) && (msg->len < 1)) { sh7760_i2c_irq()
208 while (msg->len && (IN32(id, I2CTFDR) < FIFO_SIZE)) { sh7760_i2c_irq()
210 msg->len--; sh7760_i2c_irq()
213 if (msg->len < 1) { sh7760_i2c_irq()
217 len = (msg->len >= FIFO_SIZE) ? 2 : 0; sh7760_i2c_irq()
227 id->msg = NULL; sh7760_i2c_irq()
248 OUT32(id, I2CMAR, (id->msg->addr << 1) | 1); sh7760_i2c_mrecv()
251 if (id->msg->len >= FIFO_SIZE) sh7760_i2c_mrecv()
254 len = id->msg->len - 1; /* trigger before all received */ sh7760_i2c_mrecv()
274 OUT32(id, I2CMAR, (id->msg->addr << 1) | 0); sh7760_i2c_msend()
277 if (id->msg->len >= FIFO_SIZE) sh7760_i2c_msend()
285 while (id->msg->len && IN32(id, I2CTFDR) < FIFO_SIZE) { sh7760_i2c_msend()
286 OUT32(id, I2CRXTX, *(id->msg->buf)); sh7760_i2c_msend()
287 (id->msg->len)--; sh7760_i2c_msend()
288 (id->msg->buf)++; sh7760_i2c_msend()
295 OUT32(id, I2CFIER, FIER_TEIE | (id->msg->len ? FIER_TXIE : 0)); sh7760_i2c_msend()
321 id->msg = msgs; sh7760_i2c_master_xfer()
356 id->msg = NULL; sh7760_i2c_master_xfer()
H A Di2c-cros-ec-tunnel.c93 struct ec_params_i2c_passthru_msg *msg = &params->msg[i]; ec_i2c_construct_message() local
95 msg->len = i2c_msg->len; ec_i2c_construct_message()
96 msg->addr_flags = i2c_msg->addr; ec_i2c_construct_message()
102 msg->addr_flags |= EC_I2C_FLAG_READ; ec_i2c_construct_message()
104 memcpy(out_data, i2c_msg->buf, msg->len); ec_i2c_construct_message()
105 out_data += msg->len; ec_i2c_construct_message()
186 struct cros_ec_command msg = { }; ec_i2c_xfer() local
201 result = ec_i2c_construct_message(msg.outdata, i2c_msgs, num, bus_num); ec_i2c_xfer()
205 msg.version = 0; ec_i2c_xfer()
206 msg.command = EC_CMD_I2C_PASSTHRU; ec_i2c_xfer()
207 msg.outsize = request_len; ec_i2c_xfer()
208 msg.insize = response_len; ec_i2c_xfer()
210 result = cros_ec_cmd_xfer(bus->ec, &msg); ec_i2c_xfer()
214 result = ec_i2c_parse_response(msg.indata, i2c_msgs, &num); ec_i2c_xfer()
H A Di2c-meson.c68 * @msg: Pointer to the current I2C message
87 struct i2c_msg *msg; member in struct:meson_i2c
186 bool write = !(i2c->msg->flags & I2C_M_RD); meson_i2c_prepare_xfer()
189 i2c->count = min_t(int, i2c->msg->len - i2c->pos, 8); meson_i2c_prepare_xfer()
195 if (write || i2c->pos + i2c->count < i2c->msg->len) meson_i2c_prepare_xfer()
202 meson_i2c_put_data(i2c, i2c->msg->buf + i2c->pos, i2c->count); meson_i2c_prepare_xfer()
248 meson_i2c_get_data(i2c, i2c->msg->buf + i2c->pos, meson_i2c_irq()
253 if (i2c->pos >= i2c->msg->len) { meson_i2c_irq()
263 if (i2c->pos >= i2c->msg->len) { meson_i2c_irq()
292 static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg) meson_i2c_do_start() argument
296 token = (msg->flags & I2C_M_RD) ? TOKEN_SLAVE_ADDR_READ : meson_i2c_do_start()
299 writel(msg->addr << 1, i2c->regs + REG_SLAVE_ADDR); meson_i2c_do_start()
304 static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg, meson_i2c_xfer_msg() argument
310 i2c->msg = msg; meson_i2c_xfer_msg()
318 flags = (msg->flags & I2C_M_IGNORE_NAK) ? REG_CTRL_ACK_IGNORE : 0; meson_i2c_xfer_msg()
321 if (!(msg->flags & I2C_M_NOSTART)) meson_i2c_xfer_msg()
322 meson_i2c_do_start(i2c, msg); meson_i2c_xfer_msg()
324 i2c->state = (msg->flags & I2C_M_RD) ? STATE_READ : STATE_WRITE; meson_i2c_xfer_msg()
H A Di2c-qup.c118 struct i2c_msg *msg; member in struct:qup_i2c_dev
140 if (!qup->msg) { qup_i2c_interrupt()
247 static void qup_i2c_set_write_mode(struct qup_i2c_dev *qup, struct i2c_msg *msg) qup_i2c_set_write_mode() argument
250 int total = msg->len + 1; qup_i2c_set_write_mode()
264 static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg) qup_i2c_issue_write() argument
266 u32 addr = msg->addr << 1; qup_i2c_issue_write()
280 while (qup->pos < msg->len) { qup_i2c_issue_write()
286 if (qup->pos == msg->len - 1) qup_i2c_issue_write()
292 val |= (qup_tag | msg->buf[qup->pos]) << QUP_MSW_SHIFT; qup_i2c_issue_write()
294 val = qup_tag | msg->buf[qup->pos]; qup_i2c_issue_write()
297 if (idx & 1 || qup->pos == msg->len - 1) qup_i2c_issue_write()
305 static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg) qup_i2c_write_one() argument
310 qup->msg = msg; qup_i2c_write_one()
315 qup_i2c_set_write_mode(qup, msg); qup_i2c_write_one()
328 qup_i2c_issue_write(qup, msg); qup_i2c_write_one()
343 dev_err(qup->dev, "NACK from %x\n", msg->addr); qup_i2c_write_one()
347 } while (qup->pos < msg->len); qup_i2c_write_one()
354 qup->msg = NULL; qup_i2c_write_one()
373 static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg) qup_i2c_issue_read() argument
377 addr = (msg->addr << 1) | 1; qup_i2c_issue_read()
380 len = (msg->len == QUP_READ_LIMIT) ? 0 : msg->len; qup_i2c_issue_read()
387 static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg) qup_i2c_read_fifo() argument
393 for (idx = 0; qup->pos < msg->len; idx++) { qup_i2c_read_fifo()
403 msg->buf[qup->pos++] = val & 0xFF; qup_i2c_read_fifo()
405 msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT; qup_i2c_read_fifo()
410 static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg) qup_i2c_read_one() argument
415 qup->msg = msg; qup_i2c_read_one()
420 qup_i2c_set_read_mode(qup, msg->len); qup_i2c_read_one()
432 qup_i2c_issue_read(qup, msg); qup_i2c_read_one()
448 dev_err(qup->dev, "NACK from %x\n", msg->addr); qup_i2c_read_one()
453 qup_i2c_read_fifo(qup, msg); qup_i2c_read_one()
454 } while (qup->pos < msg->len); qup_i2c_read_one()
458 qup->msg = NULL; qup_i2c_read_one()
/linux-4.1.27/drivers/media/pci/mantis/
H A Dmantis_i2c.c38 static int mantis_i2c_read(struct mantis_pci *mantis, const struct i2c_msg *msg) mantis_i2c_read() argument
43 __func__, msg->addr); mantis_i2c_read()
45 for (i = 0; i < msg->len; i++) { mantis_i2c_read()
46 rxd = (msg->addr << 25) | (1 << 24) mantis_i2c_read()
51 if (i == (msg->len - 1)) mantis_i2c_read()
76 msg->buf[i] = (u8)((rxd >> 8) & 0xFF); mantis_i2c_read()
77 dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]); mantis_i2c_read()
84 static int mantis_i2c_write(struct mantis_pci *mantis, const struct i2c_msg *msg) mantis_i2c_write() argument
90 __func__, msg->addr); mantis_i2c_write()
92 for (i = 0; i < msg->len; i++) { mantis_i2c_write()
93 dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]); mantis_i2c_write()
94 txd = (msg->addr << 25) | (msg->buf[i] << 8) mantis_i2c_write()
99 if (i == (msg->len - 1)) mantis_i2c_write()
H A Dmantis_vp2033.c56 struct i2c_msg msg[] = { read_pwm() local
61 if ((i2c_transfer(adapter, msg, 2) != 2) read_pwm()
75 struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf)}; tda1002x_cu1216_tuner_set() local
94 if (i2c_transfer(adapter, &msg, 1) != 1) tda1002x_cu1216_tuner_set()
98 msg.flags = I2C_M_RD; tda1002x_cu1216_tuner_set()
99 msg.len = 1; tda1002x_cu1216_tuner_set()
104 if (i2c_transfer(adapter, &msg, 1) == 1 && (buf[0] & 0x40)) tda1002x_cu1216_tuner_set()
111 msg.flags = 0; tda1002x_cu1216_tuner_set()
112 msg.len = 2; tda1002x_cu1216_tuner_set()
113 msg.buf = &buf[2]; tda1002x_cu1216_tuner_set()
118 if (i2c_transfer(adapter, &msg, 1) != 1) tda1002x_cu1216_tuner_set()
H A Dmantis_vp2040.c57 struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf)}; tda1002x_cu1216_tuner_set() local
76 if (i2c_transfer(adapter, &msg, 1) != 1) tda1002x_cu1216_tuner_set()
80 msg.flags = I2C_M_RD; tda1002x_cu1216_tuner_set()
81 msg.len = 1; tda1002x_cu1216_tuner_set()
86 if (i2c_transfer(adapter, &msg, 1) == 1 && (buf[0] & 0x40)) tda1002x_cu1216_tuner_set()
93 msg.flags = 0; tda1002x_cu1216_tuner_set()
94 msg.len = 2; tda1002x_cu1216_tuner_set()
95 msg.buf = &buf[2]; tda1002x_cu1216_tuner_set()
100 if (i2c_transfer(adapter, &msg, 1) != 1) tda1002x_cu1216_tuner_set()
112 struct i2c_msg msg[] = { read_pwm() local
117 if ((i2c_transfer(adapter, msg, 2) != 2) read_pwm()
/linux-4.1.27/drivers/scsi/libfc/
H A Dfc_elsct.c93 const char *msg; fc_els_resp_type() local
100 msg = "response no error"; fc_els_resp_type()
103 msg = "response timeout"; fc_els_resp_type()
106 msg = "response closed"; fc_els_resp_type()
109 msg = "response unknown error"; fc_els_resp_type()
118 msg = "accept"; fc_els_resp_type()
121 msg = "reject"; fc_els_resp_type()
124 msg = "response unknown ELS"; fc_els_resp_type()
133 msg = "CT accept"; fc_els_resp_type()
136 msg = "CT reject"; fc_els_resp_type()
139 msg = "response unknown CT"; fc_els_resp_type()
143 msg = "short CT response"; fc_els_resp_type()
147 msg = "response not ELS or CT"; fc_els_resp_type()
151 return msg; fc_els_resp_type()
/linux-4.1.27/arch/m68k/mac/
iop.c
222 static void iop_free_msg(struct iop_msg *msg) iop_free_msg() argument
224 msg->status = IOP_MSGSTATUS_UNUSED; iop_free_msg()
346 void iop_complete_message(struct iop_msg *msg) iop_complete_message() argument
348 int iop_num = msg->iop_num; iop_complete_message()
349 int chan = msg->channel; iop_complete_message()
353 printk("iop_complete(%p): iop %d chan %d\n", msg, msg->iop_num, msg->channel); iop_complete_message()
356 offset = IOP_ADDR_RECV_MSG + (msg->channel * IOP_MSG_LEN); iop_complete_message()
359 iop_writeb(iop_base[iop_num], offset, msg->reply[i]); iop_complete_message()
364 iop_interrupt(iop_base[msg->iop_num]); iop_complete_message()
366 iop_free_msg(msg); iop_complete_message()
373 static void iop_do_send(struct iop_msg *msg) iop_do_send() argument
375 volatile struct mac_iop *iop = iop_base[msg->iop_num]; iop_do_send()
378 offset = IOP_ADDR_SEND_MSG + (msg->channel * IOP_MSG_LEN); iop_do_send()
381 iop_writeb(iop, offset, msg->message[i]); iop_do_send()
384 iop_writeb(iop, IOP_ADDR_SEND_STATE + msg->channel, IOP_MSG_NEW); iop_do_send()
397 struct iop_msg *msg,*msg2; iop_handle_send() local
406 if (!(msg = iop_send_queue[iop_num][chan])) return; iop_handle_send()
408 msg->status = IOP_MSGSTATUS_COMPLETE; iop_handle_send()
411 msg->reply[i] = iop_readb(iop, offset); iop_handle_send()
413 if (msg->handler) (*msg->handler)(msg); iop_handle_send()
414 msg2 = msg; iop_handle_send()
415 msg = msg->next; iop_handle_send()
418 iop_send_queue[iop_num][chan] = msg; iop_handle_send()
419 if (msg) iop_do_send(msg); iop_handle_send()
431 struct iop_msg *msg; iop_handle_recv() local
437 msg = iop_alloc_msg(); iop_handle_recv()
438 msg->iop_num = iop_num; iop_handle_recv()
439 msg->channel = chan; iop_handle_recv()
440 msg->status = IOP_MSGSTATUS_UNSOL; iop_handle_recv()
441 msg->handler = iop_listeners[iop_num][chan].handler; iop_handle_recv()
446 msg->message[i] = iop_readb(iop, offset); iop_handle_recv()
454 if (msg->handler) { iop_handle_recv()
455 (*msg->handler)(msg); iop_handle_recv()
461 printk(" %02X", (uint) msg->message[i]); iop_handle_recv()
465 iop_complete_message(msg); iop_handle_recv()
481 struct iop_msg *msg, *q; iop_send_message() local
487 msg = iop_alloc_msg(); iop_send_message()
488 if (!msg) return -ENOMEM; iop_send_message()
490 msg->next = NULL; iop_send_message()
491 msg->status = IOP_MSGSTATUS_WAITING; iop_send_message()
492 msg->iop_num = iop_num; iop_send_message()
493 msg->channel = chan; iop_send_message()
494 msg->caller_priv = privdata; iop_send_message()
495 memcpy(msg->message, msg_data, msg_len); iop_send_message()
496 msg->handler = handler; iop_send_message()
499 iop_send_queue[iop_num][chan] = msg; iop_send_message()
502 q->next = msg; iop_send_message()
507 iop_do_send(msg); iop_send_message()
/linux-4.1.27/drivers/net/wireless/iwlwifi/
iwl-devtrace-msg.h
41 __dynamic_array(char, msg, MAX_MSG_LEN)
44 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
48 TP_printk("%s", __get_str(msg))
79 __dynamic_array(char, msg, MAX_MSG_LEN)
85 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
89 TP_printk("%s", __get_str(msg))
96 #define TRACE_INCLUDE_FILE iwl-devtrace-msg
/linux-4.1.27/drivers/media/usb/au0828/
au0828-i2c.c
141 const struct i2c_msg *msg, int joined_rlen) i2c_sendbytes()
153 (dev->board.tuner_addr == msg->addr)) { i2c_sendbytes()
159 if (msg->len == 64) i2c_sendbytes()
168 au0828_write(dev, AU0828_I2C_DEST_ADDR_203, msg->addr << 1); i2c_sendbytes()
170 dprintk(4, "SEND: %02x\n", msg->addr); i2c_sendbytes()
173 if (msg->len == 0) { i2c_sendbytes()
192 for (i = 0; i < msg->len;) { i2c_sendbytes()
194 dprintk(4, " %02x\n", msg->buf[i]); i2c_sendbytes()
196 au0828_write(dev, AU0828_I2C_WRITE_FIFO_205, msg->buf[i]); i2c_sendbytes()
201 if ((strobe >= 4) || (i >= msg->len)) { i2c_sendbytes()
204 if (i < msg->len) i2c_sendbytes()
226 return msg->len; i2c_sendbytes()
231 const struct i2c_msg *msg, int joined) i2c_readbytes()
247 (dev->board.tuner_addr == msg->addr)) i2c_readbytes()
254 au0828_write(dev, AU0828_I2C_DEST_ADDR_203, msg->addr << 1); i2c_readbytes()
259 if (msg->len == 0) { i2c_readbytes()
268 for (i = 0; i < msg->len;) { i2c_readbytes()
272 if (i < msg->len) i2c_readbytes()
283 msg->buf[i-1] = au0828_read(dev, AU0828_I2C_READ_FIFO_209) & i2c_readbytes()
286 dprintk(4, " %02x\n", msg->buf[i-1]); i2c_readbytes()
293 return msg->len; i2c_readbytes()
140 i2c_sendbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined_rlen) i2c_sendbytes() argument
230 i2c_readbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined) i2c_readbytes() argument
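On the adapter side (au0828-i2c.c above), the same struct i2c_msg array arrives through the bus driver's master_xfer hook, which walks the messages and pushes each one to the hardware. A stripped-down skeleton of such a hook is sketched below; example_i2c_read()/example_i2c_write() are hypothetical helpers standing in for the FIFO handling, not real au0828 functions.

    #include <linux/i2c.h>

    /* Hypothetical per-message helpers; a real driver drives its FIFOs here. */
    static int example_i2c_write(struct i2c_adapter *adap, struct i2c_msg *msg);
    static int example_i2c_read(struct i2c_adapter *adap, struct i2c_msg *msg);

    static int example_master_xfer(struct i2c_adapter *adap,
                                   struct i2c_msg *msgs, int num)
    {
        int i, ret;

        for (i = 0; i < num; i++) {
            if (msgs[i].flags & I2C_M_RD)
                ret = example_i2c_read(adap, &msgs[i]);
            else
                ret = example_i2c_write(adap, &msgs[i]);
            if (ret < 0)
                return ret;
        }
        return num;        /* the i2c core expects the number of messages */
    }

    static u32 example_functionality(struct i2c_adapter *adap)
    {
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
    }

    static const struct i2c_algorithm example_algo = {
        .master_xfer   = example_master_xfer,
        .functionality = example_functionality,
    };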
/linux-4.1.27/drivers/net/can/
janz-ican3.c
283 static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) ican3_old_recv_msg() argument
308 memcpy_fromio(msg, mod->dpm, sizeof(*msg)); ican3_old_recv_msg()
328 static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) ican3_old_send_msg() argument
350 memcpy_toio(mod->dpm, msg, sizeof(*msg)); ican3_old_send_msg()
532 static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) ican3_new_send_msg() argument
548 memcpy_toio(mod->dpm, msg, sizeof(*msg)); ican3_new_send_msg()
563 static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) ican3_new_recv_msg() argument
579 memcpy_fromio(msg, mod->dpm, sizeof(*msg)); ican3_new_recv_msg()
595 static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) ican3_send_msg() argument
603 ret = ican3_old_send_msg(mod, msg); ican3_send_msg()
605 ret = ican3_new_send_msg(mod, msg); ican3_send_msg()
611 static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) ican3_recv_msg() argument
619 ret = ican3_old_recv_msg(mod, msg); ican3_recv_msg()
621 ret = ican3_new_recv_msg(mod, msg); ican3_recv_msg()
633 struct ican3_msg msg; ican3_msg_connect() local
635 memset(&msg, 0, sizeof(msg)); ican3_msg_connect()
636 msg.spec = MSG_CONNECTI; ican3_msg_connect()
637 msg.len = cpu_to_le16(0); ican3_msg_connect()
639 return ican3_send_msg(mod, &msg); ican3_msg_connect()
644 struct ican3_msg msg; ican3_msg_disconnect() local
646 memset(&msg, 0, sizeof(msg)); ican3_msg_disconnect()
647 msg.spec = MSG_DISCONNECT; ican3_msg_disconnect()
648 msg.len = cpu_to_le16(0); ican3_msg_disconnect()
650 return ican3_send_msg(mod, &msg); ican3_msg_disconnect()
655 struct ican3_msg msg; ican3_msg_newhostif() local
658 memset(&msg, 0, sizeof(msg)); ican3_msg_newhostif()
659 msg.spec = MSG_NEWHOSTIF; ican3_msg_newhostif()
660 msg.len = cpu_to_le16(0); ican3_msg_newhostif()
665 ret = ican3_send_msg(mod, &msg); ican3_msg_newhostif()
676 struct ican3_msg msg; ican3_msg_fasthostif() local
679 memset(&msg, 0, sizeof(msg)); ican3_msg_fasthostif()
680 msg.spec = MSG_INITFDPMQUEUE; ican3_msg_fasthostif()
681 msg.len = cpu_to_le16(8); ican3_msg_fasthostif()
685 msg.data[0] = addr & 0xff; ican3_msg_fasthostif()
686 msg.data[1] = (addr >> 8) & 0xff; ican3_msg_fasthostif()
687 msg.data[2] = (addr >> 16) & 0xff; ican3_msg_fasthostif()
688 msg.data[3] = (addr >> 24) & 0xff; ican3_msg_fasthostif()
692 msg.data[4] = addr & 0xff; ican3_msg_fasthostif()
693 msg.data[5] = (addr >> 8) & 0xff; ican3_msg_fasthostif()
694 msg.data[6] = (addr >> 16) & 0xff; ican3_msg_fasthostif()
695 msg.data[7] = (addr >> 24) & 0xff; ican3_msg_fasthostif()
700 return ican3_send_msg(mod, &msg); ican3_msg_fasthostif()
709 struct ican3_msg msg; ican3_set_id_filter() local
713 memset(&msg, 0, sizeof(msg)); ican3_set_id_filter()
714 msg.spec = MSG_SETAFILMASK; ican3_set_id_filter()
715 msg.len = cpu_to_le16(5); ican3_set_id_filter()
716 msg.data[0] = 0x00; /* IDLo LSB */ ican3_set_id_filter()
717 msg.data[1] = 0x00; /* IDLo MSB */ ican3_set_id_filter()
718 msg.data[2] = 0xff; /* IDHi LSB */ ican3_set_id_filter()
719 msg.data[3] = 0x07; /* IDHi MSB */ ican3_set_id_filter()
722 msg.data[4] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT; ican3_set_id_filter()
724 ret = ican3_send_msg(mod, &msg); ican3_set_id_filter()
729 memset(&msg, 0, sizeof(msg)); ican3_set_id_filter()
730 msg.spec = MSG_SETAFILMASK; ican3_set_id_filter()
731 msg.len = cpu_to_le16(13); ican3_set_id_filter()
732 msg.data[0] = 0; /* MUX = 0 */ ican3_set_id_filter()
733 msg.data[1] = 0x00; /* IDLo LSB */ ican3_set_id_filter()
734 msg.data[2] = 0x00; ican3_set_id_filter()
735 msg.data[3] = 0x00; ican3_set_id_filter()
736 msg.data[4] = 0x20; /* IDLo MSB */ ican3_set_id_filter()
737 msg.data[5] = 0xff; /* IDHi LSB */ ican3_set_id_filter()
738 msg.data[6] = 0xff; ican3_set_id_filter()
739 msg.data[7] = 0xff; ican3_set_id_filter()
740 msg.data[8] = 0x3f; /* IDHi MSB */ ican3_set_id_filter()
743 msg.data[9] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT; ican3_set_id_filter()
745 return ican3_send_msg(mod, &msg); ican3_set_id_filter()
753 struct ican3_msg msg; ican3_set_bus_state() local
755 memset(&msg, 0, sizeof(msg)); ican3_set_bus_state()
756 msg.spec = on ? MSG_CONREQ : MSG_COFFREQ; ican3_set_bus_state()
757 msg.len = cpu_to_le16(0); ican3_set_bus_state()
759 return ican3_send_msg(mod, &msg); ican3_set_bus_state()
764 struct ican3_msg msg; ican3_set_termination() local
766 memset(&msg, 0, sizeof(msg)); ican3_set_termination()
767 msg.spec = MSG_HWCONF; ican3_set_termination()
768 msg.len = cpu_to_le16(2); ican3_set_termination()
769 msg.data[0] = 0x00; ican3_set_termination()
770 msg.data[1] = on ? HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF; ican3_set_termination()
772 return ican3_send_msg(mod, &msg); ican3_set_termination()
777 struct ican3_msg msg; ican3_send_inquiry() local
779 memset(&msg, 0, sizeof(msg)); ican3_send_inquiry()
780 msg.spec = MSG_INQUIRY; ican3_send_inquiry()
781 msg.len = cpu_to_le16(2); ican3_send_inquiry()
782 msg.data[0] = subspec; ican3_send_inquiry()
783 msg.data[1] = 0x00; ican3_send_inquiry()
785 return ican3_send_msg(mod, &msg); ican3_send_inquiry()
790 struct ican3_msg msg; ican3_set_buserror() local
792 memset(&msg, 0, sizeof(msg)); ican3_set_buserror()
793 msg.spec = MSG_CCONFREQ; ican3_set_buserror()
794 msg.len = cpu_to_le16(2); ican3_set_buserror()
795 msg.data[0] = 0x00; ican3_set_buserror()
796 msg.data[1] = quota; ican3_set_buserror()
798 return ican3_send_msg(mod, &msg); ican3_set_buserror()
881 static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg) ican3_handle_idvers() argument
883 netdev_dbg(mod->ndev, "IDVERS response: %s\n", msg->data); ican3_handle_idvers()
886 static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg) ican3_handle_msglost() argument
898 if (msg->spec == MSG_MSGLOST) { ican3_handle_msglost()
899 netdev_err(mod->ndev, "lost %d control messages\n", msg->data[0]); ican3_handle_msglost()
928 static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) ican3_handle_cevtind() argument
938 if (msg->data[1] != CEVTIND_CHIP_SJA1000) { ican3_handle_cevtind()
944 if (le16_to_cpu(msg->len) < 6) { ican3_handle_cevtind()
949 isrc = msg->data[0]; ican3_handle_cevtind()
950 ecc = msg->data[2]; ican3_handle_cevtind()
951 status = msg->data[3]; ican3_handle_cevtind()
952 rxerr = msg->data[4]; ican3_handle_cevtind()
953 txerr = msg->data[5]; ican3_handle_cevtind()
1075 static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg) ican3_handle_inquiry() argument
1077 switch (msg->data[0]) { ican3_handle_inquiry()
1080 mod->bec.rxerr = msg->data[5]; ican3_handle_inquiry()
1081 mod->bec.txerr = msg->data[6]; ican3_handle_inquiry()
1085 mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON; ican3_handle_inquiry()
1095 struct ican3_msg *msg) ican3_handle_unknown_message()
1098 msg->spec, le16_to_cpu(msg->len)); ican3_handle_unknown_message()
1104 static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg) ican3_handle_message() argument
1107 mod->num, msg->spec, le16_to_cpu(msg->len)); ican3_handle_message()
1109 switch (msg->spec) { ican3_handle_message()
1111 ican3_handle_idvers(mod, msg); ican3_handle_message()
1115 ican3_handle_msglost(mod, msg); ican3_handle_message()
1118 ican3_handle_cevtind(mod, msg); ican3_handle_message()
1121 ican3_handle_inquiry(mod, msg); ican3_handle_message()
1124 ican3_handle_unknown_message(mod, msg); ican3_handle_message()
1313 struct ican3_msg uninitialized_var(msg); ican3_napi()
1314 ret = ican3_recv_msg(mod, &msg); ican3_napi()
1318 ican3_handle_message(mod, &msg); ican3_napi()
1628 struct ican3_msg msg; ican3_set_bittiming() local
1637 memset(&msg, 0, sizeof(msg)); ican3_set_bittiming()
1638 msg.spec = MSG_CBTRREQ; ican3_set_bittiming()
1639 msg.len = cpu_to_le16(4); ican3_set_bittiming()
1640 msg.data[0] = 0x00; ican3_set_bittiming()
1641 msg.data[1] = 0x00; ican3_set_bittiming()
1642 msg.data[2] = btr0; ican3_set_bittiming()
1643 msg.data[3] = btr1; ican3_set_bittiming()
1645 return ican3_send_msg(mod, &msg); ican3_set_bittiming()
1094 ican3_handle_unknown_message(struct ican3_dev *mod, struct ican3_msg *msg) ican3_handle_unknown_message() argument
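Every ican3_msg_* helper above builds its control message the same way: clear the structure, set the spec byte and a little-endian length, fill the payload bytes by hand, and pass it to ican3_send_msg(). Where a 32-bit value is split across payload bytes, the open-coded shifts can equally be written with the kernel's unaligned little-endian helpers; the sketch below shows that variant for the two queue addresses used by MSG_INITFDPMQUEUE. It is illustrative only and assumes the spec/len/data layout shown in the listing.

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <asm/unaligned.h>

    /* Illustrative only: pack the rx/tx queue addresses with
     * put_unaligned_le32() instead of open-coded shifts. */
    static void example_fill_fasthostif(struct ican3_msg *msg,
                                        u32 rx_addr, u32 tx_addr)
    {
        memset(msg, 0, sizeof(*msg));
        msg->spec = MSG_INITFDPMQUEUE;
        msg->len = cpu_to_le16(8);
        put_unaligned_le32(rx_addr, &msg->data[0]);
        put_unaligned_le32(tx_addr, &msg->data[4]);
    }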
/linux-4.1.27/drivers/scsi/bfa/
bfi_ms.h
116 struct bfi_mhdr_s mh; /* Common msg header */
127 struct bfi_mhdr_s mh; /* common msg header */
139 struct bfi_mhdr_s mh; /* common msg header */
151 struct bfi_mhdr_s mh; /* common msg header */
182 struct bfi_mhdr_s mh; /* common msg header */
186 struct bfi_mhdr_s mh; /* common msg header */
196 struct bfi_mhdr_s mh; /* common msg header */
207 struct bfi_mhdr_s mh; /* common msg header */
243 struct bfi_mhdr_s mh; /* msg header */
251 struct bfi_mhdr_s mh; /* common msg header */
262 struct bfi_mhdr_s mh; /* msg header */
277 struct bfi_mhdr_s mh; /* msg header */
286 struct bfi_mhdr_s mh; /* common msg header */
327 struct bfi_msg_s *msg; member in union:bfi_fcport_i2h_msg_u
351 struct bfi_mhdr_s mh; /* Common msg header */
371 struct bfi_mhdr_s mh; /* Common msg header */
391 struct bfi_mhdr_s mh; /* Common msg header */
398 struct bfi_mhdr_s mh; /* Common msg header */
418 struct bfi_mhdr_s mh; /* common msg header */
432 struct bfi_mhdr_s mh; /* common msg header */
453 struct bfi_mhdr_s mh; /* common msg header */
460 struct bfi_mhdr_s mh; /* common msg header */
467 struct bfi_mhdr_s mh; /* common msg header */
473 struct bfi_mhdr_s mh; /* common msg header */
479 struct bfi_mhdr_s *msg; member in union:bfi_lps_h2i_msg_u
486 struct bfi_msg_s *msg; member in union:bfi_lps_i2h_msg_u
508 struct bfi_mhdr_s mh; /* common msg header */
521 struct bfi_mhdr_s mh; /* common msg header */
530 struct bfi_mhdr_s mh; /* common msg header */
537 struct bfi_mhdr_s mh; /* common msg header */
543 struct bfi_mhdr_s mh; /* common msg header */
550 struct bfi_mhdr_s mh; /* common msg header */
558 struct bfi_mhdr_s mh; /*!< common msg header */
566 struct bfi_msg_s *msg; member in union:bfi_rport_h2i_msg_u
573 struct bfi_msg_s *msg; member in union:bfi_rport_i2h_msg_u
596 struct bfi_mhdr_s mh; /* common msg header */
600 u8 msg_no; /* seq id of the msg */
605 struct bfi_mhdr_s mh; /* common msg header */
608 u8 seq_id; /* seq id of the msg */
612 struct bfi_mhdr_s mh; /* common msg header */
614 u8 seq_id; /* seq id of the msg */
619 struct bfi_mhdr_s mh; /* common msg header */
622 u8 seq_id; /* seq id of the msg */
626 struct bfi_mhdr_s mh; /* common msg header */
634 struct bfi_msg_s *msg; member in union:bfi_itn_h2i_msg_u
641 struct bfi_msg_s *msg; member in union:bfi_itn_i2h_msg_u
675 struct bfi_mhdr_s mh; /* Common msg header */
780 struct bfi_mhdr_s mh; /* common msg header */
795 struct bfi_mhdr_s mh; /* Common msg header */
814 struct bfi_mhdr_s mh; /* Common msg header */
824 struct bfi_mhdr_s mh; /* Common msg header */
847 struct bfi_mhdr_s mh; /* Common msg header */
/linux-4.1.27/drivers/s390/net/
smsgiucv_app.c
37 #define ENV_TEXT_LEN(msg) (strlen(ENV_TEXT_STR) + strlen((msg)) + 1)
67 const char *msg) smsg_app_event_alloc()
76 ENV_TEXT_LEN(msg), GFP_ATOMIC); smsg_app_event_alloc()
91 snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg); smsg_app_event_alloc()
120 static void smsg_app_callback(const char *from, char *msg) smsg_app_callback() argument
130 msg += strlen(SMSG_PREFIX); smsg_app_callback()
131 while (*msg && isspace(*msg)) smsg_app_callback()
132 msg++; smsg_app_callback()
133 if (*msg == '\0') smsg_app_callback()
137 se = smsg_app_event_alloc(from, msg); smsg_app_callback()
66 smsg_app_event_alloc(const char *from, const char *msg) smsg_app_event_alloc() argument
/linux-4.1.27/drivers/staging/nvec/
nvec_ps2.c
81 unsigned char *msg = (unsigned char *)data; nvec_ps2_notifier() local
85 for (i = 0; i < msg[1]; i++) nvec_ps2_notifier()
86 serio_interrupt(ps2_dev.ser_dev, msg[2 + i], 0); nvec_ps2_notifier()
87 NVEC_PHD("ps/2 mouse event: ", &msg[2], msg[1]); nvec_ps2_notifier()
91 if (msg[2] == 1) { nvec_ps2_notifier()
92 for (i = 0; i < (msg[1] - 2); i++) nvec_ps2_notifier()
93 serio_interrupt(ps2_dev.ser_dev, msg[i + 4], 0); nvec_ps2_notifier()
94 NVEC_PHD("ps/2 mouse reply: ", &msg[4], msg[1] - 2); nvec_ps2_notifier()
97 else if (msg[1] != 2) /* !ack */ nvec_ps2_notifier()
98 NVEC_PHD("unhandled mouse event: ", msg, msg[1] + 2); nvec_ps2_notifier()
nvec.c
146 unsigned char *msg = (unsigned char *)data; nvec_status_notifier() local
151 dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type); nvec_status_notifier()
153 msg, msg[1] + 2, true); nvec_status_notifier()
193 * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
197 void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) nvec_msg_free() argument
199 if (msg != &nvec->tx_scratch) nvec_msg_free()
200 dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool); nvec_msg_free()
201 atomic_set(&msg->used, 0); nvec_msg_free()
206 * nvec_msg_is_event - Return %true if @msg is an event
207 * @msg: A message
209 static bool nvec_msg_is_event(struct nvec_msg *msg) nvec_msg_is_event() argument
211 return msg->data[0] >> 7; nvec_msg_is_event()
216 * @msg: The message to get the size for
220 static size_t nvec_msg_size(struct nvec_msg *msg) nvec_msg_size() argument
222 bool is_event = nvec_msg_is_event(msg); nvec_msg_size()
223 int event_length = (msg->data[0] & 0x60) >> 5; nvec_msg_size()
227 return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0; nvec_msg_size()
264 struct nvec_msg *msg; nvec_write_async() local
267 msg = nvec_msg_alloc(nvec, NVEC_MSG_TX); nvec_write_async()
269 if (msg == NULL) nvec_write_async()
272 msg->data[0] = size; nvec_write_async()
273 memcpy(msg->data + 1, data, size); nvec_write_async()
274 msg->size = size + 1; nvec_write_async()
277 list_add_tail(&msg->node, &nvec->tx_data); nvec_write_async()
304 struct nvec_msg *msg; nvec_write_sync() local
326 msg = nvec->last_sync_msg; nvec_write_sync()
330 return msg; nvec_write_sync()
382 struct nvec_msg *msg; nvec_request_master() local
386 msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node); nvec_request_master()
395 msg->pos = 0; nvec_request_master()
401 list_del_init(&msg->node); nvec_request_master()
402 nvec_msg_free(nvec, msg); nvec_request_master()
411 * @msg: A message received by @nvec
416 static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg) parse_msg() argument
418 if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) { parse_msg()
419 dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data); parse_msg()
423 if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5) parse_msg()
425 DUMP_PREFIX_NONE, 16, 1, msg->data, parse_msg()
426 msg->data[1] + 2, true); parse_msg()
428 atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f, parse_msg()
429 msg->data); parse_msg()
445 struct nvec_msg *msg; nvec_dispatch() local
449 msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node); nvec_dispatch()
450 list_del_init(&msg->node); nvec_dispatch()
454 (msg->data[2] << 8) + msg->data[0]) { nvec_dispatch()
457 nvec->last_sync_msg = msg; nvec_dispatch()
460 parse_msg(nvec, msg); nvec_dispatch()
461 nvec_msg_free(nvec, msg); nvec_dispatch()
793 struct nvec_msg *msg; tegra_nvec_probe() local
882 msg = nvec_write_sync(nvec, get_firmware_version, 2); tegra_nvec_probe()
884 if (msg) { tegra_nvec_probe()
886 msg->data[4], msg->data[5], msg->data[6], msg->data[7]); tegra_nvec_probe()
888 nvec_msg_free(nvec, msg); tegra_nvec_probe()
930 struct nvec_msg *msg; nvec_suspend() local
938 msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend)); nvec_suspend()
939 nvec_msg_free(nvec, msg); nvec_suspend()
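The probe and suspend paths above show the calling convention for synchronous nvec requests: nvec_write_sync() hands back a struct nvec_msg that the caller must release with nvec_msg_free() once the reply payload has been read. A short sketch of that pattern follows; the two command bytes are placeholders, not a real EC request, and the prototype is assumed to match the calls shown above.

    /* Placeholder command bytes -- not a real EC request. */
    static const unsigned char example_req[] = { 0x07, 0x15 };

    static int example_query(struct nvec_chip *nvec)
    {
        struct nvec_msg *msg;

        msg = nvec_write_sync(nvec, example_req, sizeof(example_req));
        if (!msg)
            return -EIO;

        /* ... consume msg->data[] here ... */

        nvec_msg_free(nvec, msg);
        return 0;
    }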
/linux-4.1.27/net/irda/
irnetlink.c
83 struct sk_buff *msg; irda_nl_get_mode() local
91 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); irda_nl_get_mode()
92 if (!msg) { irda_nl_get_mode()
103 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, irda_nl_get_mode()
110 if(nla_put_string(msg, IRDA_NL_ATTR_IFNAME, irda_nl_get_mode()
114 if(nla_put_u32(msg, IRDA_NL_ATTR_MODE, irlap->mode)) irda_nl_get_mode()
117 genlmsg_end(msg, hdr); irda_nl_get_mode()
119 return genlmsg_reply(msg, info); irda_nl_get_mode()
122 nlmsg_free(msg); irda_nl_get_mode()
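irda_nl_get_mode() above is a textbook generic-netlink reply: allocate a reply skb, open a message with genlmsg_put(), add attributes, finalize it and send it back to the requester, freeing the skb on any failure. Reduced to a single u32 attribute, with placeholder family, command and attribute names, the skeleton looks like this:

    #include <net/genetlink.h>

    static struct genl_family example_family;        /* placeholder family */
    enum { EXAMPLE_ATTR_UNSPEC, EXAMPLE_ATTR_VALUE };
    #define EXAMPLE_CMD_GET 1

    static int example_nl_get(struct genl_info *info, u32 value)
    {
        struct sk_buff *msg;
        void *hdr;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
            return -ENOMEM;

        hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &example_family, 0, EXAMPLE_CMD_GET);
        if (!hdr)
            goto nla_put_failure;

        if (nla_put_u32(msg, EXAMPLE_ATTR_VALUE, value))
            goto nla_put_failure;

        genlmsg_end(msg, hdr);
        return genlmsg_reply(msg, info);

    nla_put_failure:
        nlmsg_free(msg);
        return -EMSGSIZE;
    }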
/linux-4.1.27/drivers/video/backlight/
ipaq_micro_bl.c
22 struct ipaq_micro_msg msg = { micro_bl_update_status() local
38 msg.tx_data[0] = 0x01; micro_bl_update_status()
39 msg.tx_data[1] = intensity > 0 ? 1 : 0; micro_bl_update_status()
40 msg.tx_data[2] = intensity; micro_bl_update_status()
41 return ipaq_micro_tx_msg_sync(micro, &msg); micro_bl_update_status()
/linux-4.1.27/drivers/gpu/drm/tegra/
dpaux.c
105 struct drm_dp_aux_msg *msg) tegra_dpaux_transfer()
114 if (msg->size > 16) tegra_dpaux_transfer()
121 if (msg->size < 1) { tegra_dpaux_transfer()
122 switch (msg->request & ~DP_AUX_I2C_MOT) { tegra_dpaux_transfer()
133 value = DPAUX_DP_AUXCTL_CMDLEN(msg->size - 1); tegra_dpaux_transfer()
136 switch (msg->request & ~DP_AUX_I2C_MOT) { tegra_dpaux_transfer()
138 if (msg->request & DP_AUX_I2C_MOT) tegra_dpaux_transfer()
146 if (msg->request & DP_AUX_I2C_MOT) tegra_dpaux_transfer()
154 if (msg->request & DP_AUX_I2C_MOT) tegra_dpaux_transfer()
173 tegra_dpaux_writel(dpaux, msg->address, DPAUX_DP_AUXADDR); tegra_dpaux_transfer()
176 if ((msg->request & DP_AUX_I2C_READ) == 0) { tegra_dpaux_transfer()
177 tegra_dpaux_write_fifo(dpaux, msg->buffer, msg->size); tegra_dpaux_transfer()
178 ret = msg->size; tegra_dpaux_transfer()
204 msg->reply = DP_AUX_NATIVE_REPLY_ACK; tegra_dpaux_transfer()
208 msg->reply = DP_AUX_NATIVE_REPLY_NACK; tegra_dpaux_transfer()
212 msg->reply = DP_AUX_NATIVE_REPLY_DEFER; tegra_dpaux_transfer()
216 msg->reply = DP_AUX_I2C_REPLY_NACK; tegra_dpaux_transfer()
220 msg->reply = DP_AUX_I2C_REPLY_DEFER; tegra_dpaux_transfer()
224 if ((msg->size > 0) && (msg->reply == DP_AUX_NATIVE_REPLY_ACK)) { tegra_dpaux_transfer()
225 if (msg->request & DP_AUX_I2C_READ) { tegra_dpaux_transfer()
228 if (WARN_ON(count != msg->size)) tegra_dpaux_transfer()
229 count = min_t(size_t, count, msg->size); tegra_dpaux_transfer()
231 tegra_dpaux_read_fifo(dpaux, msg->buffer, count); tegra_dpaux_transfer()
104 tegra_dpaux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) tegra_dpaux_transfer() argument
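tegra_dpaux_transfer() above implements the drm_dp_aux .transfer hook: the DP helper core hands it one drm_dp_aux_msg per AUX transaction, and the hook returns the number of bytes moved and fills in msg->reply. A rough outline of such a hook, with the hardware access reduced to hypothetical helpers and the reply handling simplified to the ACK case, is:

    #include <drm/drm_dp_helper.h>

    /* Hypothetical FIFO helpers; a real driver talks to its AUX hardware here. */
    static int example_aux_write(struct drm_dp_aux_msg *msg);
    static int example_aux_read(struct drm_dp_aux_msg *msg);

    static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
                                        struct drm_dp_aux_msg *msg)
    {
        int err;

        /* AUX FIFOs are commonly limited to 16 data bytes per transaction */
        if (msg->size > 16)
            return -E2BIG;

        if (msg->request & DP_AUX_I2C_READ)
            err = example_aux_read(msg);
        else
            err = example_aux_write(msg);
        if (err < 0)
            return err;

        /* a real hook decodes the hardware status into the various
         * DP_AUX_{NATIVE,I2C}_REPLY_* codes; only the happy path is shown */
        msg->reply = DP_AUX_NATIVE_REPLY_ACK;
        return msg->size;
    }

    /* in probe(): aux->transfer = example_aux_transfer; drm_dp_aux_register(aux); */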
/linux-4.1.27/arch/um/os-Linux/drivers/
tuntap_user.c
70 struct msghdr msg; tuntap_open_tramp() local
87 msg.msg_name = NULL; tuntap_open_tramp()
88 msg.msg_namelen = 0; tuntap_open_tramp()
91 msg.msg_iov = &iov; tuntap_open_tramp()
92 msg.msg_iovlen = 1; tuntap_open_tramp()
95 msg.msg_iov = NULL; tuntap_open_tramp()
96 msg.msg_iovlen = 0; tuntap_open_tramp()
98 msg.msg_control = buf; tuntap_open_tramp()
99 msg.msg_controllen = sizeof(buf); tuntap_open_tramp()
100 msg.msg_flags = 0; tuntap_open_tramp()
101 n = recvmsg(me, &msg, 0); tuntap_open_tramp()
111 cmsg = CMSG_FIRSTHDR(&msg); tuntap_open_tramp()
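The tuntap helper above is standard SCM_RIGHTS file-descriptor passing: the peer attaches the fd as ancillary data and the receiver digs it back out of the msghdr control buffer with the CMSG macros. The receive side, written as ordinary user-space C with error handling trimmed, looks like this:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Receive one file descriptor passed over a UNIX socket, or -1 on error. */
    static int example_recv_fd(int sock)
    {
        char dummy;
        char buf[CMSG_SPACE(sizeof(int))];
        struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
        struct msghdr msg = {
            .msg_iov        = &iov,
            .msg_iovlen     = 1,
            .msg_control    = buf,
            .msg_controllen = sizeof(buf),
        };
        struct cmsghdr *cmsg;
        int fd = -1;

        if (recvmsg(sock, &msg, 0) < 0)
            return -1;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
            if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
                memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
                break;
            }
        }
        return fd;
    }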
/linux-4.1.27/arch/ia64/sn/kernel/
msi_sn.c
68 struct msi_msg msg; sn_setup_msi_irq() local
138 msg.address_hi = (u32)(bus_addr >> 32); sn_setup_msi_irq()
139 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); sn_setup_msi_irq()
145 msg.data = 0x100 + irq; sn_setup_msi_irq()
148 pci_write_msi_msg(irq, &msg); sn_setup_msi_irq()
158 struct msi_msg msg; sn_set_msi_irq_affinity() local
178 __get_cached_msi_msg(data->msi_desc, &msg); sn_set_msi_irq_affinity()
183 bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo); sn_set_msi_irq_affinity()
205 msg.address_hi = (u32)(bus_addr >> 32); sn_set_msi_irq_affinity()
206 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); sn_set_msi_irq_affinity()
208 pci_write_msi_msg(irq, &msg); sn_set_msi_irq_affinity()
/linux-4.1.27/arch/arm/mach-ixp4xx/include/mach/
npe.h
32 int npe_send_message(struct npe *npe, const void *msg, const char *what);
33 int npe_recv_message(struct npe *npe, void *msg, const char *what);
34 int npe_send_recv_message(struct npe *npe, void *msg, const char *what);
/linux-4.1.27/scripts/kconfig/
kxgettext.c
84 const char *msg; member in struct:message
92 static struct message *message__new(const char *msg, char *option, message__new() argument
104 self->msg = strdup(msg); message__new()
105 if (self->msg == NULL) message__new()
120 static struct message *mesage__find(const char *msg) mesage__find() argument
125 if (strcmp(m->msg, msg) == 0) mesage__find()
149 static int message__add(const char *msg, char *option, const char *file, message__add() argument
154 char *escaped = escape(msg, bf, sizeof(bf)); message__add()
213 "msgstr \"\"\n", self->msg); message__print_gettext_msgid_msgstr()
222 if (strlen(m->msg) > sizeof("\"\"")) menu__xgettext()
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmsmac/
brcms_trace_brcmsmac_msg.h
31 __dynamic_array(char, msg, MAX_MSG_LEN)
34 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
38 TP_printk("%s", __get_str(msg))
67 __dynamic_array(char, msg, MAX_MSG_LEN)
72 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
76 TP_printk("%s: %s", __get_str(func), __get_str(msg))
/linux-4.1.27/arch/powerpc/platforms/ps3/
smp.c
42 static void ps3_smp_message_pass(int cpu, int msg) ps3_smp_message_pass() argument
47 if (msg >= MSG_COUNT) { ps3_smp_message_pass()
48 DBG("%s:%d: bad msg: %d\n", __func__, __LINE__, msg); ps3_smp_message_pass()
52 virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; ps3_smp_message_pass()
57 " (%d)\n", __func__, __LINE__, cpu, msg, result); ps3_smp_message_pass()
/linux-4.1.27/security/selinux/
netlink.c
50 struct selnl_msg_setenforce *msg = nlmsg_data(nlh); selnl_add_payload() local
52 memset(msg, 0, len); selnl_add_payload()
53 msg->val = *((int *)data); selnl_add_payload()
58 struct selnl_msg_policyload *msg = nlmsg_data(nlh); selnl_add_payload() local
60 memset(msg, 0, len); selnl_add_payload()
61 msg->seqno = *((u32 *)data); selnl_add_payload()
/linux-4.1.27/drivers/gpu/drm/
drm_dp_mst_topology.c
67 /* sideband msg handling */ drm_dp_msg_header_crc4()
203 u8 *buf = raw->msg; drm_dp_encode_sideband_req()
299 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len) drm_dp_crc_sideband_chunk_req() argument
302 crc4 = drm_dp_msg_data_crc4(msg, len); drm_dp_crc_sideband_chunk_req()
303 msg[len] = crc4; drm_dp_crc_sideband_chunk_req()
310 u8 *buf = raw->msg; drm_dp_encode_sideband_reply()
317 /* this adds a chunk of msg to the builder to get the final msg */ drm_dp_sideband_msg_build()
318 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, drm_dp_sideband_msg_build() argument
334 msg->curchunk_len = recv_hdr.msg_len; drm_dp_sideband_msg_build()
335 msg->curchunk_hdrlen = hdrlen; drm_dp_sideband_msg_build()
338 if (recv_hdr.somt && msg->have_somt) drm_dp_sideband_msg_build()
342 memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr)); drm_dp_sideband_msg_build()
343 msg->have_somt = true; drm_dp_sideband_msg_build()
346 msg->have_eomt = true; drm_dp_sideband_msg_build()
349 msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen)); drm_dp_sideband_msg_build()
350 memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); drm_dp_sideband_msg_build()
352 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); drm_dp_sideband_msg_build()
353 msg->curchunk_idx += replybuflen; drm_dp_sideband_msg_build()
356 if (msg->curchunk_idx >= msg->curchunk_len) { drm_dp_sideband_msg_build()
358 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); drm_dp_sideband_msg_build()
359 /* copy chunk into bigger msg */ drm_dp_sideband_msg_build()
360 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); drm_dp_sideband_msg_build()
361 msg->curlen += msg->curchunk_len - 1; drm_dp_sideband_msg_build()
371 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); drm_dp_sideband_parse_link_address()
373 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; drm_dp_sideband_parse_link_address()
378 if (raw->msg[idx] & 0x80) drm_dp_sideband_parse_link_address()
381 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; drm_dp_sideband_parse_link_address()
382 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); drm_dp_sideband_parse_link_address()
387 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; drm_dp_sideband_parse_link_address()
388 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; drm_dp_sideband_parse_link_address()
390 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; drm_dp_sideband_parse_link_address()
395 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); drm_dp_sideband_parse_link_address()
399 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); drm_dp_sideband_parse_link_address()
403 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; drm_dp_sideband_parse_link_address()
404 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); drm_dp_sideband_parse_link_address()
422 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; drm_dp_sideband_parse_remote_dpcd_read()
426 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; drm_dp_sideband_parse_remote_dpcd_read()
430 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); drm_dp_sideband_parse_remote_dpcd_read()
441 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; drm_dp_sideband_parse_remote_dpcd_write()
456 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); drm_dp_sideband_parse_remote_i2c_read_ack()
460 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; drm_dp_sideband_parse_remote_i2c_read_ack()
463 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); drm_dp_sideband_parse_remote_i2c_read_ack()
474 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; drm_dp_sideband_parse_enum_path_resources_ack()
478 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); drm_dp_sideband_parse_enum_path_resources_ack()
482 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); drm_dp_sideband_parse_enum_path_resources_ack()
496 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; drm_dp_sideband_parse_allocate_payload_ack()
500 repmsg->u.allocate_payload.vcpi = raw->msg[idx]; drm_dp_sideband_parse_allocate_payload_ack()
504 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); drm_dp_sideband_parse_allocate_payload_ack()
518 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; drm_dp_sideband_parse_query_payload_ack()
522 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); drm_dp_sideband_parse_query_payload_ack()
533 struct drm_dp_sideband_msg_reply_body *msg) drm_dp_sideband_parse_reply()
535 memset(msg, 0, sizeof(*msg)); drm_dp_sideband_parse_reply()
536 msg->reply_type = (raw->msg[0] & 0x80) >> 7; drm_dp_sideband_parse_reply()
537 msg->req_type = (raw->msg[0] & 0x7f); drm_dp_sideband_parse_reply()
539 if (msg->reply_type) { drm_dp_sideband_parse_reply()
540 memcpy(msg->u.nak.guid, &raw->msg[1], 16); drm_dp_sideband_parse_reply()
541 msg->u.nak.reason = raw->msg[17]; drm_dp_sideband_parse_reply()
542 msg->u.nak.nak_data = raw->msg[18]; drm_dp_sideband_parse_reply()
546 switch (msg->req_type) { drm_dp_sideband_parse_reply()
548 return drm_dp_sideband_parse_link_address(raw, msg); drm_dp_sideband_parse_reply()
550 return drm_dp_sideband_parse_query_payload_ack(raw, msg); drm_dp_sideband_parse_reply()
552 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg); drm_dp_sideband_parse_reply()
554 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg); drm_dp_sideband_parse_reply()
556 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg); drm_dp_sideband_parse_reply()
558 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg); drm_dp_sideband_parse_reply()
560 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg); drm_dp_sideband_parse_reply()
562 DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type); drm_dp_sideband_parse_reply()
568 struct drm_dp_sideband_msg_req_body *msg) drm_dp_sideband_parse_connection_status_notify()
572 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; drm_dp_sideband_parse_connection_status_notify()
577 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); drm_dp_sideband_parse_connection_status_notify()
582 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; drm_dp_sideband_parse_connection_status_notify()
583 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; drm_dp_sideband_parse_connection_status_notify()
584 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; drm_dp_sideband_parse_connection_status_notify()
585 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; drm_dp_sideband_parse_connection_status_notify()
586 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); drm_dp_sideband_parse_connection_status_notify()
595 struct drm_dp_sideband_msg_req_body *msg) drm_dp_sideband_parse_resource_status_notify()
599 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; drm_dp_sideband_parse_resource_status_notify()
604 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); drm_dp_sideband_parse_resource_status_notify()
609 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); drm_dp_sideband_parse_resource_status_notify()
618 struct drm_dp_sideband_msg_req_body *msg) drm_dp_sideband_parse_req()
620 memset(msg, 0, sizeof(*msg)); drm_dp_sideband_parse_req()
621 msg->req_type = (raw->msg[0] & 0x7f); drm_dp_sideband_parse_req()
623 switch (msg->req_type) { drm_dp_sideband_parse_req()
625 return drm_dp_sideband_parse_connection_status_notify(raw, msg); drm_dp_sideband_parse_req()
627 return drm_dp_sideband_parse_resource_status_notify(raw, msg); drm_dp_sideband_parse_req()
629 DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type); drm_dp_sideband_parse_req()
634 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) build_dpcd_write() argument
643 drm_dp_encode_sideband_req(&req, msg); build_dpcd_write()
648 static int build_link_address(struct drm_dp_sideband_msg_tx *msg) build_link_address() argument
653 drm_dp_encode_sideband_req(&req, msg); build_link_address()
657 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) build_enum_path_resources() argument
663 drm_dp_encode_sideband_req(&req, msg); build_enum_path_resources()
664 msg->path_msg = true; build_enum_path_resources()
668 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, build_allocate_payload() argument
677 drm_dp_encode_sideband_req(&req, msg); build_allocate_payload()
678 msg->path_msg = true; build_allocate_payload()
763 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); drm_dp_mst_wait_tx_reply()
838 /* drop any tx slots msg */ drm_dp_destroy_mst_branch_device()
1342 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
1350 drm_dp_encode_sideband_req(&req, msg);
1357 bool up, u8 *msg, int len) drm_dp_send_sideband_msg()
1371 &msg[offset], drm_dp_send_sideband_msg()
1394 /* both msg slots are full */ set_hdr_from_dst_qlock()
1410 req_type = txmsg->msg[0] & 0x7f; set_hdr_from_dst_qlock()
1452 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ process_single_tx_qlock()
1464 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); process_single_tx_qlock()
1471 DRM_DEBUG_KMS("sideband msg failed to send\n"); process_single_tx_qlock()
1490 /* construct a chunk from the first msg in the tx_msg queue */ process_single_down_tx_qlock()
1503 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); process_single_down_tx_qlock()
1522 /* construct a chunk from the first msg in the tx_msg queue */ process_single_up_tx_qlock()
1526 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); process_single_up_tx_qlock()
1942 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) drm_dp_encode_up_ack_reply() argument
1948 drm_dp_encode_sideband_reply(&reply, msg); drm_dp_encode_up_ack_reply()
2167 struct drm_dp_sideband_msg_rx *msg; drm_dp_get_one_sb_msg() local
2169 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv; drm_dp_get_one_sb_msg()
2178 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); drm_dp_get_one_sb_msg()
2180 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); drm_dp_get_one_sb_msg()
2183 replylen = msg->curchunk_len + msg->curchunk_hdrlen; drm_dp_get_one_sb_msg()
2195 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); drm_dp_get_one_sb_msg()
2197 DRM_DEBUG_KMS("failed to build sideband msg\n"); drm_dp_get_one_sb_msg()
2231 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", drm_dp_mst_handle_down_rep()
2236 mgr->down_rep_recv.msg[0]); drm_dp_mst_handle_down_rep()
2266 struct drm_dp_sideband_msg_req_body msg; drm_dp_mst_handle_up_req() local
2282 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); drm_dp_mst_handle_up_req()
2284 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { drm_dp_mst_handle_up_req()
2285 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); drm_dp_mst_handle_up_req()
2288 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); drm_dp_mst_handle_up_req()
2296 drm_dp_update_port(mstb, &msg.u.conn_stat); drm_dp_mst_handle_up_req()
2298 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); drm_dp_mst_handle_up_req()
2301 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { drm_dp_mst_handle_up_req()
2302 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); drm_dp_mst_handle_up_req()
2304 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); drm_dp_mst_handle_up_req()
2312 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); drm_dp_mst_handle_up_req()
2951 struct drm_dp_sideband_msg_req_body msg; drm_dp_mst_i2c_xfer() local
2959 /* construct i2c msg */ drm_dp_mst_i2c_xfer()
2960 /* see if last msg is a read */ drm_dp_mst_i2c_xfer()
2970 memset(&msg, 0, sizeof(msg)); drm_dp_mst_i2c_xfer()
2971 msg.req_type = DP_REMOTE_I2C_READ; drm_dp_mst_i2c_xfer()
2972 msg.u.i2c_read.num_transactions = num - 1; drm_dp_mst_i2c_xfer()
2973 msg.u.i2c_read.port_number = port->port_num; drm_dp_mst_i2c_xfer()
2975 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; drm_dp_mst_i2c_xfer()
2976 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; drm_dp_mst_i2c_xfer()
2977 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; drm_dp_mst_i2c_xfer()
2979 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; drm_dp_mst_i2c_xfer()
2980 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; drm_dp_mst_i2c_xfer()
2989 drm_dp_encode_sideband_req(&msg, txmsg); drm_dp_mst_i2c_xfer()
532 drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, struct drm_dp_sideband_msg_reply_body *msg) drm_dp_sideband_parse_reply() argument
567 drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw, struct drm_dp_sideband_msg_req_body *msg) drm_dp_sideband_parse_connection_status_notify() argument
594 drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw, struct drm_dp_sideband_msg_req_body *msg) drm_dp_sideband_parse_resource_status_notify() argument
617 drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, struct drm_dp_sideband_msg_req_body *msg) drm_dp_sideband_parse_req() argument
1356 drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, u8 *msg, int len) drm_dp_send_sideband_msg() argument
drm_dp_helper.c
177 struct drm_dp_aux_msg msg; drm_dp_dpcd_access() local
181 memset(&msg, 0, sizeof(msg)); drm_dp_dpcd_access()
182 msg.address = offset; drm_dp_dpcd_access()
183 msg.request = request; drm_dp_dpcd_access()
184 msg.buffer = buffer; drm_dp_dpcd_access()
185 msg.size = size; drm_dp_dpcd_access()
196 err = aux->transfer(aux, &msg); drm_dp_dpcd_access()
206 switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) { drm_dp_dpcd_access()
428 * aux->transfer function does not modify anything in the msg other than the
433 static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) drm_dp_i2c_do_msg() argument
445 ret = aux->transfer(aux, msg); drm_dp_i2c_do_msg()
456 switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) { drm_dp_i2c_do_msg()
465 DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size); drm_dp_i2c_do_msg()
483 DRM_ERROR("invalid native reply %#04x\n", msg->reply); drm_dp_i2c_do_msg()
487 switch (msg->reply & DP_AUX_I2C_REPLY_MASK) { drm_dp_i2c_do_msg()
496 DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu\n", ret, msg->size); drm_dp_i2c_do_msg()
507 DRM_ERROR("invalid I2C reply %#04x\n", msg->reply); drm_dp_i2c_do_msg()
524 struct drm_dp_aux_msg msg = *orig_msg; drm_dp_i2c_drain_msg() local
526 while (msg.size > 0) { drm_dp_i2c_drain_msg()
527 err = drm_dp_i2c_do_msg(aux, &msg); drm_dp_i2c_drain_msg()
531 if (err < msg.size && err < ret) { drm_dp_i2c_drain_msg()
533 msg.size, err); drm_dp_i2c_drain_msg()
537 msg.size -= err; drm_dp_i2c_drain_msg()
538 msg.buffer += err; drm_dp_i2c_drain_msg()
560 struct drm_dp_aux_msg msg; drm_dp_i2c_xfer() local
565 memset(&msg, 0, sizeof(msg)); drm_dp_i2c_xfer()
568 msg.address = msgs[i].addr; drm_dp_i2c_xfer()
569 msg.request = (msgs[i].flags & I2C_M_RD) ? drm_dp_i2c_xfer()
572 msg.request |= DP_AUX_I2C_MOT; drm_dp_i2c_xfer()
577 msg.buffer = NULL; drm_dp_i2c_xfer()
578 msg.size = 0; drm_dp_i2c_xfer()
579 err = drm_dp_i2c_do_msg(aux, &msg); drm_dp_i2c_xfer()
587 for (j = 0; j < msgs[i].len; j += msg.size) { drm_dp_i2c_xfer()
588 msg.buffer = msgs[i].buf + j; drm_dp_i2c_xfer()
589 msg.size = min(transfer_size, msgs[i].len - j); drm_dp_i2c_xfer()
591 err = drm_dp_i2c_drain_msg(aux, &msg); drm_dp_i2c_xfer()
605 msg.request &= ~DP_AUX_I2C_MOT; drm_dp_i2c_xfer()
606 msg.buffer = NULL; drm_dp_i2c_xfer()
607 msg.size = 0; drm_dp_i2c_xfer()
608 (void)drm_dp_i2c_do_msg(aux, &msg); drm_dp_i2c_xfer()
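drm_dp_dpcd_access() above is the engine behind the public drm_dp_dpcd_read()/drm_dp_dpcd_write() helpers, which retry the AUX transaction and return either the byte count or a negative error. From a caller's point of view the usage reduces to something like the following sketch:

    #include <drm/drm_dp_helper.h>

    /* Read the receiver capability block; aux must already be usable. */
    static int example_read_dpcd_caps(struct drm_dp_aux *aux,
                                      u8 dpcd[DP_RECEIVER_CAP_SIZE])
    {
        ssize_t ret;

        ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
        if (ret < 0)
            return ret;
        return (ret == DP_RECEIVER_CAP_SIZE) ? 0 : -EIO;
    }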
drm_mipi_dsi.c
235 struct mipi_dsi_msg *msg) mipi_dsi_device_transfer()
243 msg->flags |= MIPI_DSI_MSG_USE_LPM; mipi_dsi_device_transfer()
245 return ops->transfer(dsi->host, msg); mipi_dsi_device_transfer()
319 * @msg: message to translate into a packet
324 const struct mipi_dsi_msg *msg) mipi_dsi_create_packet()
326 if (!packet || !msg) mipi_dsi_create_packet()
330 if (!mipi_dsi_packet_format_is_short(msg->type) && mipi_dsi_create_packet()
331 !mipi_dsi_packet_format_is_long(msg->type)) mipi_dsi_create_packet()
334 if (msg->channel > 3) mipi_dsi_create_packet()
338 packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f); mipi_dsi_create_packet()
349 if (mipi_dsi_packet_format_is_long(msg->type)) { mipi_dsi_create_packet()
350 packet->header[1] = (msg->tx_len >> 0) & 0xff; mipi_dsi_create_packet()
351 packet->header[2] = (msg->tx_len >> 8) & 0xff; mipi_dsi_create_packet()
353 packet->payload_length = msg->tx_len; mipi_dsi_create_packet()
354 packet->payload = msg->tx_buf; mipi_dsi_create_packet()
356 const u8 *tx = msg->tx_buf; mipi_dsi_create_packet()
358 packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0; mipi_dsi_create_packet()
359 packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0; mipi_dsi_create_packet()
381 struct mipi_dsi_msg msg = { mipi_dsi_set_maximum_return_packet_size() local
388 return mipi_dsi_device_transfer(dsi, &msg); mipi_dsi_set_maximum_return_packet_size()
407 struct mipi_dsi_msg msg = { mipi_dsi_generic_write() local
415 msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM; mipi_dsi_generic_write()
419 msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM; mipi_dsi_generic_write()
423 msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM; mipi_dsi_generic_write()
427 msg.type = MIPI_DSI_GENERIC_LONG_WRITE; mipi_dsi_generic_write()
431 return mipi_dsi_device_transfer(dsi, &msg); mipi_dsi_generic_write()
452 struct mipi_dsi_msg msg = { mipi_dsi_generic_read() local
462 msg.type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM; mipi_dsi_generic_read()
466 msg.type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM; mipi_dsi_generic_read()
470 msg.type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM; mipi_dsi_generic_read()
477 return mipi_dsi_device_transfer(dsi, &msg); mipi_dsi_generic_read()
496 struct mipi_dsi_msg msg = { mipi_dsi_dcs_write_buffer() local
507 msg.type = MIPI_DSI_DCS_SHORT_WRITE; mipi_dsi_dcs_write_buffer()
511 msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM; mipi_dsi_dcs_write_buffer()
515 msg.type = MIPI_DSI_DCS_LONG_WRITE; mipi_dsi_dcs_write_buffer()
519 return mipi_dsi_device_transfer(dsi, &msg); mipi_dsi_dcs_write_buffer()
579 struct mipi_dsi_msg msg = { mipi_dsi_dcs_read() local
588 return mipi_dsi_device_transfer(dsi, &msg); mipi_dsi_dcs_read()
234 mipi_dsi_device_transfer(struct mipi_dsi_device *dsi, struct mipi_dsi_msg *msg) mipi_dsi_device_transfer() argument
323 mipi_dsi_create_packet(struct mipi_dsi_packet *packet, const struct mipi_dsi_msg *msg) mipi_dsi_create_packet() argument
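mipi_dsi_create_packet() above turns a filled-in struct mipi_dsi_msg into the 4-byte packet header plus optional payload that the host controller puts on the wire, so a host driver's transfer path typically begins as in the sketch below. The DCS brightness write is only an example payload.

    #include <drm/drm_mipi_dsi.h>

    static int example_build_packet(struct mipi_dsi_packet *packet)
    {
        /* DCS "set display brightness" with one parameter byte, as an example */
        static const u8 payload[] = { 0x51, 0x80 };
        struct mipi_dsi_msg msg = {
            .channel = 0,
            .type    = MIPI_DSI_DCS_SHORT_WRITE_PARAM,
            .tx_buf  = payload,
            .tx_len  = sizeof(payload),
        };

        /* fills packet->header[] and, for long packets, points at the payload */
        return mipi_dsi_create_packet(packet, &msg);
    }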
/linux-4.1.27/arch/x86/include/asm/
virtext.h
79 * You can use the 'msg' arg to get a message describing the problem,
84 static inline int cpu_has_svm(const char **msg) cpu_has_svm() argument
89 if (msg) cpu_has_svm()
90 *msg = "not amd"; cpu_has_svm()
96 if (msg) cpu_has_svm()
97 *msg = "can't execute cpuid_8000000a"; cpu_has_svm()
103 if (msg) cpu_has_svm()
104 *msg = "svm not available"; cpu_has_svm()
/linux-4.1.27/arch/sh/boards/mach-kfr2r09/
setup.c
412 struct i2c_msg msg; kfr2r09_usb0_gadget_i2c_setup() local
422 msg.addr = 0x09; kfr2r09_usb0_gadget_i2c_setup()
423 msg.buf = buf; kfr2r09_usb0_gadget_i2c_setup()
424 msg.len = 1; kfr2r09_usb0_gadget_i2c_setup()
425 msg.flags = 0; kfr2r09_usb0_gadget_i2c_setup()
426 ret = i2c_transfer(a, &msg, 1); kfr2r09_usb0_gadget_i2c_setup()
431 msg.addr = 0x09; kfr2r09_usb0_gadget_i2c_setup()
432 msg.buf = buf; kfr2r09_usb0_gadget_i2c_setup()
433 msg.len = 1; kfr2r09_usb0_gadget_i2c_setup()
434 msg.flags = I2C_M_RD; kfr2r09_usb0_gadget_i2c_setup()
435 ret = i2c_transfer(a, &msg, 1); kfr2r09_usb0_gadget_i2c_setup()
441 msg.addr = 0x09; kfr2r09_usb0_gadget_i2c_setup()
442 msg.buf = buf; kfr2r09_usb0_gadget_i2c_setup()
443 msg.len = 2; kfr2r09_usb0_gadget_i2c_setup()
444 msg.flags = 0; kfr2r09_usb0_gadget_i2c_setup()
445 ret = i2c_transfer(a, &msg, 1); kfr2r09_usb0_gadget_i2c_setup()
455 struct i2c_msg msg; kfr2r09_serial_i2c_setup() local
465 msg.addr = 0x09; kfr2r09_serial_i2c_setup()
466 msg.buf = buf; kfr2r09_serial_i2c_setup()
467 msg.len = 1; kfr2r09_serial_i2c_setup()
468 msg.flags = 0; kfr2r09_serial_i2c_setup()
469 ret = i2c_transfer(a, &msg, 1); kfr2r09_serial_i2c_setup()
474 msg.addr = 0x09; kfr2r09_serial_i2c_setup()
475 msg.buf = buf; kfr2r09_serial_i2c_setup()
476 msg.len = 1; kfr2r09_serial_i2c_setup()
477 msg.flags = I2C_M_RD; kfr2r09_serial_i2c_setup()
478 ret = i2c_transfer(a, &msg, 1); kfr2r09_serial_i2c_setup()
484 msg.addr = 0x09; kfr2r09_serial_i2c_setup()
485 msg.buf = buf; kfr2r09_serial_i2c_setup()
486 msg.len = 2; kfr2r09_serial_i2c_setup()
487 msg.flags = 0; kfr2r09_serial_i2c_setup()
488 ret = i2c_transfer(a, &msg, 1); kfr2r09_serial_i2c_setup()
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/
bcdc.c
104 struct brcmf_proto_bcdc_dcmd msg; member in struct:brcmf_bcdc
114 struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg; brcmf_proto_bcdc_msg() local
119 memset(msg, 0, sizeof(struct brcmf_proto_bcdc_dcmd)); brcmf_proto_bcdc_msg()
121 msg->cmd = cpu_to_le32(cmd); brcmf_proto_bcdc_msg()
122 msg->len = cpu_to_le32(len); brcmf_proto_bcdc_msg()
128 msg->flags = cpu_to_le32(flags); brcmf_proto_bcdc_msg()
133 len += sizeof(*msg); brcmf_proto_bcdc_msg()
138 return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&bcdc->msg, len); brcmf_proto_bcdc_msg()
149 ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&bcdc->msg, brcmf_proto_bcdc_cmplt()
153 } while (BCDC_DCMD_ID(le32_to_cpu(bcdc->msg.flags)) != id); brcmf_proto_bcdc_cmplt()
163 struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg; brcmf_proto_bcdc_query_dcmd() local
183 flags = le32_to_cpu(msg->flags); brcmf_proto_bcdc_query_dcmd()
196 info = (void *)&msg[1]; brcmf_proto_bcdc_query_dcmd()
207 ret = le32_to_cpu(msg->status); brcmf_proto_bcdc_query_dcmd()
218 struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg; brcmf_proto_bcdc_set_dcmd() local
232 flags = le32_to_cpu(msg->flags); brcmf_proto_bcdc_set_dcmd()
244 ret = le32_to_cpu(msg->status); brcmf_proto_bcdc_set_dcmd()
366 /* ensure that the msg buf directly follows the cdc msg struct */ brcmf_proto_bcdc_attach()
367 if ((unsigned long)(&bcdc->msg + 1) != (unsigned long)bcdc->buf) { brcmf_proto_bcdc_attach()
/linux-4.1.27/sound/pci/mixart/
mixart_core.c
146 struct mixart_msg *msg, send_msg()
155 if (snd_BUG_ON(msg->size % 4)) send_msg()
184 writel_be( msg->size + MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address) ); /* size of descriptor + request */ send_msg()
185 writel_be( msg->message_id , MIXART_MEM(mgr, msg_frame_address + 4) ); /* dwMessageID */ send_msg()
186 writel_be( msg->uid.object_id, MIXART_MEM(mgr, msg_frame_address + 8) ); /* uidDest */ send_msg()
187 writel_be( msg->uid.desc, MIXART_MEM(mgr, msg_frame_address + 12) ); /* */ send_msg()
190 writel_be( msg->size, MIXART_MEM(mgr, msg_frame_address + 24) ); /* SizeDLL_T16 */ send_msg()
196 for( i=0; i < msg->size; i+=4 ) { send_msg()
197 writel_be( *(u32*)(msg->data + i), MIXART_MEM(mgr, MSG_HEADER_SIZE + msg_frame_address + i) ); send_msg()
264 "error: no response on msg %x\n", msg_frame); snd_mixart_send_msg()
348 u32 msg, addr, type; snd_mixart_process_msg() local
352 msg = mgr->msg_fifo[mgr->msg_fifo_readptr]; snd_mixart_process_msg()
357 addr = msg & ~MSG_TYPE_MASK; snd_mixart_process_msg()
358 type = msg & MSG_TYPE_MASK; snd_mixart_process_msg()
370 err, msg); snd_mixart_process_msg()
387 msg, resp.message_id, resp.uid.object_id, resp.uid.desc, resp.size); snd_mixart_process_msg()
392 /* msg contains no address ! do not get_msg() ! */ snd_mixart_process_msg()
398 msg); snd_mixart_process_msg()
404 } /* while there is a msg in fifo */ snd_mixart_process_msg()
437 u32 msg; snd_mixart_threaded_irq() local
441 while (retrieve_msg_frame(mgr, &msg)) { snd_mixart_threaded_irq()
443 switch (msg & MSG_TYPE_MASK) { snd_mixart_threaded_irq()
448 err = get_msg(mgr, &resp, msg & ~MSG_TYPE_MASK); snd_mixart_threaded_irq()
452 err, msg); snd_mixart_threaded_irq()
538 if(msg & MSG_CANCEL_NOTIFY_MASK) { snd_mixart_threaded_irq()
539 msg &= ~MSG_CANCEL_NOTIFY_MASK; snd_mixart_threaded_irq()
541 "canceled notification %x !\n", msg); snd_mixart_threaded_irq()
547 if( (msg & ~MSG_TYPE_MASK) == mgr->pending_event ) { snd_mixart_threaded_irq()
553 mgr->msg_fifo[mgr->msg_fifo_writeptr] = msg; snd_mixart_threaded_irq()
563 "interrupt received request %x\n", msg); snd_mixart_threaded_irq()
566 } /* switch on msg type */ snd_mixart_threaded_irq()
145 send_msg( struct mixart_mgr *mgr, struct mixart_msg *msg, int max_answersize, int mark_pending, u32 *msg_event) send_msg() argument
/linux-4.1.27/drivers/scsi/
dpt_i2o.c
317 u32 msg[17]; adpt_inquiry() local
329 memset(msg, 0, sizeof(msg)); adpt_inquiry()
346 msg[0] = reqlen<<16 | SGL_OFFSET_12; adpt_inquiry()
347 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID); adpt_inquiry()
348 msg[2] = 0; adpt_inquiry()
349 msg[3] = 0; adpt_inquiry()
351 msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16; adpt_inquiry()
352 msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/; adpt_inquiry()
357 msg[6] = scsidir|0x20a00000| 6 /* cmd len*/; adpt_inquiry()
359 mptr=msg+7; adpt_inquiry()
389 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120); adpt_inquiry()
685 u32 msg[5]; adpt_abort() local
698 memset(msg, 0, sizeof(msg)); adpt_abort()
699 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; adpt_abort()
700 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid; adpt_abort()
701 msg[2] = 0; adpt_abort()
702 msg[3]= 0; adpt_abort()
703 msg[4] = adpt_cmd_to_context(cmd); adpt_abort()
706 rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER); adpt_abort()
729 u32 msg[4]; adpt_device_reset() local
740 memset(msg, 0, sizeof(msg)); adpt_device_reset()
741 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; adpt_device_reset()
742 msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid); adpt_device_reset()
743 msg[2] = 0; adpt_device_reset()
744 msg[3] = 0; adpt_device_reset()
750 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER); adpt_device_reset()
773 u32 msg[4]; adpt_bus_reset() local
777 memset(msg, 0, sizeof(msg)); adpt_bus_reset()
779 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; adpt_bus_reset()
780 msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid); adpt_bus_reset()
781 msg[2] = 0; adpt_bus_reset()
782 msg[3] = 0; adpt_bus_reset()
785 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER); adpt_bus_reset()
1195 static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout) adpt_i2o_post_wait() argument
1230 msg[2] |= 0x80000000 | ((u32)wait_data->id); adpt_i2o_post_wait()
1232 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){ adpt_i2o_post_wait()
1286 u32 __iomem *msg; adpt_i2o_post_this() local
1301 msg = pHba->msg_addr_virt + m; adpt_i2o_post_this()
1302 memcpy_toio(msg, data, len); adpt_i2o_post_this()
1352 u32 msg[8]; adpt_i2o_reset_hba() local
1385 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0; adpt_i2o_reset_hba()
1386 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID; adpt_i2o_reset_hba()
1387 msg[2]=0; adpt_i2o_reset_hba()
1388 msg[3]=0; adpt_i2o_reset_hba()
1389 msg[4]=0; adpt_i2o_reset_hba()
1390 msg[5]=0; adpt_i2o_reset_hba()
1391 msg[6]=dma_low(addr); adpt_i2o_reset_hba()
1392 msg[7]=dma_high(addr); adpt_i2o_reset_hba()
1394 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg)); adpt_i2o_reset_hba()
1707 u32 msg[MAX_MESSAGE_SIZE]; adpt_i2o_passthru() local
1723 memset(&msg, 0, MAX_MESSAGE_SIZE*4); adpt_i2o_passthru()
1724 // get user msg size in u32s adpt_i2o_passthru()
1737 if(copy_from_user(msg, user_msg, size)) { adpt_i2o_passthru()
1751 sg_offset = (msg[0]>>4)&0xf; adpt_i2o_passthru()
1752 msg[2] = 0x40000000; // IOCTL context adpt_i2o_passthru()
1753 msg[3] = adpt_ioctl_to_context(pHba, reply); adpt_i2o_passthru()
1754 if (msg[3] == (u32)-1) adpt_i2o_passthru()
1760 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset); adpt_i2o_passthru()
1809 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER); adpt_i2o_passthru()
1831 memset(&msg, 0, MAX_MESSAGE_SIZE*4); adpt_i2o_passthru()
1832 // get user msg size in u32s adpt_i2o_passthru()
1844 if (copy_from_user (msg, user_msg, size)) { adpt_i2o_passthru()
1851 sg = (struct sg_simple_element*)(msg + sg_offset); adpt_i2o_passthru()
1883 (struct sg_simple_element*) (msg +sg_offset); adpt_i2o_passthru()
2160 void __iomem *msg; adpt_isr() local
2169 msg = pHba->msg_addr_virt + old_m; adpt_isr()
2170 old_context = readl(msg+12); adpt_isr()
2219 u32 msg[MAX_MESSAGE_SIZE]; adpt_scsi_to_i2o() local
2231 memset(msg, 0 , sizeof(msg)); adpt_scsi_to_i2o()
2265 // msg[0] is set later adpt_scsi_to_i2o()
2267 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid); adpt_scsi_to_i2o()
2268 msg[2] = 0; adpt_scsi_to_i2o()
2269 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */ adpt_scsi_to_i2o()
2272 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16); adpt_scsi_to_i2o()
2273 msg[5] = d->tid; adpt_scsi_to_i2o()
2278 msg[6] = scsidir|0x20a00000|cmd->cmd_len; adpt_scsi_to_i2o()
2280 mptr=msg+7; adpt_scsi_to_i2o()
2314 reqlen = mptr - msg;
2327 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2330 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2781 u32 __iomem *msg; adpt_send_nop() local
2796 msg = (u32 __iomem *)(pHba->msg_addr_virt + m); adpt_send_nop()
2797 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]); adpt_send_nop()
2798 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]); adpt_send_nop()
2799 writel( 0,&msg[2]); adpt_send_nop()
2811 u32 __iomem *msg = NULL; adpt_i2o_init_outbound_q() local
2830 msg=(u32 __iomem *)(pHba->msg_addr_virt+m); adpt_i2o_init_outbound_q()
2841 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]); adpt_i2o_init_outbound_q()
2842 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]); adpt_i2o_init_outbound_q()
2843 writel(0, &msg[2]); adpt_i2o_init_outbound_q()
2844 writel(0x0106, &msg[3]); /* Transaction context */ adpt_i2o_init_outbound_q()
2845 writel(4096, &msg[4]); /* Host page frame size */ adpt_i2o_init_outbound_q()
2846 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */ adpt_i2o_init_outbound_q()
2847 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */ adpt_i2o_init_outbound_q()
2848 writel((u32)addr, &msg[7]); adpt_i2o_init_outbound_q()
2920 u32 __iomem *msg; adpt_i2o_status_get() local
2952 msg=(u32 __iomem *)(pHba->msg_addr_virt+m); adpt_i2o_status_get()
2954 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]); adpt_i2o_status_get()
2955 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]); adpt_i2o_status_get()
2956 writel(1, &msg[2]); adpt_i2o_status_get()
2957 writel(0, &msg[3]); adpt_i2o_status_get()
2958 writel(0, &msg[4]); adpt_i2o_status_get()
2959 writel(0, &msg[5]); adpt_i2o_status_get()
2960 writel( dma_low(pHba->status_block_pa), &msg[6]); adpt_i2o_status_get()
2961 writel( dma_high(pHba->status_block_pa), &msg[7]); adpt_i2o_status_get()
2962 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes adpt_i2o_status_get() local
3042 u32 msg[8]; adpt_i2o_lct_get() local
3062 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6; adpt_i2o_lct_get()
3063 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID; adpt_i2o_lct_get()
3064 msg[2] = 0; adpt_i2o_lct_get()
3065 msg[3] = 0; adpt_i2o_lct_get()
3066 msg[4] = 0xFFFFFFFF; /* All devices */ adpt_i2o_lct_get()
3067 msg[5] = 0x00000000; /* Report now */ adpt_i2o_lct_get()
3068 msg[6] = 0xD0000000|pHba->lct_size; adpt_i2o_lct_get()
3069 msg[7] = (u32)pHba->lct_pa; adpt_i2o_lct_get()
3071 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) { adpt_i2o_lct_get()
3284 u32 msg[6]; adpt_i2o_hrt_get() local
3297 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4; adpt_i2o_hrt_get()
3298 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID; adpt_i2o_hrt_get()
3299 msg[2]= 0; adpt_i2o_hrt_get()
3300 msg[3]= 0; adpt_i2o_hrt_get()
3301 msg[4]= (0xD0000000 | size); /* Simple transaction */ adpt_i2o_hrt_get()
3302 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */ adpt_i2o_hrt_get()
3304 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) { adpt_i2o_hrt_get()
3394 u32 msg[9]; adpt_i2o_issue_params() local
3398 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5; adpt_i2o_issue_params()
3399 msg[1] = cmd << 24 | HOST_TID << 12 | tid; adpt_i2o_issue_params()
3400 msg[2] = 0; adpt_i2o_issue_params()
3401 msg[3] = 0; adpt_i2o_issue_params()
3402 msg[4] = 0; adpt_i2o_issue_params()
3403 msg[5] = 0x54000000 | oplen; /* OperationBlock */ adpt_i2o_issue_params()
3404 msg[6] = (u32)opblk_pa; adpt_i2o_issue_params()
3405 msg[7] = 0xD0000000 | reslen; /* ResultBlock */ adpt_i2o_issue_params()
3406 msg[8] = (u32)resblk_pa; adpt_i2o_issue_params()
3408 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) { adpt_i2o_issue_params()
3429 u32 msg[4]; adpt_i2o_quiesce_hba() local
3441 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; adpt_i2o_quiesce_hba()
3442 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID; adpt_i2o_quiesce_hba()
3443 msg[2] = 0; adpt_i2o_quiesce_hba()
3444 msg[3] = 0; adpt_i2o_quiesce_hba()
3446 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) { adpt_i2o_quiesce_hba()
3463 u32 msg[4]; adpt_i2o_enable_hba() local
3477 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; adpt_i2o_enable_hba()
3478 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID; adpt_i2o_enable_hba()
3479 msg[2]= 0; adpt_i2o_enable_hba()
3480 msg[3]= 0; adpt_i2o_enable_hba()
3482 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) { adpt_i2o_enable_hba()
3496 u32 msg[12]; adpt_i2o_systab_send() local
3499 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6; adpt_i2o_systab_send()
3500 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID; adpt_i2o_systab_send()
3501 msg[2] = 0; adpt_i2o_systab_send()
3502 msg[3] = 0; adpt_i2o_systab_send()
3503 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */ adpt_i2o_systab_send()
3504 msg[5] = 0; /* Segment 0 */ adpt_i2o_systab_send()
3511 msg[6] = 0x54000000 | sys_tbl_len; adpt_i2o_systab_send()
3512 msg[7] = (u32)sys_tbl_pa; adpt_i2o_systab_send()
3513 msg[8] = 0x54000000 | 0; adpt_i2o_systab_send()
3514 msg[9] = 0; adpt_i2o_systab_send()
3515 msg[10] = 0xD4000000 | 0; adpt_i2o_systab_send()
3516 msg[11] = 0; adpt_i2o_systab_send()
3518 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) { adpt_i2o_systab_send()
H A Dscsi_transport_spi.c1175 int spi_populate_width_msg(unsigned char *msg, int width) spi_populate_width_msg() argument
1177 msg[0] = EXTENDED_MESSAGE; spi_populate_width_msg()
1178 msg[1] = 2; spi_populate_width_msg()
1179 msg[2] = EXTENDED_WDTR; spi_populate_width_msg()
1180 msg[3] = width; spi_populate_width_msg()
1185 int spi_populate_sync_msg(unsigned char *msg, int period, int offset) spi_populate_sync_msg() argument
1187 msg[0] = EXTENDED_MESSAGE; spi_populate_sync_msg()
1188 msg[1] = 3; spi_populate_sync_msg()
1189 msg[2] = EXTENDED_SDTR; spi_populate_sync_msg()
1190 msg[3] = period; spi_populate_sync_msg()
1191 msg[4] = offset; spi_populate_sync_msg()
1196 int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, spi_populate_ppr_msg() argument
1199 msg[0] = EXTENDED_MESSAGE; spi_populate_ppr_msg()
1200 msg[1] = 6; spi_populate_ppr_msg()
1201 msg[2] = EXTENDED_PPR; spi_populate_ppr_msg()
1202 msg[3] = period; spi_populate_ppr_msg()
1203 msg[4] = 0; spi_populate_ppr_msg()
1204 msg[5] = offset; spi_populate_ppr_msg()
1205 msg[6] = width; spi_populate_ppr_msg()
1206 msg[7] = options; spi_populate_ppr_msg()
1213 * @msg: pointer to the area to place the tag
1221 int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd) spi_populate_tag_msg() argument
1224 *msg++ = SIMPLE_QUEUE_TAG; spi_populate_tag_msg()
1225 *msg++ = cmd->request->tag; spi_populate_tag_msg()
1256 static void print_nego(const unsigned char *msg, int per, int off, int width) print_nego() argument
1260 period_to_str(buf, msg[per]); print_nego()
1265 printk("offset = %d ", msg[off]); print_nego()
1267 printk("width = %d ", 8 << msg[width]); print_nego()
1270 static void print_ptr(const unsigned char *msg, int msb, const char *desc) print_ptr() argument
1272 int ptr = (msg[msb] << 24) | (msg[msb+1] << 16) | (msg[msb+2] << 8) | print_ptr()
1273 msg[msb+3]; print_ptr()
1277 int spi_print_msg(const unsigned char *msg) spi_print_msg() argument
1280 if (msg[0] == EXTENDED_MESSAGE) { spi_print_msg()
1281 len = 2 + msg[1]; spi_print_msg()
1284 if (msg[2] < ARRAY_SIZE(extended_msgs)) spi_print_msg()
1285 printk ("%s ", extended_msgs[msg[2]]); spi_print_msg()
1288 (int) msg[2]); spi_print_msg()
1289 switch (msg[2]) { spi_print_msg()
1291 print_ptr(msg, 3, "pointer"); spi_print_msg()
1294 print_nego(msg, 3, 4, 0); spi_print_msg()
1297 print_nego(msg, 0, 0, 3); spi_print_msg()
1300 print_nego(msg, 3, 5, 6); spi_print_msg()
1303 print_ptr(msg, 3, "out"); spi_print_msg()
1304 print_ptr(msg, 7, "in"); spi_print_msg()
1308 printk("%02x ", msg[i]); spi_print_msg()
1311 } else if (msg[0] & 0x80) { spi_print_msg()
1313 (msg[0] & 0x40) ? "" : "not ", spi_print_msg()
1314 (msg[0] & 0x20) ? "target routine" : "lun", spi_print_msg()
1315 msg[0] & 0x7); spi_print_msg()
1317 } else if (msg[0] < 0x1f) { spi_print_msg()
1318 if (msg[0] < ARRAY_SIZE(one_byte_msgs) && one_byte_msgs[msg[0]]) spi_print_msg()
1319 printk("%s ", one_byte_msgs[msg[0]]); spi_print_msg()
1321 printk("reserved (%02x) ", msg[0]); spi_print_msg()
1322 } else if (msg[0] == 0x55) { spi_print_msg()
1325 } else if (msg[0] <= 0x2f) { spi_print_msg()
1326 if ((msg[0] - 0x20) < ARRAY_SIZE(two_byte_msgs)) spi_print_msg()
1327 printk("%s %02x ", two_byte_msgs[msg[0] - 0x20], spi_print_msg()
1328 msg[1]); spi_print_msg()
1331 msg[0], msg[1]); spi_print_msg()
1341 int spi_print_msg(const unsigned char *msg) spi_print_msg() argument
1345 if (msg[0] == EXTENDED_MESSAGE) { spi_print_msg()
1346 len = 2 + msg[1]; spi_print_msg()
1350 printk("%02x ", msg[i]); spi_print_msg()
1352 } else if (msg[0] & 0x80) { spi_print_msg()
1353 printk("%02x ", msg[0]); spi_print_msg()
1355 } else if ((msg[0] < 0x1f) || (msg[0] == 0x55)) { spi_print_msg()
1356 printk("%02x ", msg[0]); spi_print_msg()
1358 } else if (msg[0] <= 0x2f) { spi_print_msg()
1359 printk("%02x %02x", msg[0], msg[1]); spi_print_msg()
1362 printk("%02x ", msg[0]); spi_print_msg()
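
The spi_populate_sync_msg() hit above fills a caller-supplied buffer with a SCSI parallel-interface extended message: the EXTENDED_MESSAGE header byte, a payload length, the SDTR opcode, then period and offset. A standalone sketch of that byte layout follows; build_sdtr() is a hypothetical helper written only to show the layout, and the opcode values are assumptions matching the standard SCSI message codes rather than definitions quoted from the kernel headers.

/*
 * Standalone illustration of the SDTR layout produced by
 * spi_populate_sync_msg() above.  build_sdtr() is hypothetical and the
 * opcode values are assumed to match the standard SCSI message codes.
 */
#include <stdio.h>

#define EXTENDED_MESSAGE 0x01
#define EXTENDED_SDTR    0x01

static int build_sdtr(unsigned char *msg, int period, int offset)
{
	msg[0] = EXTENDED_MESSAGE;   /* extended message header */
	msg[1] = 3;                  /* payload length: opcode + 2 bytes */
	msg[2] = EXTENDED_SDTR;      /* synchronous data transfer request */
	msg[3] = period;             /* transfer period factor */
	msg[4] = offset;             /* REQ/ACK offset */
	return 5;                    /* bytes placed in the buffer */
}

int main(void)
{
	unsigned char msg[5];
	int i, len = build_sdtr(msg, 12, 16);

	for (i = 0; i < len; i++)
		printf("%02x ", msg[i]);
	printf("\n");                /* prints: 01 03 01 0c 10 */
	return 0;
}
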
/linux-4.1.27/drivers/media/usb/pvrusb2/
H A Dpvrusb2-eeprom.c47 struct i2c_msg msg[2]; pvr2_eeprom_fetch() local
81 msg[0].addr = addr; pvr2_eeprom_fetch()
82 msg[0].flags = 0; pvr2_eeprom_fetch()
83 msg[0].len = mode16 ? 2 : 1; pvr2_eeprom_fetch()
84 msg[0].buf = iadd; pvr2_eeprom_fetch()
85 msg[1].addr = addr; pvr2_eeprom_fetch()
86 msg[1].flags = I2C_M_RD; pvr2_eeprom_fetch()
103 msg[1].len = pcnt; pvr2_eeprom_fetch()
104 msg[1].buf = eeprom+tcnt; pvr2_eeprom_fetch()
106 msg,ARRAY_SIZE(msg))) != 2) { pvr2_eeprom_fetch()
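
The pvr2_eeprom_fetch() hits above show the classic two-message EEPROM read: msg[0] writes the cell address, msg[1] (flagged I2C_M_RD) reads the data back, and both go to the adapter in one i2c_transfer() call. A hedged userspace sketch of the same pattern through the i2c-dev I2C_RDWR ioctl is shown below; the device node /dev/i2c-1 and the 0x50 slave address are assumptions chosen for illustration.

/*
 * Sketch only: the same write-address-then-read sequence issued from
 * user space via i2c-dev.  Device path and slave address (0x50, a
 * typical EEPROM) are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char addr_buf[1] = { 0x00 };   /* EEPROM offset to read */
	unsigned char data[16];
	struct i2c_msg msg[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,            .buf = addr_buf },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(data), .buf = data },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msg, .nmsgs = 2 };
	int fd = open("/dev/i2c-1", O_RDWR);

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("i2c");
		return 1;
	}
	printf("first byte: %02x\n", data[0]);
	close(fd);
	return 0;
}
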
/linux-4.1.27/net/phonet/
H A Ddatagram.c86 static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) pn_sendmsg() argument
88 DECLARE_SOCKADDR(struct sockaddr_pn *, target, msg->msg_name); pn_sendmsg()
92 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL| pn_sendmsg()
99 if (msg->msg_namelen < sizeof(struct sockaddr_pn)) pn_sendmsg()
106 msg->msg_flags & MSG_DONTWAIT, &err); pn_sendmsg()
111 err = memcpy_from_msg((void *)skb_put(skb, len), msg, len); pn_sendmsg()
127 static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, pn_recvmsg() argument
147 msg->msg_flags |= MSG_TRUNC; pn_recvmsg()
151 rval = skb_copy_datagram_msg(skb, 0, msg, copylen); pn_recvmsg()
159 if (msg->msg_name != NULL) { pn_recvmsg()
161 memcpy(msg->msg_name, &sa, sizeof(sa)); pn_recvmsg()
/linux-4.1.27/arch/powerpc/sysdev/
H A Dpmi.c45 pmi_message_t msg; member in struct:pmi_data
77 data->msg.type = type; pmi_irq_handler()
78 data->msg.data0 = ioread8(data->pmi_reg + PMI_READ_DATA0); pmi_irq_handler()
79 data->msg.data1 = ioread8(data->pmi_reg + PMI_READ_DATA1); pmi_irq_handler()
80 data->msg.data2 = ioread8(data->pmi_reg + PMI_READ_DATA2); pmi_irq_handler()
90 if (data->msg.type & PMI_ACK) { pmi_irq_handler()
119 if (handler->type == data->msg.type) pmi_notify_handlers()
120 handler->handle_pmi_message(data->msg); pmi_notify_handlers()
218 int pmi_send_message(pmi_message_t msg) pmi_send_message() argument
228 data->msg = msg; pmi_send_message()
229 pr_debug("pmi_send_message: msg is %08x\n", *(u32*)&msg); pmi_send_message()
234 iowrite8(msg.data0, data->pmi_reg + PMI_WRITE_DATA0); pmi_send_message()
235 iowrite8(msg.data1, data->pmi_reg + PMI_WRITE_DATA1); pmi_send_message()
236 iowrite8(msg.data2, data->pmi_reg + PMI_WRITE_DATA2); pmi_send_message()
237 iowrite8(msg.type, data->pmi_reg + PMI_WRITE_TYPE); pmi_send_message()
/linux-4.1.27/arch/arm/mach-omap1/
H A Dboard-sx1.c53 struct i2c_msg msg[1]; sx1_i2c_write_byte() local
59 msg->addr = devaddr; /* I2C address of chip */ sx1_i2c_write_byte()
60 msg->flags = 0; sx1_i2c_write_byte()
61 msg->len = 2; sx1_i2c_write_byte()
62 msg->buf = data; sx1_i2c_write_byte()
65 err = i2c_transfer(adap, msg, 1); sx1_i2c_write_byte()
77 struct i2c_msg msg[1]; sx1_i2c_read_byte() local
84 msg->addr = devaddr; /* I2C address of chip */ sx1_i2c_read_byte()
85 msg->flags = 0; sx1_i2c_read_byte()
86 msg->len = 1; sx1_i2c_read_byte()
87 msg->buf = data; sx1_i2c_read_byte()
89 err = i2c_transfer(adap, msg, 1); sx1_i2c_read_byte()
91 msg->addr = devaddr; /* I2C address */ sx1_i2c_read_byte()
92 msg->flags = I2C_M_RD; sx1_i2c_read_byte()
93 msg->len = 1; sx1_i2c_read_byte()
94 msg->buf = data; sx1_i2c_read_byte()
95 err = i2c_transfer(adap, msg, 1); sx1_i2c_read_byte()
/linux-4.1.27/kernel/irq/
H A Dmsi.c21 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) __get_cached_msi_msg() argument
23 *msg = entry->msg; __get_cached_msi_msg()
26 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) get_cached_msi_msg() argument
30 __get_cached_msi_msg(entry, msg); get_cached_msi_msg()
36 struct msi_msg *msg) irq_chip_write_msi_msg()
38 data->chip->irq_write_msi_msg(data, msg); irq_chip_write_msi_msg()
54 struct msi_msg msg; msi_domain_set_affinity() local
59 BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); msi_domain_set_affinity()
60 irq_chip_write_msi_msg(irq_data, &msg); msi_domain_set_affinity()
69 struct msi_msg msg; msi_domain_activate() local
71 BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); msi_domain_activate()
72 irq_chip_write_msi_msg(irq_data, &msg); msi_domain_activate()
78 struct msi_msg msg; msi_domain_deactivate() local
80 memset(&msg, 0, sizeof(msg)); msi_domain_deactivate()
81 irq_chip_write_msi_msg(irq_data, &msg); msi_domain_deactivate()
35 irq_chip_write_msi_msg(struct irq_data *data, struct msi_msg *msg) irq_chip_write_msi_msg() argument
/linux-4.1.27/drivers/media/usb/em28xx/
H A Dem28xx-input.c161 struct i2c_msg msg[] = { { .addr = i2c_dev->addr, .flags = 0, .buf = &subaddr, .len = 1}, em28xx_get_key_winfast_usbii_deluxe() local
165 if (2 != i2c_transfer(i2c_dev->adapter, msg, 2)) em28xx_get_key_winfast_usbii_deluxe()
171 msg[1].buf = &key; em28xx_get_key_winfast_usbii_deluxe()
172 if (2 != i2c_transfer(i2c_dev->adapter, msg, 2)) em28xx_get_key_winfast_usbii_deluxe()
192 u8 msg[3] = { 0, 0, 0 }; default_polling_getkey() local
198 msg, sizeof(msg)); default_polling_getkey()
203 poll_result->toggle_bit = (msg[0] >> 7); default_polling_getkey()
206 poll_result->read_count = (msg[0] & 0x7f); default_polling_getkey()
212 poll_result->scancode = RC_SCANCODE_RC5(msg[1], msg[2]); default_polling_getkey()
217 poll_result->scancode = RC_SCANCODE_NEC(msg[1], msg[2]); default_polling_getkey()
222 poll_result->scancode = msg[1] << 8 | msg[2]; default_polling_getkey()
234 u8 msg[5] = { 0, 0, 0, 0, 0 }; em2874_polling_getkey() local
240 msg, sizeof(msg)); em2874_polling_getkey()
245 poll_result->toggle_bit = (msg[0] >> 7); em2874_polling_getkey()
248 poll_result->read_count = (msg[0] & 0x7f); em2874_polling_getkey()
257 poll_result->scancode = RC_SCANCODE_RC5(msg[1], msg[2]); em2874_polling_getkey()
262 poll_result->scancode = msg[1] << 8 | msg[2]; em2874_polling_getkey()
263 if ((msg[3] ^ msg[4]) != 0xff) /* 32 bits NEC */ em2874_polling_getkey()
264 poll_result->scancode = RC_SCANCODE_NEC32((msg[1] << 24) | em2874_polling_getkey()
265 (msg[2] << 16) | em2874_polling_getkey()
266 (msg[3] << 8) | em2874_polling_getkey()
267 (msg[4])); em2874_polling_getkey()
268 else if ((msg[1] ^ msg[2]) != 0xff) /* 24 bits NEC */ em2874_polling_getkey()
269 poll_result->scancode = RC_SCANCODE_NECX(msg[1] << 8 | em2874_polling_getkey()
270 msg[2], msg[3]); em2874_polling_getkey()
272 poll_result->scancode = RC_SCANCODE_NEC(msg[1], msg[3]); em2874_polling_getkey()
277 poll_result->scancode = RC_SCANCODE_RC6_0(msg[1], msg[2]); em2874_polling_getkey()
282 poll_result->scancode = (msg[1] << 24) | (msg[2] << 16) | em2874_polling_getkey()
283 (msg[3] << 8) | msg[4]; em2874_polling_getkey()
/linux-4.1.27/net/rxrpc/
H A Dar-output.c28 struct msghdr *msg, size_t len);
33 static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg, rxrpc_sendmsg_cmsg() argument
44 if (msg->msg_controllen == 0) rxrpc_sendmsg_cmsg()
47 for_each_cmsghdr(cmsg, msg) { for_each_cmsghdr()
48 if (!CMSG_OK(msg, cmsg)) for_each_cmsghdr()
60 if (msg->msg_flags & MSG_CMSG_COMPAT) { for_each_cmsghdr()
132 struct msghdr *msg, size_t len) rxrpc_client_sendmsg()
147 ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code, rxrpc_client_sendmsg()
155 if (msg->msg_name) { rxrpc_client_sendmsg()
157 msg->msg_name); rxrpc_client_sendmsg()
192 ret = rxrpc_send_data(rx, call, msg, len); rxrpc_client_sendmsg()
203 * @msg: The data to send
207 * appropriate to sending data. No control data should be supplied in @msg,
211 int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg, rxrpc_kernel_send_data() argument
218 ASSERTCMP(msg->msg_name, ==, NULL); rxrpc_kernel_send_data()
219 ASSERTCMP(msg->msg_control, ==, NULL); rxrpc_kernel_send_data()
233 ret = rxrpc_send_data(call->socket, call, msg, len); rxrpc_kernel_send_data()
272 int rxrpc_server_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) rxrpc_server_sendmsg() argument
282 ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code, rxrpc_server_sendmsg()
313 ret = rxrpc_send_data(rx, call, msg, len); rxrpc_server_sendmsg()
335 struct msghdr msg; rxrpc_send_packet() local
343 msg.msg_name = &trans->peer->srx.transport.sin; rxrpc_send_packet()
344 msg.msg_namelen = sizeof(trans->peer->srx.transport.sin); rxrpc_send_packet()
345 msg.msg_control = NULL; rxrpc_send_packet()
346 msg.msg_controllen = 0; rxrpc_send_packet()
347 msg.msg_flags = 0; rxrpc_send_packet()
359 ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, rxrpc_send_packet()
379 ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, rxrpc_send_packet()
522 struct msghdr *msg, size_t len) rxrpc_send_data()
531 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); rxrpc_send_data()
539 more = msg->msg_flags & MSG_MORE; rxrpc_send_data()
554 if (msg->msg_flags & MSG_DONTWAIT) rxrpc_send_data()
567 if (chunk > msg_data_left(msg) && !more) rxrpc_send_data()
568 chunk = msg_data_left(msg); rxrpc_send_data()
579 sk, size, msg->msg_flags & MSG_DONTWAIT, &ret); rxrpc_send_data()
611 if (msg_data_left(msg) > 0) { rxrpc_send_data()
614 if (copy > msg_data_left(msg)) rxrpc_send_data()
615 copy = msg_data_left(msg); rxrpc_send_data()
620 ret = skb_add_data(skb, &msg->msg_iter, copy); rxrpc_send_data()
636 (msg_data_left(msg) == 0 && !more)) { rxrpc_send_data()
666 if (msg_data_left(msg) == 0 && !more) rxrpc_send_data()
682 rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more); rxrpc_send_data()
685 } while (msg_data_left(msg) > 0); rxrpc_send_data()
131 rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans, struct msghdr *msg, size_t len) rxrpc_client_sendmsg() argument
520 rxrpc_send_data(struct rxrpc_sock *rx, struct rxrpc_call *call, struct msghdr *msg, size_t len) rxrpc_send_data() argument
H A Dar-connevent.c64 struct msghdr msg; rxrpc_abort_connection() local
86 msg.msg_name = &conn->trans->peer->srx.transport.sin; rxrpc_abort_connection()
87 msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); rxrpc_abort_connection()
88 msg.msg_control = NULL; rxrpc_abort_connection()
89 msg.msg_controllen = 0; rxrpc_abort_connection()
90 msg.msg_flags = 0; rxrpc_abort_connection()
115 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); rxrpc_abort_connection()
341 struct msghdr msg; rxrpc_reject_packets() local
357 msg.msg_name = &sa; rxrpc_reject_packets()
358 msg.msg_control = NULL; rxrpc_reject_packets()
359 msg.msg_controllen = 0; rxrpc_reject_packets()
360 msg.msg_flags = 0; rxrpc_reject_packets()
366 msg.msg_namelen = sizeof(sa.sin); rxrpc_reject_packets()
369 msg.msg_namelen = 0; rxrpc_reject_packets()
392 kernel_sendmsg(local->socket, &msg, iov, 2, size); rxrpc_reject_packets()
H A Dar-recvmsg.c46 int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, rxrpc_recvmsg() argument
64 ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long); rxrpc_recvmsg()
67 msg->msg_flags |= MSG_MORE; rxrpc_recvmsg()
146 if (msg->msg_name) { rxrpc_recvmsg()
149 memcpy(msg->msg_name, rxrpc_recvmsg()
151 msg->msg_namelen = len; rxrpc_recvmsg()
153 sock_recv_timestamp(msg, &rx->sk, skb); rxrpc_recvmsg()
165 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, rxrpc_recvmsg()
183 ret = skb_copy_datagram_msg(skb, offset, msg, copy); rxrpc_recvmsg()
220 msg->msg_flags &= ~MSG_MORE; rxrpc_recvmsg()
264 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code); rxrpc_recvmsg()
275 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, rxrpc_recvmsg()
285 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code); rxrpc_recvmsg()
288 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code); rxrpc_recvmsg()
292 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code); rxrpc_recvmsg()
297 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code); rxrpc_recvmsg()
302 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, rxrpc_recvmsg()
315 msg->msg_flags &= ~MSG_MORE; rxrpc_recvmsg()
316 msg->msg_flags |= MSG_EOR; rxrpc_recvmsg()
/linux-4.1.27/drivers/acpi/
H A Dacpi_ipmi.c112 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
261 struct kernel_ipmi_msg *msg; acpi_format_ipmi_request() local
266 msg = &tx_msg->tx_message; acpi_format_ipmi_request()
272 msg->netfn = IPMI_OP_RGN_NETFN(address); acpi_format_ipmi_request()
273 msg->cmd = IPMI_OP_RGN_CMD(address); acpi_format_ipmi_request()
274 msg->data = tx_msg->data; acpi_format_ipmi_request()
285 "Unexpected request (msg len %d).\n", acpi_format_ipmi_request()
289 msg->data_len = buffer->length; acpi_format_ipmi_request()
290 memcpy(tx_msg->data, buffer->data, msg->data_len); acpi_format_ipmi_request()
314 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, acpi_format_ipmi_response() argument
329 buffer->status = msg->msg_done; acpi_format_ipmi_response()
330 if (msg->msg_done != ACPI_IPMI_OK) acpi_format_ipmi_response()
337 buffer->length = msg->rx_len; acpi_format_ipmi_response()
338 memcpy(buffer->data, msg->data, msg->rx_len); acpi_format_ipmi_response()
362 /* wake up the sleep thread on the Tx msg */ ipmi_flush_tx_msg()
371 struct acpi_ipmi_msg *msg) ipmi_cancel_tx_msg()
379 if (msg == tx_msg) { ipmi_cancel_tx_msg()
391 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) ipmi_msg_handler() argument
399 if (msg->user != ipmi_device->user_interface) { ipmi_msg_handler()
402 msg->user, ipmi_device->user_interface); ipmi_msg_handler()
408 if (msg->msgid == tx_msg->tx_msgid) { ipmi_msg_handler()
418 "Unexpected response (msg id %ld) is returned.\n", ipmi_msg_handler()
419 msg->msgid); ipmi_msg_handler()
424 if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) { ipmi_msg_handler()
426 "Unexpected response (msg len %d).\n", ipmi_msg_handler()
427 msg->msg.data_len); ipmi_msg_handler()
431 /* response msg is an error msg */ ipmi_msg_handler()
432 msg->recv_type = IPMI_RESPONSE_RECV_TYPE; ipmi_msg_handler()
433 if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE && ipmi_msg_handler()
434 msg->msg.data_len == 1) { ipmi_msg_handler()
435 if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) { ipmi_msg_handler()
443 tx_msg->rx_len = msg->msg.data_len; ipmi_msg_handler()
444 memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len); ipmi_msg_handler()
451 ipmi_free_recv_msg(msg); ipmi_msg_handler()
370 ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi, struct acpi_ipmi_msg *msg) ipmi_cancel_tx_msg() argument
/linux-4.1.27/drivers/media/pci/bt8xx/
H A Dbttv-i2c.c139 bttv_i2c_sendbytes(struct bttv *btv, const struct i2c_msg *msg, int last) bttv_i2c_sendbytes() argument
145 if (0 == msg->len) bttv_i2c_sendbytes()
149 xmit = (msg->addr << 25) | (msg->buf[0] << 16) | I2C_HW; bttv_i2c_sendbytes()
150 if (msg->len > 1 || !last) bttv_i2c_sendbytes()
159 pr_cont(" <W %02x %02x", msg->addr << 1, msg->buf[0]); bttv_i2c_sendbytes()
162 for (cnt = 1; cnt < msg->len; cnt++ ) { bttv_i2c_sendbytes()
164 xmit = (msg->buf[cnt] << 24) | I2C_HW | BT878_I2C_NOSTART; bttv_i2c_sendbytes()
165 if (cnt < msg->len-1 || !last) bttv_i2c_sendbytes()
174 pr_cont(" %02x", msg->buf[cnt]); bttv_i2c_sendbytes()
178 return msg->len; bttv_i2c_sendbytes()
189 bttv_i2c_readbytes(struct bttv *btv, const struct i2c_msg *msg, int last) bttv_i2c_readbytes() argument
195 for (cnt = 0; cnt < msg->len; cnt++) { bttv_i2c_readbytes()
196 xmit = (msg->addr << 25) | (1 << 24) | I2C_HW; bttv_i2c_readbytes()
197 if (cnt < msg->len-1) bttv_i2c_readbytes()
199 if (cnt < msg->len-1 || !last) bttv_i2c_readbytes()
206 pr_cont(" <R %02x", (msg->addr << 1) +1); bttv_i2c_readbytes()
215 msg->buf[cnt] = ((u32)btread(BT848_I2C) >> 8) & 0xff; bttv_i2c_readbytes()
217 pr_cont(" =%02x", msg->buf[cnt]); bttv_i2c_readbytes()
224 return msg->len; bttv_i2c_readbytes()
/linux-4.1.27/fs/nfs/blocklayout/
H A Drpc_pipefs.c60 struct rpc_pipe_msg *msg = &bl_pipe_msg.msg; bl_resolve_deviceid() local
75 memset(msg, 0, sizeof(*msg)); bl_resolve_deviceid()
76 msg->len = sizeof(*bl_msg) + b->simple.len; bl_resolve_deviceid()
77 msg->data = kzalloc(msg->len, gfp_mask); bl_resolve_deviceid()
78 if (!msg->data) bl_resolve_deviceid()
81 bl_msg = msg->data; bl_resolve_deviceid()
84 nfs4_encode_simple(msg->data + sizeof(*bl_msg), b); bl_resolve_deviceid()
88 rc = rpc_queue_upcall(nn->bl_device_pipe, msg); bl_resolve_deviceid()
106 kfree(msg->data); bl_resolve_deviceid()
129 static void bl_pipe_destroy_msg(struct rpc_pipe_msg *msg) bl_pipe_destroy_msg() argument
132 container_of(msg, struct bl_pipe_msg, msg); bl_pipe_destroy_msg()
134 if (msg->errno >= 0) bl_pipe_destroy_msg()
/linux-4.1.27/tools/perf/util/
H A Dusage.c14 char msg[1024]; report() local
15 vsnprintf(msg, sizeof(msg), err, params); report()
16 fprintf(stderr, " %s%s\n", prefix, msg); report()
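
The usage.c hit shows perf's report() idiom: format the variadic arguments into a fixed buffer with vsnprintf() and print them behind a severity prefix. A self-contained sketch of that idiom follows; the "Warning: " prefix and the warning() wrapper are illustrative assumptions, not perf's exact strings.

/* Minimal standalone sketch of the vsnprintf()-into-a-buffer idiom. */
#include <stdarg.h>
#include <stdio.h>

static void report(const char *prefix, const char *err, va_list params)
{
	char msg[1024];

	vsnprintf(msg, sizeof(msg), err, params);
	fprintf(stderr, " %s%s\n", prefix, msg);
}

static void warning(const char *err, ...)
{
	va_list params;

	va_start(params, err);
	report("Warning: ", err, params);   /* prefix string is illustrative */
	va_end(params);
}

int main(void)
{
	warning("unknown event %d", 42);
	return 0;
}
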
/linux-4.1.27/fs/ceph/
H A Dmds_client.c290 static int parse_reply_info(struct ceph_msg *msg, parse_reply_info() argument
298 info->head = msg->front.iov_base; parse_reply_info()
299 p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); parse_reply_info()
300 end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); parse_reply_info()
817 struct ceph_msg *msg; create_session_msg() local
820 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, create_session_msg()
822 if (!msg) { create_session_msg()
823 pr_err("create_session_msg ENOMEM creating msg\n"); create_session_msg()
826 h = msg->front.iov_base; create_session_msg()
830 return msg; create_session_msg()
839 struct ceph_msg *msg; create_session_open_msg() local
863 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes, create_session_open_msg()
865 if (!msg) { create_session_open_msg()
866 pr_err("create_session_msg ENOMEM creating msg\n"); create_session_open_msg()
869 h = msg->front.iov_base; create_session_open_msg()
879 msg->hdr.version = cpu_to_le16(2); create_session_open_msg()
880 msg->hdr.compat_version = cpu_to_le16(1); create_session_open_msg()
883 p = msg->front.iov_base + sizeof(*h); create_session_open_msg()
901 return msg; create_session_open_msg()
912 struct ceph_msg *msg; __open_session() local
924 msg = create_session_open_msg(mdsc, session->s_seq); __open_session()
925 if (!msg) __open_session()
927 ceph_con_send(&session->s_con, msg); __open_session()
1006 struct ceph_msg *msg; cleanup_cap_releases() local
1010 msg = list_first_entry(&session->s_cap_releases, cleanup_cap_releases()
1012 list_del_init(&msg->list_head); cleanup_cap_releases()
1013 ceph_msg_put(msg); cleanup_cap_releases()
1016 msg = list_first_entry(&session->s_cap_releases_done, cleanup_cap_releases()
1018 list_del_init(&msg->list_head); cleanup_cap_releases()
1019 ceph_msg_put(msg); cleanup_cap_releases()
1239 struct ceph_msg *msg; send_renew_caps() local
1258 msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, send_renew_caps()
1260 if (!msg) send_renew_caps()
1262 ceph_con_send(&session->s_con, msg); send_renew_caps()
1269 struct ceph_msg *msg; send_flushmsg_ack() local
1273 msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); send_flushmsg_ack()
1274 if (!msg) send_flushmsg_ack()
1276 ceph_con_send(&session->s_con, msg); send_flushmsg_ack()
1321 struct ceph_msg *msg; request_close_session() local
1326 msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); request_close_session()
1327 if (!msg) request_close_session()
1329 ceph_con_send(&session->s_con, msg); request_close_session()
1435 struct ceph_msg *msg, *partial = NULL; ceph_add_cap_releases() local
1447 msg = list_first_entry(&session->s_cap_releases, ceph_add_cap_releases()
1450 head = msg->front.iov_base; ceph_add_cap_releases()
1453 dout(" partial %p with (%d/%d)\n", msg, num, ceph_add_cap_releases()
1456 partial = msg; ceph_add_cap_releases()
1461 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE, ceph_add_cap_releases()
1463 if (!msg) ceph_add_cap_releases()
1465 dout("add_cap_releases %p msg %p now %d\n", session, msg, ceph_add_cap_releases()
1466 (int)msg->front.iov_len); ceph_add_cap_releases()
1467 head = msg->front.iov_base; ceph_add_cap_releases()
1469 msg->front.iov_len = sizeof(*head); ceph_add_cap_releases()
1471 list_add(&msg->list_head, &session->s_cap_releases); ceph_add_cap_releases()
1560 struct ceph_msg *msg; ceph_send_cap_releases() local
1565 msg = list_first_entry(&session->s_cap_releases_done, ceph_send_cap_releases()
1567 list_del_init(&msg->list_head); ceph_send_cap_releases()
1569 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); ceph_send_cap_releases()
1570 dout("send_cap_releases mds%d %p\n", session->s_mds, msg); ceph_send_cap_releases()
1571 ceph_con_send(&session->s_con, msg); ceph_send_cap_releases()
1580 struct ceph_msg *msg; discard_cap_releases() local
1588 msg = list_first_entry(&session->s_cap_releases, discard_cap_releases()
1590 head = msg->front.iov_base; discard_cap_releases()
1593 session->s_mds, msg, num); discard_cap_releases()
1595 msg->front.iov_len = sizeof(*head); discard_cap_releases()
1601 msg = list_first_entry(&session->s_cap_releases_done, discard_cap_releases()
1603 list_del_init(&msg->list_head); discard_cap_releases()
1605 head = msg->front.iov_base; discard_cap_releases()
1607 dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, discard_cap_releases()
1611 msg->front.iov_len = sizeof(*head); discard_cap_releases()
1612 list_add(&msg->list_head, &session->s_cap_releases); discard_cap_releases()
1878 struct ceph_msg *msg; create_request_message() local
1894 msg = ERR_PTR(ret); create_request_message()
1902 msg = ERR_PTR(ret); create_request_message()
1919 msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); create_request_message()
1920 if (!msg) { create_request_message()
1921 msg = ERR_PTR(-ENOMEM); create_request_message()
1925 msg->hdr.version = cpu_to_le16(2); create_request_message()
1926 msg->hdr.tid = cpu_to_le64(req->r_tid); create_request_message()
1928 head = msg->front.iov_base; create_request_message()
1929 p = msg->front.iov_base + sizeof(*head); create_request_message()
1930 end = msg->front.iov_base + msg->front.iov_len; create_request_message()
1942 req->r_request_release_offset = p - msg->front.iov_base; create_request_message()
1963 p = msg->front.iov_base + req->r_request_release_offset; create_request_message()
1976 msg->front.iov_len = p - msg->front.iov_base; create_request_message()
1977 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); create_request_message()
1982 ceph_msg_data_add_pagelist(msg, pagelist); create_request_message()
1983 msg->hdr.data_len = cpu_to_le32(pagelist->length); create_request_message()
1985 msg->hdr.data_len = 0; create_request_message()
1988 msg->hdr.data_off = cpu_to_le16(0); create_request_message()
1997 return msg; create_request_message()
2021 struct ceph_msg *msg; __prepare_send_request() local
2045 msg = req->r_request; __prepare_send_request()
2046 rhead = msg->front.iov_base; __prepare_send_request()
2061 p = msg->front.iov_base + req->r_request_release_offset; __prepare_send_request()
2068 msg->front.iov_len = p - msg->front.iov_base; __prepare_send_request()
2069 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); __prepare_send_request()
2077 msg = create_request_message(mdsc, req, mds, drop_cap_releases); __prepare_send_request()
2078 if (IS_ERR(msg)) { __prepare_send_request()
2079 req->r_err = PTR_ERR(msg); __prepare_send_request()
2081 return PTR_ERR(msg); __prepare_send_request()
2083 req->r_request = msg; __prepare_send_request()
2085 rhead = msg->front.iov_base; __prepare_send_request()
2336 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) handle_reply() argument
2340 struct ceph_mds_reply_head *head = msg->front.iov_base; handle_reply()
2347 if (msg->front.iov_len < sizeof(*head)) { handle_reply()
2349 ceph_msg_dump(msg); handle_reply()
2354 tid = le64_to_cpu(msg->hdr.tid); handle_reply()
2447 err = parse_reply_info(msg, rinfo, session->s_con.peer_features); handle_reply()
2453 ceph_msg_dump(msg); handle_reply()
2490 req->r_reply = msg; handle_reply()
2491 ceph_msg_get(msg); handle_reply()
2516 struct ceph_msg *msg) handle_forward()
2519 u64 tid = le64_to_cpu(msg->hdr.tid); handle_forward()
2523 void *p = msg->front.iov_base; handle_forward()
2524 void *end = p + msg->front.iov_len; handle_forward()
2567 struct ceph_msg *msg) handle_session()
2573 struct ceph_mds_session_head *h = msg->front.iov_base; handle_session()
2577 if (msg->front.iov_len != sizeof(*h)) handle_session()
2668 (int)msg->front.iov_len); handle_session()
2669 ceph_msg_dump(msg); handle_session()
3094 struct ceph_msg *msg) handle_lease()
3101 struct ceph_mds_lease *h = msg->front.iov_base; handle_lease()
3110 if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) handle_lease()
3116 dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); handle_lease()
3184 ceph_msg_get(msg); handle_lease()
3185 ceph_con_send(&session->s_con, msg); handle_lease()
3194 ceph_msg_dump(msg); handle_lease()
3202 struct ceph_msg *msg; ceph_mdsc_lease_send_msg() local
3212 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); ceph_mdsc_lease_send_msg()
3213 if (!msg) ceph_mdsc_lease_send_msg()
3215 lease = msg->front.iov_base; ceph_mdsc_lease_send_msg()
3228 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); ceph_mdsc_lease_send_msg()
3230 ceph_con_send(&session->s_con, msg); ceph_mdsc_lease_send_msg()
3631 void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) ceph_mdsc_handle_map() argument
3635 void *p = msg->front.iov_base; ceph_mdsc_handle_map()
3636 void *end = p + msg->front.iov_len; ceph_mdsc_handle_map()
3722 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) dispatch() argument
3726 int type = le16_to_cpu(msg->hdr.type); dispatch()
3737 ceph_mdsc_handle_map(mdsc, msg); dispatch()
3740 handle_session(s, msg); dispatch()
3743 handle_reply(s, msg); dispatch()
3746 handle_forward(mdsc, s, msg); dispatch()
3749 ceph_handle_caps(s, msg); dispatch()
3752 ceph_handle_snap(mdsc, s, msg); dispatch()
3755 handle_lease(mdsc, s, msg); dispatch()
3763 ceph_msg_put(msg); dispatch()
3826 struct ceph_msg *msg; mds_alloc_msg() local
3834 msg = ceph_msg_new(type, front_len, GFP_NOFS, false); mds_alloc_msg()
3835 if (!msg) { mds_alloc_msg()
3836 pr_err("unable to allocate msg type %d len %d\n", mds_alloc_msg()
3841 return msg; mds_alloc_msg()
3844 static int sign_message(struct ceph_connection *con, struct ceph_msg *msg) sign_message() argument
3848 return ceph_auth_sign_message(auth, msg); sign_message()
3851 static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg) check_message_signature() argument
3855 return ceph_auth_check_message_signature(auth, msg); check_message_signature()
2514 handle_forward(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, struct ceph_msg *msg) handle_forward() argument
2566 handle_session(struct ceph_mds_session *session, struct ceph_msg *msg) handle_session() argument
3092 handle_lease(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, struct ceph_msg *msg) handle_lease() argument
/linux-4.1.27/drivers/staging/gdm724x/
H A Dnetlink_k.c42 static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len);
49 void *msg; netlink_rcv_cb() local
71 msg = ND_NLMSG_DATA(nlh); netlink_rcv_cb()
76 rcv_cb(dev, nlh->nlmsg_type, msg, mlen); netlink_rcv_cb()
91 void (*cb)(struct net_device *dev, u16 type, void *msg, int len)) netlink_init()
115 int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) netlink_send() argument
135 memcpy(NLMSG_DATA(nlh), msg, len); netlink_send() local
90 netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type, void *msg, int len)) netlink_init() argument
/linux-4.1.27/drivers/staging/gdm72xx/
H A Dnetlink_k.c45 static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len);
52 void *msg; netlink_rcv_cb() local
66 msg = ND_NLMSG_DATA(nlh); netlink_rcv_cb()
72 rcv_cb(dev, nlh->nlmsg_type, msg, mlen); netlink_rcv_cb()
92 void *msg, int len)) netlink_init()
116 int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) netlink_send() argument
141 memcpy(nlmsg_data(nlh), msg, len); netlink_send() local
91 netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type, void *msg, int len)) netlink_init() argument
/linux-4.1.27/drivers/staging/lustre/include/linux/lnet/
H A Dsocklnd.h88 socklnd_init_msg(ksock_msg_t *msg, int type) socklnd_init_msg() argument
90 msg->ksm_csum = 0; socklnd_init_msg()
91 msg->ksm_type = type; socklnd_init_msg()
92 msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0; socklnd_init_msg()
96 #define KSOCK_MSG_LNET 0xc1 /* lnet msg */
98 /* We need to know this number to parse hello msg from ksocklnd in
/linux-4.1.27/arch/s390/kernel/
H A Dos_info.c77 char *buf, *buf_align, *msg; os_info_old_alloc() local
82 msg = "not available"; os_info_old_alloc()
88 msg = "alloc failed"; os_info_old_alloc()
93 msg = "copy failed"; os_info_old_alloc()
98 msg = "checksum failed"; os_info_old_alloc()
102 msg = "copied"; os_info_old_alloc()
110 nr, msg, addr, size); os_info_old_alloc()
/linux-4.1.27/drivers/infiniband/core/
H A Dcm.c216 struct ib_mad_send_buf *msg; member in struct:cm_id_private
256 struct ib_mad_send_buf **msg) cm_alloc_msg()
282 *msg = m; cm_alloc_msg()
288 struct ib_mad_send_buf **msg) cm_alloc_response_msg()
306 *msg = m; cm_alloc_response_msg()
310 static void cm_free_msg(struct ib_mad_send_buf *msg) cm_free_msg() argument
312 ib_destroy_ah(msg->ah); cm_free_msg()
313 if (msg->context[0]) cm_free_msg()
314 cm_deref_id(msg->context[0]); cm_free_msg()
315 ib_free_send_mad(msg); cm_free_msg()
856 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_destroy_id()
870 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_destroy_id()
890 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_destroy_id()
906 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_destroy_id()
1165 ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); ib_send_cm_req()
1169 req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; ib_send_cm_req()
1172 cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms; ib_send_cm_req()
1173 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; ib_send_cm_req()
1179 ret = ib_post_send_mad(cm_id_priv->msg, NULL); ib_send_cm_req()
1189 error2: cm_free_msg(cm_id_priv->msg); ib_send_cm_req()
1201 struct ib_mad_send_buf *msg = NULL; cm_issue_rej() local
1205 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); cm_issue_rej()
1211 rej_msg = (struct cm_rej_msg *) msg->mad; cm_issue_rej()
1224 ret = ib_post_send_mad(msg, NULL); cm_issue_rej()
1226 cm_free_msg(msg); cm_issue_rej()
1402 struct ib_mad_send_buf *msg = NULL; cm_dup_req_handler() local
1412 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); cm_dup_req_handler()
1419 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, cm_dup_req_handler()
1425 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, cm_dup_req_handler()
1433 ret = ib_post_send_mad(msg, NULL); cm_dup_req_handler()
1439 free: cm_free_msg(msg); cm_dup_req_handler()
1649 struct ib_mad_send_buf *msg; ib_send_cm_rep() local
1666 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_rep()
1670 rep_msg = (struct cm_rep_msg *) msg->mad; ib_send_cm_rep()
1672 msg->timeout_ms = cm_id_priv->timeout_ms; ib_send_cm_rep()
1673 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; ib_send_cm_rep()
1675 ret = ib_post_send_mad(msg, NULL); ib_send_cm_rep()
1678 cm_free_msg(msg); ib_send_cm_rep()
1683 cm_id_priv->msg = msg; ib_send_cm_rep()
1712 struct ib_mad_send_buf *msg; ib_send_cm_rtu() local
1732 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_rtu()
1736 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, ib_send_cm_rtu()
1739 ret = ib_post_send_mad(msg, NULL); ib_send_cm_rtu()
1742 cm_free_msg(msg); ib_send_cm_rtu()
1783 struct ib_mad_send_buf *msg = NULL; cm_dup_rep_handler() local
1794 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); cm_dup_rep_handler()
1800 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, cm_dup_rep_handler()
1804 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, cm_dup_rep_handler()
1812 ret = ib_post_send_mad(msg, NULL); cm_dup_rep_handler()
1818 free: cm_free_msg(msg); cm_dup_rep_handler()
1892 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_rep_handler()
1925 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_establish_handler()
1965 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_rtu_handler()
2001 struct ib_mad_send_buf *msg; ib_send_cm_dreq() local
2017 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); ib_send_cm_dreq()
2019 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_dreq()
2025 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, ib_send_cm_dreq()
2027 msg->timeout_ms = cm_id_priv->timeout_ms; ib_send_cm_dreq()
2028 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; ib_send_cm_dreq()
2030 ret = ib_post_send_mad(msg, NULL); ib_send_cm_dreq()
2034 cm_free_msg(msg); ib_send_cm_dreq()
2039 cm_id_priv->msg = msg; ib_send_cm_dreq()
2063 struct ib_mad_send_buf *msg; ib_send_cm_drep() local
2086 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_drep()
2090 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, ib_send_cm_drep()
2093 ret = ib_post_send_mad(msg, NULL); ib_send_cm_drep()
2096 cm_free_msg(msg); ib_send_cm_drep()
2108 struct ib_mad_send_buf *msg = NULL; cm_issue_drep() local
2113 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); cm_issue_drep()
2118 drep_msg = (struct cm_drep_msg *) msg->mad; cm_issue_drep()
2124 ret = ib_post_send_mad(msg, NULL); cm_issue_drep()
2126 cm_free_msg(msg); cm_issue_drep()
2135 struct ib_mad_send_buf *msg = NULL; cm_dreq_handler() local
2157 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_dreq_handler()
2162 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_dreq_handler()
2169 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) cm_dreq_handler()
2172 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, cm_dreq_handler()
2177 if (ib_post_send_mad(msg, NULL)) cm_dreq_handler()
2178 cm_free_msg(msg); cm_dreq_handler()
2227 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_drep_handler()
2251 struct ib_mad_send_buf *msg; ib_send_cm_rej() local
2269 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_rej()
2271 cm_format_rej((struct cm_rej_msg *) msg->mad, ib_send_cm_rej()
2279 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_rej()
2281 cm_format_rej((struct cm_rej_msg *) msg->mad, ib_send_cm_rej()
2295 ret = ib_post_send_mad(msg, NULL); ib_send_cm_rej()
2297 cm_free_msg(msg); ib_send_cm_rej()
2370 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_rej_handler()
2380 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_rej_handler()
2391 cm_id_priv->msg); cm_rej_handler()
2423 struct ib_mad_send_buf *msg; ib_send_cm_mra() local
2465 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_mra()
2469 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, ib_send_cm_mra()
2472 ret = ib_post_send_mad(msg, NULL); ib_send_cm_mra()
2490 cm_free_msg(msg); ib_send_cm_mra()
2531 cm_id_priv->msg, timeout)) cm_mra_handler()
2538 cm_id_priv->msg, timeout)) cm_mra_handler()
2546 cm_id_priv->msg, timeout)) { cm_mra_handler()
2564 cm_id_priv->msg->context[1] = (void *) (unsigned long) cm_mra_handler()
2619 struct ib_mad_send_buf *msg; ib_send_cm_lap() local
2642 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_lap()
2646 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, ib_send_cm_lap()
2648 msg->timeout_ms = cm_id_priv->timeout_ms; ib_send_cm_lap()
2649 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; ib_send_cm_lap()
2651 ret = ib_post_send_mad(msg, NULL); ib_send_cm_lap()
2654 cm_free_msg(msg); ib_send_cm_lap()
2659 cm_id_priv->msg = msg; ib_send_cm_lap()
2695 struct ib_mad_send_buf *msg = NULL; cm_lap_handler() local
2721 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) cm_lap_handler()
2724 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, cm_lap_handler()
2731 if (ib_post_send_mad(msg, NULL)) cm_lap_handler()
2732 cm_free_msg(msg); cm_lap_handler()
2794 struct ib_mad_send_buf *msg; ib_send_cm_apr() local
2811 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_apr()
2815 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, ib_send_cm_apr()
2817 ret = ib_post_send_mad(msg, NULL); ib_send_cm_apr()
2820 cm_free_msg(msg); ib_send_cm_apr()
2855 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_apr_handler()
2856 cm_id_priv->msg = NULL; cm_apr_handler()
2930 struct ib_mad_send_buf *msg; ib_send_cm_sidr_req() local
2947 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_sidr_req()
2951 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, ib_send_cm_sidr_req()
2953 msg->timeout_ms = cm_id_priv->timeout_ms; ib_send_cm_sidr_req()
2954 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; ib_send_cm_sidr_req()
2958 ret = ib_post_send_mad(msg, NULL); ib_send_cm_sidr_req()
2964 cm_free_msg(msg); ib_send_cm_sidr_req()
2968 cm_id_priv->msg = msg; ib_send_cm_sidr_req()
3074 struct ib_mad_send_buf *msg; ib_send_cm_sidr_rep() local
3090 ret = cm_alloc_msg(cm_id_priv, &msg); ib_send_cm_sidr_rep()
3094 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, ib_send_cm_sidr_rep()
3096 ret = ib_post_send_mad(msg, NULL); ib_send_cm_sidr_rep()
3099 cm_free_msg(msg); ib_send_cm_sidr_rep()
3151 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_sidr_rep_handler()
3162 static void cm_process_send_error(struct ib_mad_send_buf *msg, cm_process_send_error() argument
3171 cm_id_priv = msg->context[0]; cm_process_send_error()
3175 state = (enum ib_cm_state) (unsigned long) msg->context[1]; cm_process_send_error()
3176 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) cm_process_send_error()
3206 cm_free_msg(msg); cm_process_send_error()
3212 cm_free_msg(msg); cm_process_send_error()
3218 struct ib_mad_send_buf *msg = mad_send_wc->send_buf; cm_send_handler() local
3224 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; cm_send_handler()
3231 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) cm_send_handler()
3232 msg->retries = 1; cm_send_handler()
3234 atomic_long_add(1 + msg->retries, cm_send_handler()
3236 if (msg->retries) cm_send_handler()
3237 atomic_long_add(msg->retries, cm_send_handler()
3244 cm_free_msg(msg); cm_send_handler()
3247 if (msg->context[0] && msg->context[1]) cm_send_handler()
3248 cm_process_send_error(msg, mad_send_wc->status); cm_send_handler()
3250 cm_free_msg(msg); cm_send_handler()
255 cm_alloc_msg(struct cm_id_private *cm_id_priv, struct ib_mad_send_buf **msg) cm_alloc_msg() argument
286 cm_alloc_response_msg(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, struct ib_mad_send_buf **msg) cm_alloc_response_msg() argument
/linux-4.1.27/drivers/media/i2c/smiapp/
H A Dsmiapp-regs.c79 struct i2c_msg msg; ____smiapp_read() local
84 msg.addr = client->addr; ____smiapp_read()
85 msg.flags = 0; ____smiapp_read()
86 msg.len = 2; ____smiapp_read()
87 msg.buf = data; ____smiapp_read()
92 r = i2c_transfer(client->adapter, &msg, 1); ____smiapp_read()
99 msg.len = len; ____smiapp_read()
100 msg.flags = I2C_M_RD; ____smiapp_read()
101 r = i2c_transfer(client->adapter, &msg, 1); ____smiapp_read()
222 struct i2c_msg msg; smiapp_write_no_quirk() local
234 msg.addr = client->addr; smiapp_write_no_quirk()
235 msg.flags = 0; /* Write */ smiapp_write_no_quirk()
236 msg.len = 2 + len; smiapp_write_no_quirk()
237 msg.buf = data; smiapp_write_no_quirk()
267 r = i2c_transfer(client->adapter, &msg, 1); smiapp_write_no_quirk()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/
H A Danx9805.c175 struct i2c_msg *msg = msgs; anx9805_xfer() local
187 if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) { anx9805_xfer()
188 nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1); anx9805_xfer()
191 nv_wri2cr(mast, port->addr, 0x44, msg->len); anx9805_xfer()
194 for (i = 0; i < msg->len; i++) { anx9805_xfer()
201 msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47); anx9805_xfer()
204 if (!(msg->flags & I2C_M_RD)) { anx9805_xfer()
205 if (msg->addr == 0x50 && msg->len == 0x01) { anx9805_xfer()
206 off = msg->buf[0]; anx9805_xfer()
208 if (msg->addr == 0x30 && msg->len == 0x01) { anx9805_xfer()
209 seg = msg->buf[0]; anx9805_xfer()
215 msg++; anx9805_xfer()
/linux-4.1.27/drivers/hv/
H A Dhv_snapshot.c47 struct hv_vss_msg *msg; /* current message */ member in struct:__anon4675
77 vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) vss_cn_callback() argument
81 vss_msg = (struct hv_vss_msg *)msg->data; vss_cn_callback()
98 int op = vss_transaction.msg->vss_hdr.operation; vss_send_op()
100 struct cn_msg *msg; vss_send_op() local
103 msg = kzalloc(sizeof(*msg) + sizeof(*vss_msg), GFP_ATOMIC); vss_send_op()
104 if (!msg) vss_send_op()
107 vss_msg = (struct hv_vss_msg *)msg->data; vss_send_op()
109 msg->id.idx = CN_VSS_IDX; vss_send_op()
110 msg->id.val = CN_VSS_VAL; vss_send_op()
113 msg->len = sizeof(struct hv_vss_msg); vss_send_op()
115 rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC); vss_send_op()
121 kfree(msg); vss_send_op()
228 vss_transaction.msg = (struct hv_vss_msg *)vss_msg; hv_vss_onchannelcallback()
/linux-4.1.27/net/ipv6/
H A Ddatagram.c386 int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) ipv6_recv_error() argument
391 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name); ipv6_recv_error()
406 msg->msg_flags |= MSG_TRUNC; ipv6_recv_error()
409 err = skb_copy_datagram_msg(skb, 0, msg, copied); ipv6_recv_error()
413 sock_recv_timestamp(msg, sk, skb); ipv6_recv_error()
446 ip6_datagram_recv_common_ctl(sk, msg, skb); ipv6_recv_error()
450 ip6_datagram_recv_specific_ctl(sk, msg, skb); ipv6_recv_error()
458 ip_cmsg_recv(msg, skb); ipv6_recv_error()
462 put_cmsg(msg, SOL_IPV6, IPV6_RECVERR, sizeof(errhdr), &errhdr); ipv6_recv_error()
466 msg->msg_flags |= MSG_ERRQUEUE; ipv6_recv_error()
479 int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, ipv6_recv_rxpmtu() argument
485 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name); ipv6_recv_rxpmtu()
496 msg->msg_flags |= MSG_TRUNC; ipv6_recv_rxpmtu()
499 err = skb_copy_datagram_msg(skb, 0, msg, copied); ipv6_recv_rxpmtu()
503 sock_recv_timestamp(msg, sk, skb); ipv6_recv_rxpmtu()
516 put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); ipv6_recv_rxpmtu()
527 void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg, ip6_datagram_recv_common_ctl() argument
547 put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, ip6_datagram_recv_common_ctl()
552 void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, ip6_datagram_recv_specific_ctl() argument
561 put_cmsg(msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim); ip6_datagram_recv_specific_ctl()
566 put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass); ip6_datagram_recv_specific_ctl()
572 put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo); ip6_datagram_recv_specific_ctl()
578 put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr); ip6_datagram_recv_specific_ctl()
604 put_cmsg(msg, SOL_IPV6, IPV6_DSTOPTS, len, ptr); ip6_datagram_recv_specific_ctl()
610 put_cmsg(msg, SOL_IPV6, IPV6_RTHDR, len, ptr); ip6_datagram_recv_specific_ctl()
632 put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); ip6_datagram_recv_specific_ctl()
636 put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim); ip6_datagram_recv_specific_ctl()
640 put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr); ip6_datagram_recv_specific_ctl()
644 put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr); ip6_datagram_recv_specific_ctl()
648 put_cmsg(msg, SOL_IPV6, IPV6_2292RTHDR, (rthdr->hdrlen+1) << 3, rthdr); ip6_datagram_recv_specific_ctl()
652 put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr); ip6_datagram_recv_specific_ctl()
672 put_cmsg(msg, SOL_IPV6, IPV6_ORIGDSTADDR, sizeof(sin6), &sin6); ip6_datagram_recv_specific_ctl()
677 void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, ip6_datagram_recv_ctl() argument
680 ip6_datagram_recv_common_ctl(sk, msg, skb); ip6_datagram_recv_ctl()
681 ip6_datagram_recv_specific_ctl(sk, msg, skb); ip6_datagram_recv_ctl()
686 struct msghdr *msg, struct flowi6 *fl6, ip6_datagram_send_ctl()
697 for_each_cmsghdr(cmsg, msg) { for_each_cmsghdr()
700 if (!CMSG_OK(msg, cmsg)) { for_each_cmsghdr()
685 ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, struct ipv6_txoptions *opt, int *hlimit, int *tclass, int *dontfrag) ip6_datagram_send_ctl() argument
/linux-4.1.27/drivers/rpmsg/
H A Dvirtio_rpmsg_bus.c95 * Each buffer will have 16 bytes for the msg header and 496 bytes for
691 struct rpmsg_hdr *msg; rpmsg_send_offchannel_raw() local
715 msg = get_a_tx_buf(vrp); rpmsg_send_offchannel_raw()
716 if (!msg && !wait) rpmsg_send_offchannel_raw()
720 while (!msg) { rpmsg_send_offchannel_raw()
731 (msg = get_a_tx_buf(vrp)), rpmsg_send_offchannel_raw()
744 msg->len = len; rpmsg_send_offchannel_raw()
745 msg->flags = 0; rpmsg_send_offchannel_raw()
746 msg->src = src; rpmsg_send_offchannel_raw()
747 msg->dst = dst; rpmsg_send_offchannel_raw()
748 msg->reserved = 0; rpmsg_send_offchannel_raw()
749 memcpy(msg->data, data, len); rpmsg_send_offchannel_raw()
752 msg->src, msg->dst, msg->len, rpmsg_send_offchannel_raw()
753 msg->flags, msg->reserved); rpmsg_send_offchannel_raw()
755 msg, sizeof(*msg) + msg->len, true); rpmsg_send_offchannel_raw()
757 sg_init_one(&sg, msg, sizeof(*msg) + len); rpmsg_send_offchannel_raw()
762 err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL); rpmsg_send_offchannel_raw()
782 struct rpmsg_hdr *msg, unsigned int len) rpmsg_recv_single()
789 msg->src, msg->dst, msg->len, rpmsg_recv_single()
790 msg->flags, msg->reserved); rpmsg_recv_single()
792 msg, sizeof(*msg) + msg->len, true); rpmsg_recv_single()
799 msg->len > (len - sizeof(struct rpmsg_hdr))) { rpmsg_recv_single()
800 dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len); rpmsg_recv_single()
807 ept = idr_find(&vrp->endpoints, msg->dst); rpmsg_recv_single()
820 ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, rpmsg_recv_single()
821 msg->src); rpmsg_recv_single()
828 dev_warn(dev, "msg received with no recipient\n"); rpmsg_recv_single()
831 sg_init_one(&sg, msg, RPMSG_BUF_SIZE); rpmsg_recv_single()
834 err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL); rpmsg_recv_single()
848 struct rpmsg_hdr *msg; rpmsg_recv_done() local
852 msg = virtqueue_get_buf(rvq, &len); rpmsg_recv_done()
853 if (!msg) { rpmsg_recv_done()
858 while (msg) { rpmsg_recv_done()
859 err = rpmsg_recv_single(vrp, dev, msg, len); rpmsg_recv_done()
865 msg = virtqueue_get_buf(rvq, &len); rpmsg_recv_done()
877 * a TX msg we just sent it, and the buffer is put back to the used ring.
896 struct rpmsg_ns_msg *msg = data; rpmsg_ns_cb() local
907 if (len != sizeof(*msg)) { rpmsg_ns_cb()
908 dev_err(dev, "malformed ns msg (%d)\n", len); rpmsg_ns_cb()
924 msg->name[RPMSG_NAME_SIZE - 1] = '\0'; rpmsg_ns_cb()
927 msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat", rpmsg_ns_cb()
928 msg->name, msg->addr); rpmsg_ns_cb()
930 strncpy(chinfo.name, msg->name, sizeof(chinfo.name)); rpmsg_ns_cb()
932 chinfo.dst = msg->addr; rpmsg_ns_cb()
934 if (msg->flags & RPMSG_NS_DESTROY) { rpmsg_ns_cb()
781 rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev, struct rpmsg_hdr *msg, unsigned int len) rpmsg_recv_single() argument
/linux-4.1.27/drivers/i2c/algos/
H A Di2c-algo-pca.c112 struct i2c_msg *msg) pca_address()
117 addr = ((0x7f & msg->addr) << 1); pca_address()
118 if (msg->flags & I2C_M_RD) pca_address()
121 msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr); pca_address()
185 struct i2c_msg *msg = NULL; pca_xfer() local
208 msg = &msgs[curmsg]; pca_xfer()
210 addr = (0x7f & msg->addr) ; pca_xfer()
212 if (msg->flags & I2C_M_RD) pca_xfer()
214 curmsg, msg->len, addr, (addr << 1) | 1); pca_xfer()
217 curmsg, msg->len, addr, addr << 1, pca_xfer()
218 msg->len == 0 ? "" : ", "); pca_xfer()
219 for (i = 0; i < msg->len; i++) pca_xfer()
220 printk("%#04x%s", msg->buf[i], i == msg->len - 1 ? "" : ", "); pca_xfer()
232 msg = &msgs[curmsg]; pca_xfer()
241 completed = pca_address(adap, msg); pca_xfer()
246 if (numbytes < msg->len) { pca_xfer()
248 msg->buf[numbytes]); pca_xfer()
266 completed = pca_rx_ack(adap, msg->len > 1); pca_xfer()
270 if (numbytes < msg->len) { pca_xfer()
271 pca_rx_byte(adap, &msg->buf[numbytes], 1); pca_xfer()
274 numbytes < msg->len - 1); pca_xfer()
310 if (numbytes == msg->len - 1) { pca_xfer()
311 pca_rx_byte(adap, &msg->buf[numbytes], 0); pca_xfer()
320 numbytes, msg->len); pca_xfer()
111 pca_address(struct i2c_algo_pca_data *adap, struct i2c_msg *msg) pca_address() argument
/linux-4.1.27/arch/um/drivers/
H A Dmconsole_user.c43 struct msghdr msg; mconsole_reply_v0() local
48 msg.msg_name = &(req->origin); mconsole_reply_v0()
49 msg.msg_namelen = req->originlen; mconsole_reply_v0()
50 msg.msg_iov = &iov; mconsole_reply_v0()
51 msg.msg_iovlen = 1; mconsole_reply_v0()
52 msg.msg_control = NULL; mconsole_reply_v0()
53 msg.msg_controllen = 0; mconsole_reply_v0()
54 msg.msg_flags = 0; mconsole_reply_v0()
56 return sendmsg(req->originating_fd, &msg, 0); mconsole_reply_v0()

Completed in 4268 milliseconds

123456789