/linux-4.4.14/drivers/infiniband/ulp/isert/ |
  Makefile
      1  ccflags-y := -Idrivers/target -Idrivers/target/iscsi
/linux-4.4.14/drivers/infiniband/ulp/srpt/ |
  Makefile
      1  ccflags-y := -Idrivers/target
/linux-4.4.14/tools/perf/util/ |
  target.h
      7  struct target {
     46  enum target_errno target__validate(struct target *target);
     47  enum target_errno target__parse_uid(struct target *target);
     49  int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);
     51  static inline bool target__has_task(struct target *target)
     53  return target->tid || target->pid || target->uid_str;
     56  static inline bool target__has_cpu(struct target *target)
     58  return target->system_wide || target->cpu_list;
     61  static inline bool target__none(struct target *target)
     63  return !target__has_task(target) && !target__has_cpu(target);
     66  static inline bool target__uses_dummy_map(struct target *target)
     70  if (target->default_per_cpu)
     71  use_dummy = target->per_thread ? true : false;
     72  else if (target__has_task(target) ||
     73  (!target__has_cpu(target) && !target->uses_mmap))
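Note: the target.h hits above contain the complete predicate logic, so it can be restated as a self-contained userspace sketch. The struct below keeps only the fields those predicates consult; perf's real struct target has more.

#include <stdbool.h>
#include <stdio.h>

/* Reduced stand-in for perf's struct target: only the fields the
 * predicates above actually read. */
struct target {
	const char *pid;
	const char *tid;
	const char *cpu_list;
	const char *uid_str;
	bool system_wide;
};

/* Mirrors target__has_task(): any task selector makes it true. */
static bool target__has_task(const struct target *t)
{
	return t->tid || t->pid || t->uid_str;
}

/* Mirrors target__has_cpu(): system-wide mode or an explicit CPU list. */
static bool target__has_cpu(const struct target *t)
{
	return t->system_wide || t->cpu_list;
}

/* Mirrors target__none(): no selector at all, i.e. "profile myself". */
static bool target__none(const struct target *t)
{
	return !target__has_task(t) && !target__has_cpu(t);
}

int main(void)
{
	struct target t = { .pid = "1234" };

	printf("has_task=%d has_cpu=%d none=%d\n",
	       target__has_task(&t), target__has_cpu(&t), target__none(&t));
	return 0;
}

Built standalone, this prints has_task=1 has_cpu=0 none=0 for a target with only a pid string set.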
  target.c
      2  * Helper functions for handling target threads/cpus
      9  #include "target.h"
     16  enum target_errno target__validate(struct target *target)
     20  if (target->pid)
     21  target->tid = target->pid;
     24  if (target->tid && target->cpu_list) {
     25  target->cpu_list = NULL;
     31  if (target->tid && target->uid_str) {
     32  target->uid_str = NULL;
     38  if (target->uid_str && target->cpu_list) {
     39  target->cpu_list = NULL;
     45  if (target->tid && target->system_wide) {
     46  target->system_wide = false;
     52  if (target->uid_str && target->system_wide) {
     53  target->system_wide = false;
     59  if (target->per_thread && (target->system_wide || target->cpu_list)) {
     60  target->per_thread = false;
     68  enum target_errno target__parse_uid(struct target *target)
     72  const char *str = target->uid_str;
     74  target->uid = UINT_MAX;
     97  target->uid = result->pw_uid;
    115  int target__strerror(struct target *target, int errnum,
    146  snprintf(buf, buflen, msg, target->uid_str);
  top.c
     30  struct target *target = &opts->target;
     84  if (target->pid)
     86  target->pid);
     87  else if (target->tid)
     89  target->tid);
     90  else if (target->uid_str != NULL)
     92  target->uid_str);
     96  if (target->cpu_list)
     99  target->cpu_list);
    101  if (target->tid)
  annotate.c
     37  zfree(&ops->target.raw);
     38  zfree(&ops->target.name);
     60  ops->target.addr = strtoull(ops->raw, &endptr, 16);
     73  ops->target.name = strdup(name);
     76  return ops->target.name == NULL ? -1 : 0;
     81  ops->target.addr = 0;
     89  ops->target.addr = strtoull(tok + 1, NULL, 16);
     96  if (ops->target.name)
     97  return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
     99  if (ops->target.addr == 0)
    102  return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
    119  ops->target.addr = strtoull(ops->raw, NULL, 16);
    122  ops->target.offset = strtoull(s, NULL, 16);
    124  ops->target.offset = UINT64_MAX;
    132  return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
    224  zfree(&ops->target.raw);
    225  zfree(&ops->target.name);
    236  char *s = strchr(ops->raw, ','), *target, *comment, prev;
    248  target = ++s;
    256  while (s > target && isspace(s[0]))
    262  ops->target.raw = strdup(target);
    265  if (ops->target.raw == NULL)
    275  comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
    289  ops->target.name ?: ops->target.raw);
    299  char *target, *comment, *s, prev;
    301  target = s = ops->raw;
    308  ops->target.raw = strdup(target);
    311  if (ops->target.raw == NULL)
    321  comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
    330  ops->target.name ?: ops->target.raw);
   1025  if (dl->ops.target.offset == UINT64_MAX)
   1026  dl->ops.target.offset = dl->ops.target.addr -
   1029  /* kcore has no symbols, so add the call target name */
   1030  if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
   1031  struct addr_map_symbol target = {
   1033  .addr = dl->ops.target.addr,
   1036  if (!map_groups__find_ams(&target, NULL) &&
   1037  target.sym->start == target.al_addr)
   1038  dl->ops.target.name = strdup(target.sym->name);
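Note: the call__parse()/jump__parse() hits show that operand parsing here is plain strtoull() over objdump text. A minimal standalone sketch of that idea; the operand string and helper name are invented:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse an operand like "4005d0 <main+0x10>" the way the jump__parse()
 * hits suggest: the leading hex token is the absolute address, and the
 * hex value after '+' is the offset into the containing symbol. */
static void parse_jump_operand(const char *raw, uint64_t *addr, uint64_t *offset)
{
	const char *plus = strchr(raw, '+');

	*addr = strtoull(raw, NULL, 16);
	/* UINT64_MAX marks "no offset found", as in the line-124 hit. */
	*offset = plus ? strtoull(plus + 1, NULL, 16) : UINT64_MAX;
}

int main(void)
{
	uint64_t addr, offset;

	parse_jump_operand("4005d0 <main+0x10>", &addr, &offset);
	printf("addr=%" PRIx64 " offset=%" PRIx64 "\n", addr, offset);
	return 0;
}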
/linux-4.4.14/drivers/net/wireless/ath/ath6kl/ |
  htc-ops.h
     28  static inline int ath6kl_htc_wait_target(struct htc_target *target)
     30  return target->dev->ar->htc_ops->wait_target(target);
     33  static inline int ath6kl_htc_start(struct htc_target *target)
     35  return target->dev->ar->htc_ops->start(target);
     38  static inline int ath6kl_htc_conn_service(struct htc_target *target,
     42  return target->dev->ar->htc_ops->conn_service(target, req, resp);
     45  static inline int ath6kl_htc_tx(struct htc_target *target,
     48  return target->dev->ar->htc_ops->tx(target, packet);
     51  static inline void ath6kl_htc_stop(struct htc_target *target)
     53  return target->dev->ar->htc_ops->stop(target);
     56  static inline void ath6kl_htc_cleanup(struct htc_target *target)
     58  return target->dev->ar->htc_ops->cleanup(target);
     61  static inline void ath6kl_htc_flush_txep(struct htc_target *target,
     65  return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag);
     68  static inline void ath6kl_htc_flush_rx_buf(struct htc_target *target)
     70  return target->dev->ar->htc_ops->flush_rx_buf(target);
     73  static inline void ath6kl_htc_activity_changed(struct htc_target *target,
     77  return target->dev->ar->htc_ops->activity_changed(target, endpoint,
     81  static inline int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
     84  return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint);
     87  static inline int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
     90  return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq);
     93  static inline int ath6kl_htc_credit_setup(struct htc_target *target,
     96  return target->dev->ar->htc_ops->credit_setup(target, info);
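Note: every wrapper above is the same one-line trampoline through a table of function pointers (the table itself is the ops struct listed under htc.h below), which is what lets the mbox and pipe HTC implementations sit behind one interface. A stripped-down sketch of the pattern, with the table hung directly off the target rather than off target->dev->ar->htc_ops:

#include <stdio.h>

/* Illustrative names only; not the driver's real types. */
struct htc_target;

struct htc_ops {
	int (*start)(struct htc_target *target);
	void (*stop)(struct htc_target *target);
};

struct htc_target {
	const struct htc_ops *ops;
};

/* The wrapper is a pure dispatch, exactly like ath6kl_htc_start() above. */
static inline int htc_start(struct htc_target *target)
{
	return target->ops->start(target);
}

static int pipe_start(struct htc_target *target)
{
	(void)target;
	puts("pipe backend started");
	return 0;
}

static void pipe_stop(struct htc_target *target) { (void)target; }

static const struct htc_ops pipe_ops = {
	.start = pipe_start,
	.stop = pipe_stop,
};

int main(void)
{
	struct htc_target t = { .ops = &pipe_ops };
	return htc_start(&t);
}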
  htc_pipe.c
     56  ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
     72  ep->ep_cb.tx_complete(ep->target, packet);
     77  static void send_packet_completion(struct htc_target *target,
     80  struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
     91  static void get_htc_packet_credit_based(struct htc_target *target,
    118  if (transfer_len <= target->tgt_cred_sz) {
    122  credits_required = transfer_len / target->tgt_cred_sz;
    123  remainder = transfer_len % target->tgt_cred_sz;
    146  /* check if we need credits back from the target */
    149  /* tell the target we need credits ASAP! */
    173  static void get_htc_packet(struct htc_target *target,
    203  static int htc_issue_packets(struct htc_target *target,
    248  spin_lock_bh(&target->tx_lock);
    253  spin_unlock_bh(&target->tx_lock);
    255  status = ath6kl_hif_pipe_send(target->dev->ar,
    269  spin_lock_bh(&target->tx_lock);
    274  spin_unlock_bh(&target->tx_lock);
    294  send_packet_completion(target, packet);
    301  static enum htc_send_queue_result htc_try_send(struct htc_target *target,
    307  struct ath6kl *ar = target->dev->ar;
    330  spin_lock_bh(&target->tx_lock);
    332  spin_unlock_bh(&target->tx_lock);
    388  action = ep->ep_cb.tx_full(ep->target, packet);
    419  spin_lock_bh(&target->tx_lock);
    425  spin_unlock_bh(&target->tx_lock);
    441  spin_unlock_bh(&target->tx_lock);
    458  * based on target transmit resource availability,
    460  * bus resources greater than target transmit
    463  get_htc_packet_credit_based(target, ep, &send_queue);
    469  get_htc_packet(target, ep, &send_queue, tx_resources);
    480  spin_unlock_bh(&target->tx_lock);
    483  htc_issue_packets(target, ep, &send_queue);
    491  spin_lock_bh(&target->tx_lock);
    496  spin_unlock_bh(&target->tx_lock);
    530  static void htc_free_txctrl_packet(struct htc_target *target,
    536  static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
    541  static void htc_txctrl_complete(struct htc_target *target,
    544  htc_free_txctrl_packet(target, packet);
    549  static int htc_setup_target_buffer_assignments(struct htc_target *target)
    555  credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
    556  if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
    561  credits = target->tgt_creds;
    562  entry = &target->pipe.txcredit_alloc[0];
    640  if (target->pipe.txcredit_alloc[i].service_id != 0) {
    644  target->pipe.txcredit_alloc[i].
    646  target->pipe.txcredit_alloc[i].
    655  static void htc_process_credit_report(struct htc_target *target,
    664  spin_lock_bh(&target->tx_lock);
    669  spin_unlock_bh(&target->tx_lock);
    673  ep = &target->endpoint[rpt->eid];
    677  spin_unlock_bh(&target->tx_lock);
    678  htc_try_send(target, ep, NULL);
    679  spin_lock_bh(&target->tx_lock);
    688  spin_unlock_bh(&target->tx_lock);
    692  static void htc_flush_tx_endpoint(struct htc_target *target,
    697  spin_lock_bh(&target->tx_lock);
    702  send_packet_completion(target, packet);
    704  spin_unlock_bh(&target->tx_lock);
    714  static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
    720  spin_lock_bh(&target->tx_lock);
    738  spin_unlock_bh(&target->tx_lock);
    745  struct htc_target *target = ar->htc_target;
    758  ep = &target->endpoint[ep_id];
    760  packet = htc_lookup_tx_packet(target, ep, skb);
    767  send_packet_completion(target, packet);
    774  * happens when credits flow back from the target. in the
    777  htc_try_send(target, ep, NULL);
    783  static int htc_send_packets_multiple(struct htc_target *target,
    799  ep = &target->endpoint[packet->endpoint];
    801  htc_try_send(target, ep, pkt_queue);
    816  static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
    819  spin_lock_bh(&target->rx_lock);
    821  if (target->pipe.htc_packet_pool == NULL) {
    822  spin_unlock_bh(&target->rx_lock);
    826  packet = target->pipe.htc_packet_pool;
    827  target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
    829  spin_unlock_bh(&target->rx_lock);
    835  static void free_htc_packet_container(struct htc_target *target,
    840  spin_lock_bh(&target->rx_lock);
    842  if (target->pipe.htc_packet_pool == NULL) {
    843  target->pipe.htc_packet_pool = packet;
    846  lh = (struct list_head *) target->pipe.htc_packet_pool;
    848  target->pipe.htc_packet_pool = packet;
    851  spin_unlock_bh(&target->rx_lock);
    854  static int htc_process_trailer(struct htc_target *target, u8 *buffer,
    897  htc_process_credit_report(target, report,
    934  ep->ep_cb.rx(ep->target, packet);
    940  static void recv_packet_completion(struct htc_target *target,
    955  struct htc_target *target = ar->htc_target;
    973  if (WARN_ON_ONCE(!target)) {
    992  ep = &target->endpoint[htc_hdr->eid];
   1022  status = htc_process_trailer(target, trailer, hdr_info,
   1035  if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
   1037  * fatal: target should not send unsolicited
   1052  spin_lock_bh(&target->rx_lock);
   1054  target->pipe.ctrl_response_valid = true;
   1055  target->pipe.ctrl_response_len = min_t(int, netlen,
   1057  memcpy(target->pipe.ctrl_response_buf, netdata,
   1058  target->pipe.ctrl_response_len);
   1060  spin_unlock_bh(&target->rx_lock);
   1073  packet = alloc_htc_packet_container(target);
   1093  recv_packet_completion(target, ep, packet);
   1096  free_htc_packet_container(target, packet);
   1105  static void htc_flush_rx_queue(struct htc_target *target,
   1111  spin_lock_bh(&target->rx_lock);
   1121  spin_unlock_bh(&target->rx_lock);
   1135  spin_lock_bh(&target->rx_lock);
   1138  spin_unlock_bh(&target->rx_lock);
   1142  static int htc_wait_recv_ctrl_message(struct htc_target *target)
   1147  spin_lock_bh(&target->rx_lock);
   1149  if (target->pipe.ctrl_response_valid) {
   1150  target->pipe.ctrl_response_valid = false;
   1151  spin_unlock_bh(&target->rx_lock);
   1155  spin_unlock_bh(&target->rx_lock);
   1182  static void reset_endpoint_states(struct htc_target *target)
   1188  ep = &target->endpoint[i];
   1196  ep->target = target;
   1202  static int htc_config_target_hif_pipe(struct htc_target *target)
   1208  static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
   1214  if (target->pipe.txcredit_alloc[i].service_id == service_id)
   1216  target->pipe.txcredit_alloc[i].credit_alloc;
   1228  static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
   1232  struct ath6kl *ar = target->dev->ar;
   1258  tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
   1264  /* allocate a packet to send to the target */
   1265  packet = htc_alloc_txctrl_packet(target);
   1292  /* tell target desired recv alloc for this ep */
   1305  status = ath6kl_htc_pipe_tx(target, packet);
   1313  status = htc_wait_recv_ctrl_message(target);
   1321  target->pipe.ctrl_response_buf;
   1324  (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
   1363  ep = &target->endpoint[assigned_epid];
   1380  ep->cred_dist.cred_sz = target->tgt_cred_sz;
   1381  ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
   1382  if (max_msg_size % target->tgt_cred_sz)
   1411  htc_free_txctrl_packet(target, packet);
   1420  struct htc_target *target = NULL;
   1424  target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
   1425  if (target == NULL) {
   1431  spin_lock_init(&target->htc_lock);
   1432  spin_lock_init(&target->rx_lock);
   1433  spin_lock_init(&target->tx_lock);
   1435  reset_endpoint_states(target);
   1441  free_htc_packet_container(target, packet);
   1444  target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
   1445  if (!target->dev) {
   1450  target->dev->ar = ar;
   1451  target->dev->htc_cnxt = target;
   1454  ep = &target->endpoint[ENDPOINT_0];
   1459  return target;
   1463  if (target != NULL)
   1464  ath6kl_htc_pipe_cleanup(target);
   1466  target = NULL;
   1468  return target;
   1472  static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
   1477  packet = alloc_htc_packet_container(target);
   1483  kfree(target->dev);
   1486  kfree(target);
   1489  static int ath6kl_htc_pipe_start(struct htc_target *target)
   1495  htc_config_target_hif_pipe(target);
   1498  packet = htc_alloc_txctrl_packet(target);
   1518  target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
   1520  return ath6kl_htc_pipe_tx(target, packet);
   1523  static void ath6kl_htc_pipe_stop(struct htc_target *target)
   1530  ep = &target->endpoint[i];
   1531  htc_flush_rx_queue(target, ep);
   1532  htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
   1535  reset_endpoint_states(target);
   1536  target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
   1539  static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
   1544  spin_lock_bh(&target->rx_lock);
   1545  num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
   1546  spin_unlock_bh(&target->rx_lock);
   1551  static int ath6kl_htc_pipe_tx(struct htc_target *target,
   1564  return htc_send_packets_multiple(target, &queue);
   1567  static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
   1574  status = htc_wait_recv_ctrl_message(target);
   1579  if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
   1581  target->pipe.ctrl_response_len);
   1585  ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
   1598  target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
   1599  target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
   1601  if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
   1604  htc_setup_target_buffer_assignments(target);
   1615  status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
   1620  static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
   1623  struct htc_endpoint *ep = &target->endpoint[endpoint];
   1631  htc_flush_tx_endpoint(target, ep, tag);
   1634  static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
   1655  ep = &target->endpoint[first->endpoint];
   1657  spin_lock_bh(&target->rx_lock);
   1662  spin_unlock_bh(&target->rx_lock);
   1676  static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
   1683  static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
   1690  endpoint = &target->endpoint[i];
   1692  spin_lock_bh(&target->rx_lock);
   1697  spin_unlock_bh(&target->rx_lock);
   1703  spin_lock_bh(&target->rx_lock);
   1706  spin_unlock_bh(&target->rx_lock);
   1710  static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
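Note: the arithmetic in the get_htc_packet_credit_based() hits (lines 118-123) is an ordinary divide-and-round-up credit count: one credit per tgt_cred_sz bytes, with any remainder costing a whole extra credit. A tiny standalone version with made-up sizes:

#include <stdio.h>

int main(void)
{
	unsigned int tgt_cred_sz = 256;   /* credit size reported by the target */
	unsigned int transfer_len = 600;  /* padded message length */
	unsigned int credits_required;

	if (transfer_len <= tgt_cred_sz) {
		credits_required = 1;
	} else {
		credits_required = transfer_len / tgt_cred_sz;
		if (transfer_len % tgt_cred_sz)
			credits_required++;
	}

	/* 600 bytes at 256 bytes/credit -> 3 credits */
	printf("%u byte message needs %u credits\n",
	       transfer_len, credits_required);
	return 0;
}

The same rounding shows up in htc_mbox.c below as DIV_ROUND_UP(len, target->tgt_cred_sz).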
  htc_mbox.c
     28  static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
     29  static void ath6kl_htc_mbox_stop(struct htc_target *target);
     30  static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
     32  static void ath6kl_htc_set_credit_dist(struct htc_target *target,
    375  static void htc_reclaim_txctrl_buf(struct htc_target *target,
    378  spin_lock_bh(&target->htc_lock);
    379  list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
    380  spin_unlock_bh(&target->htc_lock);
    383  static struct htc_packet *htc_get_control_buf(struct htc_target *target,
    389  buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
    391  spin_lock_bh(&target->htc_lock);
    394  spin_unlock_bh(&target->htc_lock);
    400  spin_unlock_bh(&target->htc_lock);
    408  static void htc_tx_comp_update(struct htc_target *target,
    423  spin_lock_bh(&target->tx_lock);
    429  target->credit_info, &target->cred_dist_list);
    431  ath6kl_credit_distribute(target->credit_info,
    432  &target->cred_dist_list,
    435  spin_unlock_bh(&target->tx_lock);
    448  ath6kl_tx_complete(endpoint->target, txq);
    451  static void htc_tx_comp_handler(struct htc_target *target,
    454  struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
    460  htc_tx_comp_update(target, endpoint, packet);
    467  static void htc_async_tx_scat_complete(struct htc_target *target,
    485  endpoint = &target->endpoint[packet->endpoint];
    496  htc_tx_comp_update(target, endpoint, packet);
    501  hif_scatter_req_add(target->dev->ar, scat_req);
    507  static int ath6kl_htc_tx_issue(struct htc_target *target,
    519  padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
    524  target->dev->ar->mbox_info.htc_addr,
    528  status = hif_read_write_sync(target->dev->ar,
    529  target->dev->ar->mbox_info.htc_addr,
    536  status = hif_write_async(target->dev->ar,
    537  target->dev->ar->mbox_info.htc_addr,
    546  static int htc_check_credits(struct htc_target *target,
    551  *req_cred = (len > target->tgt_cred_sz) ?
    552  DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
    564  ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
    584  ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
    588  /* tell the target we need credits ASAP! */
    599  static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
    620  len = CALC_TXRX_PADDED_LEN(target,
    623  if (htc_check_credits(target, endpoint, &flags,
    637  packet->context = target;
    679  static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
    690  rem_scat = target->max_tx_bndl_sz;
    699  len = CALC_TXRX_PADDED_LEN(target,
    702  cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
    759  struct htc_target *target = endpoint->target;
    769  ac = target->dev->ar->ep2ac_map[endpoint->eid];
    774  n_scat = min(n_scat, target->msg_per_bndl_max);
    780  scat_req = hif_scatter_req_get(target->dev->ar);
    810  target->tx_bndl_mask &= ~txb_mask;
    812  target->tx_bndl_mask |= txb_mask;
    821  status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
    825  hif_scatter_req_add(target->dev->ar, scat_req);
    844  ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
    858  static void ath6kl_htc_tx_from_queue(struct htc_target *target,
    868  spin_lock_bh(&target->tx_lock);
    873  spin_unlock_bh(&target->tx_lock);
    886  ac = target->dev->ar->ep2ac_map[endpoint->eid];
    892  ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
    897  spin_unlock_bh(&target->tx_lock);
    904  if ((target->tx_bndl_mask) &&
    910  if (target->tx_bndl_mask & (1 << ac)) {
    927  status = ath6kl_htc_tx_issue(target, packet);
    935  spin_lock_bh(&target->tx_lock);
    946  if (!(target->tx_bndl_mask & (1 << ac)) &&
    948  if (++target->ac_tx_count[ac] >=
    950  target->ac_tx_count[ac] = 0;
    951  target->tx_bndl_mask |= (1 << ac);
    957  target->ac_tx_count[ac] = 0;
    962  spin_unlock_bh(&target->tx_lock);
    965  static bool ath6kl_htc_tx_try(struct htc_target *target,
    975  spin_lock_bh(&target->tx_lock);
    977  spin_unlock_bh(&target->tx_lock);
    989  if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
    996  spin_lock_bh(&target->tx_lock);
    998  spin_unlock_bh(&target->tx_lock);
   1000  ath6kl_htc_tx_from_queue(target, endpoint);
   1005  static void htc_chk_ep_txq(struct htc_target *target)
   1016  list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
   1019  spin_lock_bh(&target->tx_lock);
   1026  spin_unlock_bh(&target->tx_lock);
   1034  ath6kl_htc_tx_from_queue(target, endpoint);
   1035  spin_lock_bh(&target->tx_lock);
   1037  spin_unlock_bh(&target->tx_lock);
   1041  static int htc_setup_tx_complete(struct htc_target *target)
   1046  send_pkt = htc_get_control_buf(target, true);
   1051  if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
   1061  if (target->msg_per_bndl_max > 0) {
   1062  /* Indicate HTC bundling to the target */
   1065  target->msg_per_bndl_max;
   1087  status = ath6kl_htc_tx_issue(target, send_pkt);
   1088  htc_reclaim_txctrl_buf(target, send_pkt);
   1093  static void ath6kl_htc_set_credit_dist(struct htc_target *target,
   1100  target->credit_info = credit_info;
   1102  list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
   1103  &target->cred_dist_list);
   1107  endpoint = &target->endpoint[ep];
   1110  &target->cred_dist_list);
   1121  static int ath6kl_htc_mbox_tx(struct htc_target *target,
   1136  endpoint = &target->endpoint[packet->endpoint];
   1138  if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
   1139  packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
   1150  static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
   1155  struct htc_endpoint *endpoint = &target->endpoint[eid];
   1165  spin_lock_bh(&target->tx_lock);
   1173  spin_unlock_bh(&target->tx_lock);
   1189  static void ath6kl_htc_flush_txep_all(struct htc_target *target)
   1194  dump_cred_dist_stats(target);
   1197  endpoint = &target->endpoint[i];
   1201  ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
   1205  static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
   1209  struct htc_endpoint *endpoint = &target->endpoint[eid];
   1217  spin_lock_bh(&target->tx_lock);
   1237  target->credit_info, &target->cred_dist_list);
   1239  ath6kl_credit_distribute(target->credit_info,
   1240  &target->cred_dist_list,
   1244  spin_unlock_bh(&target->tx_lock);
   1247  htc_chk_ep_txq(target);
   1262  static inline bool htc_valid_rx_frame_len(struct htc_target *target,
   1265  return (eid == target->dev->ar->ctrl_ep) ?
   1269  static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
   1275  return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
   1278  static void htc_reclaim_rxbuf(struct htc_target *target,
   1285  ep->ep_cb.rx(ep->target, packet);
   1288  htc_add_rxbuf((void *)(target), packet);
   1292  static void reclaim_rx_ctrl_buf(struct htc_target *target,
   1295  spin_lock_bh(&target->htc_lock);
   1296  list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
   1297  spin_unlock_bh(&target->htc_lock);
   1300  static int ath6kl_htc_rx_packet(struct htc_target *target,
   1304  struct ath6kl_device *dev = target->dev;
   1308  padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
   1354  spin_lock_bh(&endpoint->target->rx_lock);
   1357  spin_unlock_bh(&endpoint->target->rx_lock);
   1358  ep_cb.rx_refill(endpoint->target, endpoint->eid);
   1361  spin_unlock_bh(&endpoint->target->rx_lock);
   1366  static int ath6kl_htc_rx_setup(struct htc_target *target,
   1377  full_len = CALC_TXRX_PADDED_LEN(target,
   1381  if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
   1403  spin_unlock_bh(&target->rx_lock);
   1406  packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
   1408  spin_lock_bh(&target->rx_lock);
   1413  spin_unlock_bh(&target->rx_lock);
   1414  ep_cb.rx_refill(ep->target, ep->eid);
   1415  spin_lock_bh(&target->rx_lock);
   1429  target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
   1430  target->ep_waiting = ep->eid;
   1450  if (target->htc_flags & HTC_OP_STATE_STOPPING) {
   1469  static int ath6kl_htc_rx_alloc(struct htc_target *target,
   1479  spin_lock_bh(&target->rx_lock);
   1523  if (n_msg > target->msg_per_bndl_max) {
   1537  status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
   1546  spin_unlock_bh(&target->rx_lock);
   1554  spin_unlock_bh(&target->rx_lock);
   1559  htc_reclaim_rxbuf(target, packet,
   1560  &target->endpoint[packet->endpoint]);
   1592  static void htc_proc_cred_rpt(struct htc_target *target,
   1601  spin_lock_bh(&target->tx_lock);
   1606  spin_unlock_bh(&target->tx_lock);
   1610  endpoint = &target->endpoint[rpt->eid];
   1659  ath6kl_credit_distribute(target->credit_info,
   1660  &target->cred_dist_list,
   1664  spin_unlock_bh(&target->tx_lock);
   1667  htc_chk_ep_txq(target);
   1670  static int htc_parse_trailer(struct htc_target *target,
   1688  htc_proc_cred_rpt(target,
   1750  static int htc_proc_trailer(struct htc_target *target,
   1785  status = htc_parse_trailer(target, record, record_buf,
   1803  static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
   1878  status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
   1907  endpoint->ep_cb.rx(endpoint->target, packet);
   1910  static int ath6kl_htc_rx_bundle(struct htc_target *target,
   1917  int rem_space = target->max_rx_bndl_sz;
   1921  n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
   1932  * This would only happen if the target ignored our max
   1945  scat_req = hif_scatter_req_get(target->dev->ar);
   1956  pad_len = CALC_TXRX_PADDED_LEN(target,
   1990  status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
   1996  hif_scatter_req_add(target->dev->ar, scat_req);
   2003  static int ath6kl_htc_rx_process_packets(struct htc_target *target,
   2013  ep = &target->endpoint[packet->endpoint];
   2019  status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
   2053  static int ath6kl_htc_rx_fetch(struct htc_target *target,
   2069  if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
   2075  status = ath6kl_htc_rx_bundle(target, rx_pktq,
   2105  status = ath6kl_htc_rx_packet(target, packet,
   2128  htc_reclaim_rxbuf(target, packet,
   2129  &target->endpoint[packet->endpoint]);
   2134  htc_reclaim_rxbuf(target, packet,
   2135  &target->endpoint[packet->endpoint]);
   2141  int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
   2168  endpoint = &target->endpoint[id];
   2184  status = ath6kl_htc_rx_alloc(target, look_aheads,
   2195  target->chk_irq_status_cnt = 1;
   2201  status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
   2207  status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
   2221  target->chk_irq_status_cnt = 1;
   2231  htc_reclaim_rxbuf(target, packets,
   2232  &target->endpoint[packets->endpoint]);
   2235  if (target->htc_flags & HTC_OP_STATE_STOPPING) {
   2237  ath6kl_hif_rx_control(target->dev, false);
   2245  if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
   2247  ath6kl_hif_rx_control(target->dev, false);
   2255  * Synchronously wait for a control message from the target,
   2259  static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
   2265  if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
   2277  packet = htc_get_control_buf(target, false);
   2293  if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
   2300  packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
   2313  reclaim_rx_ctrl_buf(target, packet);
   2319  static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
   2341  endpoint = &target->endpoint[first_pkt->endpoint];
   2343  if (target->htc_flags & HTC_OP_STATE_STOPPING) {
   2356  spin_lock_bh(&target->rx_lock);
   2361  if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
   2362  if (target->ep_waiting == first_pkt->endpoint) {
   2365  target->ep_waiting);
   2366  target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
   2367  target->ep_waiting = ENDPOINT_MAX;
   2372  spin_unlock_bh(&target->rx_lock);
   2374  if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
   2376  ath6kl_hif_rx_control(target->dev, true);
   2381  static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
   2388  endpoint = &target->endpoint[i];
   2393  spin_lock_bh(&target->rx_lock);
   2397  spin_unlock_bh(&target->rx_lock);
   2404  * been queued from target->free_ctrl_rxbuf where
   2417  spin_lock_bh(&target->rx_lock);
   2419  spin_unlock_bh(&target->rx_lock);
   2423  static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
   2438  "htc connect service target 0x%p service id 0x%x\n",
   2439  target, conn_req->svc_id);
   2446  /* allocate a packet to send to the target */
   2447  tx_pkt = htc_get_control_buf(target, true);
   2465  status = ath6kl_htc_tx_issue(target, tx_pkt);
   2471  rx_pkt = htc_wait_for_ctrl_msg(target);
   2490  ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
   2506  endpoint = &target->endpoint[assigned_ep];
   2528  endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
   2550  conn_req->max_rxmsg_sz / target->tgt_cred_sz;
   2553  max_msg_sz / target->tgt_cred_sz;
   2563  htc_reclaim_txctrl_buf(target, tx_pkt);
   2567  reclaim_rx_ctrl_buf(target, rx_pkt);
   2573  static void reset_ep_state(struct htc_target *target)
   2579  endpoint = &target->endpoint[i];
   2588  endpoint->target = target;
   2593  INIT_LIST_HEAD(&target->cred_dist_list);
   2596  static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
   2601  spin_lock_bh(&target->rx_lock);
   2602  num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
   2603  spin_unlock_bh(&target->rx_lock);
   2607  static void htc_setup_msg_bndl(struct htc_target *target)
   2610  target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
   2611  target->msg_per_bndl_max);
   2613  if (ath6kl_hif_enable_scatter(target->dev->ar)) {
   2614  target->msg_per_bndl_max = 0;
   2619  target->msg_per_bndl_max = min(target->max_scat_entries,
   2620  target->msg_per_bndl_max);
   2624  target->msg_per_bndl_max);
   2627  target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
   2629  target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
   2630  target->max_xfer_szper_scatreq);
   2633  target->max_rx_bndl_sz, target->max_tx_bndl_sz);
   2635  if (target->max_tx_bndl_sz)
   2637  target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
   2639  if (target->max_rx_bndl_sz)
   2640  target->rx_bndl_enable = true;
   2642  if ((target->tgt_cred_sz % target->block_sz) != 0) {
   2644  target->tgt_cred_sz);
   2652  target->tx_bndl_mask = 0;
   2656  static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
   2664  /* we should be getting 1 control message that the target is ready */
   2665  packet = htc_wait_for_ctrl_msg(target);
   2684  target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
   2685  target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
   2688  "htc target ready credits %d size %d\n",
   2689  target->tgt_creds, target->tgt_cred_sz);
   2694  target->htc_tgt_ver = rdy_msg->htc_ver;
   2695  target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
   2698  target->htc_tgt_ver = HTC_VERSION_2P0;
   2699  target->msg_per_bndl_max = 0;
   2703  (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
   2704  target->htc_tgt_ver);
   2706  if (target->msg_per_bndl_max > 0)
   2707  htc_setup_msg_bndl(target);
   2719  status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);
   2726  ath6kl_hif_cleanup_scatter(target->dev->ar);
   2731  reclaim_rx_ctrl_buf(target, packet);
   2738  * Start HTC, enable interrupts and let the target know
   2741  static int ath6kl_htc_mbox_start(struct htc_target *target)
   2746  memset(&target->dev->irq_proc_reg, 0,
   2747  sizeof(target->dev->irq_proc_reg));
   2750  ath6kl_hif_disable_intrs(target->dev);
   2752  target->htc_flags = 0;
   2753  target->rx_st_flags = 0;
   2756  while ((packet = htc_get_control_buf(target, false)) != NULL) {
   2757  status = htc_add_rxbuf(target, packet);
   2763  ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
   2764  target->tgt_creds);
   2766  dump_cred_dist_stats(target);
   2768  /* Indicate to the target of the setup completion */
   2769  status = htc_setup_tx_complete(target);
   2775  status = ath6kl_hif_unmask_intrs(target->dev);
   2778  ath6kl_htc_mbox_stop(target);
   2783  static int ath6kl_htc_reset(struct htc_target *target)
   2789  reset_ep_state(target);
   2791  block_size = target->dev->ar->mbox_info.block_size;
   2813  list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
   2815  list_add_tail(&packet->list, &target->free_ctrl_txbuf);
   2823  static void ath6kl_htc_mbox_stop(struct htc_target *target)
   2825  spin_lock_bh(&target->htc_lock);
   2826  target->htc_flags |= HTC_OP_STATE_STOPPING;
   2827  spin_unlock_bh(&target->htc_lock);
   2834  ath6kl_hif_mask_intrs(target->dev);
   2836  ath6kl_htc_flush_txep_all(target);
   2838  ath6kl_htc_mbox_flush_rx_buf(target);
   2840  ath6kl_htc_reset(target);
   2845  struct htc_target *target = NULL;
   2848  target = kzalloc(sizeof(*target), GFP_KERNEL);
   2849  if (!target) {
   2854  target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
   2855  if (!target->dev) {
   2861  spin_lock_init(&target->htc_lock);
   2862  spin_lock_init(&target->rx_lock);
   2863  spin_lock_init(&target->tx_lock);
   2865  INIT_LIST_HEAD(&target->free_ctrl_txbuf);
   2866  INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
   2867  INIT_LIST_HEAD(&target->cred_dist_list);
   2869  target->dev->ar = ar;
   2870  target->dev->htc_cnxt = target;
   2871  target->ep_waiting = ENDPOINT_MAX;
   2873  status = ath6kl_hif_setup(target->dev);
   2877  status = ath6kl_htc_reset(target);
   2881  return target;
   2884  ath6kl_htc_mbox_cleanup(target);
   2890  static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
   2894  ath6kl_hif_cleanup_scatter(target->dev->ar);
   2897  &target->free_ctrl_txbuf, list) {
   2904  &target->free_ctrl_rxbuf, list) {
   2910  kfree(target->dev);
   2911  kfree(target);
  htc.h
    143  * to accommodate optimal alignment for target processing. This reduces
    145  * alter the alignment and cause exceptions on the target. When adding to
    519  struct htc_target *target;   /* member of struct htc_endpoint */
    550  int (*wait_target)(struct htc_target *target);
    551  int (*start)(struct htc_target *target);
    552  int (*conn_service)(struct htc_target *target,
    555  int (*tx)(struct htc_target *target, struct htc_packet *packet);
    556  void (*stop)(struct htc_target *target);
    557  void (*cleanup)(struct htc_target *target);
    558  void (*flush_txep)(struct htc_target *target,
    560  void (*flush_rx_buf)(struct htc_target *target);
    561  void (*activity_changed)(struct htc_target *target,
    564  int (*get_rxbuf_num)(struct htc_target *target,
    566  int (*add_rxbuf_multiple)(struct htc_target *target,
    568  int (*credit_setup)(struct htc_target *target,
    576  /* our HTC target state */
    632  int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
/linux-4.4.14/include/uapi/linux/netfilter_bridge/ |
  ebt_arpreply.h
      6  int target;   /* member of struct ebt_arpreply_info */
  ebt_redirect.h
      6  int target;   /* member of struct ebt_redirect_info */
  ebt_mark_t.h
      4  /* The target member is reused for adding new actions, the
      5   * value of the real target is -1 to -NUM_STANDARD_TARGETS.
      7   * but let's play it safe) are kept to designate this target.
     19  int target;   /* member of struct ebt_mark_t_info */
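Note: going by the ebt_mark_t.h comment, a real verdict stored in the target member is a negative value from -1 down to -NUM_STANDARD_TARGETS, so a check routine can reject anything outside that range. A sketch of such a check; the constant's value is illustrative rather than quoted from the kernel headers:

#include <stdbool.h>
#include <stdio.h>

#define NUM_STANDARD_TARGETS 4   /* illustrative; see the real uapi headers */

/* Valid real targets run from -1 down to -NUM_STANDARD_TARGETS. */
static bool ebt_target_valid(int target)
{
	return target < 0 && target >= -NUM_STANDARD_TARGETS;
}

int main(void)
{
	printf("-1: %d, 0: %d, -5: %d\n",
	       ebt_target_valid(-1), ebt_target_valid(0), ebt_target_valid(-5));
	return 0;
}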
  ebt_nat.h
      8  int target;   /* member of struct ebt_nat_info */
/linux-4.4.14/lib/ |
  syscall.c
      6  static int collect_syscall(struct task_struct *target, long *callno,
     10  struct pt_regs *regs = task_pt_regs(target);
     17  *callno = syscall_get_nr(target, regs);
     19  syscall_get_arguments(target, regs, 0, maxargs, args);
     26  * @target: thread to examine
     33  * If @target is blocked in a system call, returns zero with *@callno
     37  * call is still in progress. Note we may get this result if @target
     41  * If @target is blocked in the kernel during a fault or exception,
     43  * If so, it's now safe to examine @target using &struct user_regset
     44  * get() calls as long as we're sure @target won't return to user mode.
     46  * Returns -%EAGAIN if @target does not remain blocked.
     50  int task_current_syscall(struct task_struct *target, long *callno,
     60  if (target == current)
     61  return collect_syscall(target, callno, args, maxargs, sp, pc);
     63  state = target->state;
     67  ncsw = wait_task_inactive(target, state);
     69  unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
     70  unlikely(wait_task_inactive(target, state) != ncsw))
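Note: the task_current_syscall() hits outline a lock-free sampling protocol: record how many times the target has been scheduled (wait_task_inactive()), read its registers (collect_syscall()), then confirm the count has not moved; if it has, report -EAGAIN. A userspace toy with the same shape; every name below is invented:

#include <stdio.h>

struct fake_task {
	long nr_switches;  /* bumped whenever the task runs again */
	long syscall_nr;   /* state we want to sample consistently */
};

/* Snapshot the switch count, read the state, then verify the count did
 * not change while we were reading, i.e. the task stayed blocked. */
static int sample_syscall(struct fake_task *t, long *callno)
{
	long ncsw = t->nr_switches;   /* wait_task_inactive() analogue */
	long nr = t->syscall_nr;      /* collect_syscall() analogue */

	if (t->nr_switches != ncsw)
		return -1;            /* -EAGAIN: target did not stay blocked */
	*callno = nr;
	return 0;
}

int main(void)
{
	struct fake_task t = { .nr_switches = 42, .syscall_nr = 1 };
	long nr;

	if (sample_syscall(&t, &nr) == 0)
		printf("task is blocked in syscall %ld\n", nr);
	return 0;
}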
/linux-4.4.14/drivers/net/wireless/ath/ath9k/ |
  htc_hst.c
     21  static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
     26  struct htc_endpoint *endpoint = &target->endpoint[epid];
     35  status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
     88  static void htc_process_target_rdy(struct htc_target *target,
     94  target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
     96  endpoint = &target->endpoint[ENDPOINT0];
     99  atomic_inc(&target->tgt_ready);
    100  complete(&target->target_wait);
    103  static void htc_process_conn_rsp(struct htc_target *target,
    119  endpoint = &target->endpoint[epid];
    122  tmp_endpoint = &target->endpoint[tepid];
    138  target->conn_rsp_epid = epid;
    139  complete(&target->cmd_wait);
    141  target->conn_rsp_epid = ENDPOINT_UNUSED;
    145  static int htc_config_pipe_credits(struct htc_target *target)
    154  dev_err(target->dev, "failed to allocate send buffer\n");
    164  cp_msg->credits = target->credits;
    166  target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
    168  ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
    172  time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
    174  dev_err(target->dev, "HTC credit config timeout\n");
    184  static int htc_setup_complete(struct htc_target *target)
    193  dev_err(target->dev, "failed to allocate send buffer\n");
    202  target->htc_flags |= HTC_OP_START_WAIT;
    204  ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
    208  time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
    210  dev_err(target->dev, "HTC start timeout\n");
    223  int htc_init(struct htc_target *target)
    227  ret = htc_config_pipe_credits(target);
    231  return htc_setup_complete(target);
    234  int htc_connect_service(struct htc_target *target,
    245  endpoint = get_next_avail_ep(target->endpoint);
    247  dev_err(target->dev, "Endpoint is not available for"
    261  dev_err(target->dev, "Failed to allocate buf to send"
    276  ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
    280  time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
    282  dev_err(target->dev, "Service connection timeout for: %d\n",
    287  *conn_rsp_epid = target->conn_rsp_epid;
    294  int htc_send(struct htc_target *target, struct sk_buff *skb)
    299  return htc_issue_send(target, skb, skb->len, 0, tx_ctl->epid);
    302  int htc_send_epid(struct htc_target *target, struct sk_buff *skb,
    305  return htc_issue_send(target, skb, skb->len, 0, epid);
    308  void htc_stop(struct htc_target *target)
    310  target->hif->stop(target->hif_dev);
    313  void htc_start(struct htc_target *target)
    315  target->hif->start(target->hif_dev);
    318  void htc_sta_drain(struct htc_target *target, u8 idx)
    320  target->hif->sta_drain(target->hif_dev, idx);
    470  struct htc_target *target;
    472  target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
    473  if (!target)
    476  init_completion(&target->target_wait);
    477  init_completion(&target->cmd_wait);
    479  target->hif = hif;
    480  target->hif_dev = hif_handle;
    481  target->dev = dev;
    484  endpoint = &target->endpoint[ENDPOINT0];
    488  atomic_set(&target->tgt_ready, 0);
    490  return target;
    498  int ath9k_htc_hw_init(struct htc_target *target,
    502  if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
    510  void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug)
    512  if (target)
    513  ath9k_htc_disconnect_device(target, hot_unplug);
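Note: htc_config_pipe_credits(), htc_setup_complete() and htc_connect_service() above all share one shape: send a control message on ENDPOINT0, then block in wait_for_completion_timeout(&target->cmd_wait, HZ) until the RX path (htc_process_conn_rsp()) stores the result and calls complete(). A userspace analogue with pthreads standing in for the kernel completion API; all names are illustrative. Build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cmd_wait = PTHREAD_COND_INITIALIZER;
static int conn_rsp_epid = -1;

/* Plays the role of the RX path: record the result, signal the waiter. */
static void *rx_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	conn_rsp_epid = 3;                 /* endpoint assigned by the target */
	pthread_cond_signal(&cmd_wait);    /* complete(&target->cmd_wait) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t rx;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;              /* wait_for_completion_timeout(.., HZ) */

	pthread_create(&rx, NULL, rx_thread, NULL);

	pthread_mutex_lock(&lock);
	while (conn_rsp_epid < 0) {
		if (pthread_cond_timedwait(&cmd_wait, &lock, &deadline))
			break;             /* timeout: "Service connection timeout" */
	}
	pthread_mutex_unlock(&lock);

	printf("assigned endpoint: %d\n", conn_rsp_epid);
	pthread_join(rx, NULL);
	return 0;
}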
  htc_hst.h
    205  int htc_init(struct htc_target *target);
    206  int htc_connect_service(struct htc_target *target,
    209  int htc_send(struct htc_target *target, struct sk_buff *skb);
    210  int htc_send_epid(struct htc_target *target, struct sk_buff *skb,
    212  void htc_stop(struct htc_target *target);
    213  void htc_start(struct htc_target *target);
    214  void htc_sta_drain(struct htc_target *target, u8 idx);
    225  int ath9k_htc_hw_init(struct htc_target *target,
    228  void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
/linux-4.4.14/drivers/macintosh/ |
H A D | windfarm_pid.c | 37 s32 target; wf_pid_run() local 67 /* Calculate target */ wf_pid_run() 68 target = (s32)((integ * (s64)st->param.gr + deriv * (s64)st->param.gd + wf_pid_run() 71 target += st->target; wf_pid_run() 72 target = max(target, st->param.min); wf_pid_run() 73 target = min(target, st->param.max); wf_pid_run() 74 st->target = target; wf_pid_run() 76 return st->target; wf_pid_run() 92 s32 error, target, sval, adj; wf_cpu_pid_run() local 137 /* Calculate target */ wf_cpu_pid_run() 138 target = st->target + (s32)((deriv + prop) >> 36); wf_cpu_pid_run() 139 target = max(target, st->param.min); wf_cpu_pid_run() 140 target = min(target, st->param.max); wf_cpu_pid_run() 141 st->target = target; wf_cpu_pid_run() 143 return st->target; wf_cpu_pid_run()
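wf_pid_run() and wf_cpu_pid_run() above are fixed-point PID steps: each term is multiplied by a scaled 64-bit gain, the sum is shifted back down (>> 36 in the CPU variant), and the result is clamped to [param.min, param.max]. A stand-alone user-space illustration of that arithmetic; the 36-bit scale mirrors the snippet, the field names are illustrative (the kernel calls the integral gain gr), and additive mode and sample history are elided:

    #include <stdint.h>
    #include <stdio.h>

    struct pid {
            int64_t gp, gi, gd;     /* scaled P/I/D gains */
            int32_t min, max;       /* clamp range for the output */
            int32_t itarget;        /* input setpoint */
            int64_t integ;          /* integral accumulator */
            int32_t last_err;       /* previous error, for the derivative */
            int32_t target;         /* current output */
    };

    static int32_t pid_run(struct pid *st, int32_t sample)
    {
            int32_t err = sample - st->itarget;
            int64_t deriv = err - st->last_err;
            int64_t t;

            st->integ += err;
            st->last_err = err;

            /* scaled sum, shifted back down as in wf_cpu_pid_run() */
            t = (st->integ * st->gi + deriv * st->gd + (int64_t)err * st->gp) >> 36;
            if (t < st->min) t = st->min;
            if (t > st->max) t = st->max;
            st->target = (int32_t)t;
            return st->target;
    }

    int main(void)
    {
            struct pid st = { .gp = 1LL << 36, .gi = 0, .gd = 0,
                              .min = 0, .max = 100, .itarget = 50 };
            printf("%d\n", pid_run(&st, 75));   /* err = 25 -> output 25, inside [0,100] */
            return 0;
    }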
|
H A D | windfarm_pid.h | 29 int additive; /* 1: target relative to previous value */ 31 s32 itarget; /* PID input target */ 32 s32 min,max; /* min and max target values */ 38 s32 target; /* current target value */ member in struct:wf_pid_state 65 s32 ttarget; /* PID input target */ 67 s32 min,max; /* min and max target values */ 74 s32 target; /* current target value */ member in struct:wf_cpu_pid_state
|
/linux-4.4.14/drivers/infiniband/ulp/srp/ |
H A D | ib_srp.c | 92 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); 131 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA."); 199 static int srp_target_is_topspin(struct srp_target_port *target) srp_target_is_topspin() argument 205 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || srp_target_is_topspin() 206 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); srp_target_is_topspin() 258 static int srp_init_qp(struct srp_target_port *target, srp_init_qp() argument 268 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev, srp_init_qp() 269 target->srp_host->port, srp_init_qp() 270 be16_to_cpu(target->pkey), srp_init_qp() 278 attr->port_num = target->srp_host->port; srp_init_qp() 293 struct srp_target_port *target = ch->target; srp_new_cm_id() local 296 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, srp_new_cm_id() 304 ch->path.sgid = target->sgid; srp_new_cm_id() 305 ch->path.dgid = target->orig_dgid; srp_new_cm_id() 306 ch->path.pkey = target->pkey; srp_new_cm_id() 307 ch->path.service_id = target->service_id; srp_new_cm_id() 312 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target) srp_alloc_fmr_pool() argument 314 struct srp_device *dev = target->srp_host->srp_dev; srp_alloc_fmr_pool() 318 fmr_param.pool_size = target->scsi_host->can_queue; srp_alloc_fmr_pool() 439 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target) srp_alloc_fr_pool() argument 441 struct srp_device *dev = target->srp_host->srp_dev; srp_alloc_fr_pool() 444 target->scsi_host->can_queue, srp_alloc_fr_pool() 484 struct srp_target_port *target = ch->target; srp_create_ch_ib() local 485 struct srp_device *dev = target->srp_host->srp_dev; srp_create_ch_ib() 500 cq_attr.cqe = target->queue_size + 1; srp_create_ch_ib() 509 cq_attr.cqe = m * target->queue_size; srp_create_ch_ib() 521 init_attr->cap.max_send_wr = m * target->queue_size; srp_create_ch_ib() 522 init_attr->cap.max_recv_wr = target->queue_size + 1; srp_create_ch_ib() 536 ret = srp_init_qp(target, qp); srp_create_ch_ib() 541 fr_pool = srp_alloc_fr_pool(target); srp_create_ch_ib() 544 shost_printk(KERN_WARNING, target->scsi_host, PFX srp_create_ch_ib() 549 fmr_pool = srp_alloc_fmr_pool(target); srp_create_ch_ib() 552 shost_printk(KERN_WARNING, target->scsi_host, PFX srp_create_ch_ib() 600 static void srp_free_ch_ib(struct srp_target_port *target, srp_free_ch_ib() argument 603 struct srp_device *dev = target->srp_host->srp_dev; srp_free_ch_ib() 606 if (!ch->target) srp_free_ch_ib() 635 ch->target = NULL; srp_free_ch_ib() 641 for (i = 0; i < target->queue_size; ++i) srp_free_ch_ib() 642 srp_free_iu(target->srp_host, ch->rx_ring[i]); srp_free_ch_ib() 647 for (i = 0; i < target->queue_size; ++i) srp_free_ch_ib() 648 srp_free_iu(target->srp_host, ch->tx_ring[i]); srp_free_ch_ib() 659 struct srp_target_port *target = ch->target; srp_path_rec_completion() local 663 shost_printk(KERN_ERR, target->scsi_host, srp_path_rec_completion() 672 struct srp_target_port *target = ch->target; srp_lookup_path() local 680 target->srp_host->srp_dev->dev, srp_lookup_path() 681 target->srp_host->port, srp_lookup_path() 700 shost_printk(KERN_WARNING, target->scsi_host, srp_lookup_path() 708 struct srp_target_port *target = ch->target; 
srp_send_req() local 721 req->param.service_id = target->service_id; srp_send_req() 738 req->param.retry_count = target->tl_retry_count; srp_send_req() 744 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len); srp_send_req() 758 if (target->io_class == SRP_REV10_IB_IO_CLASS) { srp_send_req() 760 &target->sgid.global.interface_id, 8); srp_send_req() 762 &target->initiator_ext, 8); srp_send_req() 763 memcpy(req->priv.target_port_id, &target->ioc_guid, 8); srp_send_req() 764 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8); srp_send_req() 767 &target->initiator_ext, 8); srp_send_req() 769 &target->sgid.global.interface_id, 8); srp_send_req() 770 memcpy(req->priv.target_port_id, &target->id_ext, 8); srp_send_req() 771 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); srp_send_req() 779 if (srp_target_is_topspin(target)) { srp_send_req() 780 shost_printk(KERN_DEBUG, target->scsi_host, srp_send_req() 782 "activated for target GUID %016llx\n", srp_send_req() 783 be64_to_cpu(target->ioc_guid)); srp_send_req() 786 &target->srp_host->srp_dev->dev->node_guid, 8); srp_send_req() 796 static bool srp_queue_remove_work(struct srp_target_port *target) srp_queue_remove_work() argument 800 spin_lock_irq(&target->lock); srp_queue_remove_work() 801 if (target->state != SRP_TARGET_REMOVED) { srp_queue_remove_work() 802 target->state = SRP_TARGET_REMOVED; srp_queue_remove_work() 805 spin_unlock_irq(&target->lock); srp_queue_remove_work() 808 queue_work(srp_remove_wq, &target->remove_work); srp_queue_remove_work() 813 static void srp_disconnect_target(struct srp_target_port *target) srp_disconnect_target() argument 820 for (i = 0; i < target->ch_count; i++) { srp_disconnect_target() 821 ch = &target->ch[i]; srp_disconnect_target() 824 shost_printk(KERN_DEBUG, target->scsi_host, srp_disconnect_target() 830 static void srp_free_req_data(struct srp_target_port *target, srp_free_req_data() argument 833 struct srp_device *dev = target->srp_host->srp_dev; srp_free_req_data() 841 for (i = 0; i < target->req_ring_size; ++i) { srp_free_req_data() 851 target->indirect_size, srp_free_req_data() 863 struct srp_target_port *target = ch->target; srp_alloc_req_data() local 864 struct srp_device *srp_dev = target->srp_host->srp_dev; srp_alloc_req_data() 871 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), srp_alloc_req_data() 876 for (i = 0; i < target->req_ring_size; ++i) { srp_alloc_req_data() 878 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *), srp_alloc_req_data() 891 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); srp_alloc_req_data() 896 target->indirect_size, srp_alloc_req_data() 924 static void srp_remove_target(struct srp_target_port *target) srp_remove_target() argument 929 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); srp_remove_target() 931 srp_del_scsi_host_attr(target->scsi_host); srp_remove_target() 932 srp_rport_get(target->rport); srp_remove_target() 933 srp_remove_host(target->scsi_host); srp_remove_target() 934 scsi_remove_host(target->scsi_host); srp_remove_target() 935 srp_stop_rport_timers(target->rport); srp_remove_target() 936 srp_disconnect_target(target); srp_remove_target() 937 for (i = 0; i < target->ch_count; i++) { srp_remove_target() 938 ch = &target->ch[i]; srp_remove_target() 939 srp_free_ch_ib(target, ch); srp_remove_target() 941 cancel_work_sync(&target->tl_err_work); srp_remove_target() 942 srp_rport_put(target->rport); srp_remove_target() 943 for (i = 0; i < target->ch_count; i++) { srp_remove_target() 944 ch = 
&target->ch[i]; srp_remove_target() 945 srp_free_req_data(target, ch); srp_remove_target() 947 kfree(target->ch); srp_remove_target() 948 target->ch = NULL; srp_remove_target() 950 spin_lock(&target->srp_host->target_lock); srp_remove_target() 951 list_del(&target->list); srp_remove_target() 952 spin_unlock(&target->srp_host->target_lock); srp_remove_target() 954 scsi_host_put(target->scsi_host); srp_remove_target() 959 struct srp_target_port *target = srp_remove_work() local 962 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); srp_remove_work() 964 srp_remove_target(target); srp_remove_work() 969 struct srp_target_port *target = rport->lld_data; srp_rport_delete() local 971 srp_queue_remove_work(target); srp_rport_delete() 976 * @target: SRP target port. 978 static int srp_connected_ch(struct srp_target_port *target) srp_connected_ch() argument 982 for (i = 0; i < target->ch_count; i++) srp_connected_ch() 983 c += target->ch[i].connected; srp_connected_ch() 990 struct srp_target_port *target = ch->target; srp_connect_ch() local 993 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0); srp_connect_ch() 1030 shost_printk(KERN_ERR, target->scsi_host, PFX srp_connect_ch() 1063 struct srp_target_port *target = ch->target; srp_unmap_data() local 1064 struct srp_device *dev = target->srp_host->srp_dev; srp_unmap_data() 1079 shost_printk(KERN_ERR, target->scsi_host, PFX srp_unmap_data() 1083 &target->tl_err_work); srp_unmap_data() 1137 * @req_lim_delta: Amount to be added to @target->req_lim. 1165 struct srp_target_port *target = rport->lld_data; srp_terminate_io() local 1167 struct Scsi_Host *shost = target->scsi_host; srp_terminate_io() 1178 for (i = 0; i < target->ch_count; i++) { shost_for_each_device() 1179 ch = &target->ch[i]; shost_for_each_device() 1181 for (j = 0; j < target->req_ring_size; ++j) { shost_for_each_device() 1201 struct srp_target_port *target = rport->lld_data; srp_rport_reconnect() local 1206 srp_disconnect_target(target); srp_rport_reconnect() 1208 if (target->state == SRP_TARGET_SCANNING) srp_rport_reconnect() 1212 * Now get a new local CM ID so that we avoid confusing the target in srp_rport_reconnect() 1216 for (i = 0; i < target->ch_count; i++) { srp_rport_reconnect() 1217 ch = &target->ch[i]; srp_rport_reconnect() 1220 for (i = 0; i < target->ch_count; i++) { srp_rport_reconnect() 1221 ch = &target->ch[i]; srp_rport_reconnect() 1222 for (j = 0; j < target->req_ring_size; ++j) { srp_rport_reconnect() 1228 for (i = 0; i < target->ch_count; i++) { srp_rport_reconnect() 1229 ch = &target->ch[i]; srp_rport_reconnect() 1238 for (j = 0; j < target->queue_size; ++j) srp_rport_reconnect() 1242 target->qp_in_error = false; srp_rport_reconnect() 1244 for (i = 0; i < target->ch_count; i++) { srp_rport_reconnect() 1245 ch = &target->ch[i]; srp_rport_reconnect() 1253 shost_printk(KERN_INFO, target->scsi_host, srp_rport_reconnect() 1278 struct srp_target_port *target = ch->target; srp_map_finish_fmr() local 1279 struct srp_device *dev = target->srp_host->srp_dev; srp_map_finish_fmr() 1291 if (state->npages == 1 && target->global_mr) { srp_map_finish_fmr() 1293 target->global_mr->rkey); srp_map_finish_fmr() 1318 struct srp_target_port *target = ch->target; srp_map_finish_fr() local 1319 struct srp_device *dev = target->srp_host->srp_dev; srp_map_finish_fr() 1334 if (sg_nents == 1 && target->global_mr) { srp_map_finish_fr() 1337 target->global_mr->rkey); srp_map_finish_fr() 1380 struct srp_target_port *target = ch->target; srp_map_sg_entry() local 1381 struct srp_device *dev = 
target->srp_host->srp_dev; srp_map_sg_entry() 1429 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt; srp_map_sg_fmr() 1452 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt; srp_map_sg_fr() 1476 struct srp_target_port *target = ch->target; srp_map_sg_dma() local 1477 struct srp_device *dev = target->srp_host->srp_dev; srp_map_sg_dma() 1485 target->global_mr->rkey); for_each_sg() 1504 struct srp_target_port *target = ch->target; srp_map_idb() local 1505 struct srp_device *dev = target->srp_host->srp_dev; srp_map_idb() 1550 struct srp_target_port *target = ch->target; srp_map_data() local 1567 shost_printk(KERN_WARNING, target->scsi_host, srp_map_data() 1576 dev = target->srp_host->srp_dev; srp_map_data() 1586 if (count == 1 && target->global_mr) { srp_map_data() 1596 buf->key = cpu_to_be32(target->global_mr->rkey); srp_map_data() 1610 target->indirect_size, DMA_TO_DEVICE); srp_map_data() 1622 * target is not using an external indirect table, we are srp_map_data() 1637 if (unlikely(target->cmd_sg_cnt < state.ndesc && srp_map_data() 1638 !target->allow_ext_sg)) { srp_map_data() 1639 shost_printk(KERN_ERR, target->scsi_host, srp_map_data() 1644 count = min(state.ndesc, target->cmd_sg_cnt); srp_map_data() 1655 if (!target->global_mr) { srp_map_data() 1662 idb_rkey = cpu_to_be32(target->global_mr->rkey); srp_map_data() 1712 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 1718 struct srp_target_port *target = ch->target; __srp_get_tx_iu() local 1727 /* Initiator responses to target requests do not consume credits */ __srp_get_tx_iu() 1730 ++target->zero_req_lim; __srp_get_tx_iu() 1744 struct srp_target_port *target = ch->target; srp_post_send() local 1750 list.lkey = target->lkey; srp_post_send() 1764 struct srp_target_port *target = ch->target; srp_post_recv() local 1770 list.lkey = target->lkey; srp_post_recv() 1782 struct srp_target_port *target = ch->target; srp_process_rsp() local 1797 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); srp_process_rsp() 1803 shost_printk(KERN_ERR, target->scsi_host, srp_process_rsp() 1805 rsp->tag, ch - target->ch, ch->qp->qp_num); srp_process_rsp() 1842 struct srp_target_port *target = ch->target; srp_response_common() local 1843 struct ib_device *dev = target->srp_host->srp_dev->dev; srp_response_common() 1854 shost_printk(KERN_ERR, target->scsi_host, PFX srp_response_common() 1865 shost_printk(KERN_ERR, target->scsi_host, PFX srp_response_common() 1883 shost_printk(KERN_ERR, ch->target->scsi_host, PFX srp_process_cred_req() 1890 struct srp_target_port *target = ch->target; srp_process_aer_req() local 1897 shost_printk(KERN_ERR, target->scsi_host, PFX srp_process_aer_req() 1901 shost_printk(KERN_ERR, target->scsi_host, PFX srp_process_aer_req() 1907 struct srp_target_port *target = ch->target; srp_handle_recv() local 1908 struct ib_device *dev = target->srp_host->srp_dev->dev; srp_handle_recv() 1919 shost_printk(KERN_ERR, target->scsi_host, srp_handle_recv() 1939 /* XXX Handle target logout */ srp_handle_recv() 1940 shost_printk(KERN_WARNING, target->scsi_host, srp_handle_recv() 1941 PFX "Got target logout request\n"); srp_handle_recv() 1945 shost_printk(KERN_WARNING, target->scsi_host, srp_handle_recv() 1955 shost_printk(KERN_ERR, target->scsi_host, srp_handle_recv() 1961 * @work: Work structure embedded in an SRP target port. 1964 * hence the target->rport test. 
1968 struct srp_target_port *target; srp_tl_err_work() local 1970 target = container_of(work, struct srp_target_port, tl_err_work); srp_tl_err_work() 1971 if (target->rport) srp_tl_err_work() 1972 srp_start_tl_fail_timers(target->rport); srp_tl_err_work() 1978 struct srp_target_port *target = ch->target; srp_handle_qp_err() local 1985 if (ch->connected && !target->qp_in_error) { srp_handle_qp_err() 1987 shost_printk(KERN_ERR, target->scsi_host, PFX srp_handle_qp_err() 1991 shost_printk(KERN_ERR, target->scsi_host, PFX srp_handle_qp_err() 1995 shost_printk(KERN_ERR, target->scsi_host, srp_handle_qp_err() 2001 queue_work(system_long_wq, &target->tl_err_work); srp_handle_qp_err() 2003 target->qp_in_error = true; srp_handle_qp_err() 2039 struct srp_target_port *target = host_to_target(shost); srp_queuecommand() local 2040 struct srp_rport *rport = target->rport; srp_queuecommand() 2061 scmnd->result = srp_chkready(target->rport); srp_queuecommand() 2067 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; srp_queuecommand() 2069 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", srp_queuecommand() 2071 target->req_ring_size); srp_queuecommand() 2081 dev = target->srp_host->srp_dev->dev; srp_queuecommand() 2082 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, srp_queuecommand() 2100 shost_printk(KERN_ERR, target->scsi_host, srp_queuecommand() 2113 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, srp_queuecommand() 2117 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); srp_queuecommand() 2158 struct srp_target_port *target = ch->target; srp_alloc_iu_bufs() local 2161 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), srp_alloc_iu_bufs() 2165 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), srp_alloc_iu_bufs() 2170 for (i = 0; i < target->queue_size; ++i) { srp_alloc_iu_bufs() 2171 ch->rx_ring[i] = srp_alloc_iu(target->srp_host, srp_alloc_iu_bufs() 2178 for (i = 0; i < target->queue_size; ++i) { srp_alloc_iu_bufs() 2179 ch->tx_ring[i] = srp_alloc_iu(target->srp_host, srp_alloc_iu_bufs() 2180 target->max_iu_len, srp_alloc_iu_bufs() 2191 for (i = 0; i < target->queue_size; ++i) { srp_alloc_iu_bufs() 2192 srp_free_iu(target->srp_host, ch->rx_ring[i]); srp_alloc_iu_bufs() 2193 srp_free_iu(target->srp_host, ch->tx_ring[i]); srp_alloc_iu_bufs() 2220 * Set target->rq_tmo_jiffies to one second more than the largest time srp_compute_rq_tmo() 2237 struct srp_target_port *target = ch->target; srp_cm_rep_handler() local 2251 target->scsi_host->can_queue srp_cm_rep_handler() 2253 target->scsi_host->can_queue); srp_cm_rep_handler() 2254 target->scsi_host->cmd_per_lun srp_cm_rep_handler() 2255 = min_t(int, target->scsi_host->can_queue, srp_cm_rep_handler() 2256 target->scsi_host->cmd_per_lun); srp_cm_rep_handler() 2258 shost_printk(KERN_WARNING, target->scsi_host, srp_cm_rep_handler() 2284 for (i = 0; i < target->queue_size; i++) { srp_cm_rep_handler() 2297 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); srp_cm_rep_handler() 2316 struct srp_target_port *target = ch->target; srp_cm_rej_handler() local 2317 struct Scsi_Host *shost = target->scsi_host; srp_cm_rej_handler() 2334 if (srp_target_is_topspin(target)) { srp_cm_rej_handler() 2344 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", srp_cm_rej_handler() 2374 target->sgid.raw, srp_cm_rej_handler() 2375 target->orig_dgid.raw, reason); srp_cm_rej_handler() 2398 struct srp_target_port *target = ch->target; srp_cm_handler() local 2403 
shost_printk(KERN_DEBUG, target->scsi_host, srp_cm_handler() 2415 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); srp_cm_handler() 2422 shost_printk(KERN_WARNING, target->scsi_host, srp_cm_handler() 2426 shost_printk(KERN_ERR, target->scsi_host, srp_cm_handler() 2428 queue_work(system_long_wq, &target->tl_err_work); srp_cm_handler() 2432 shost_printk(KERN_ERR, target->scsi_host, srp_cm_handler() 2445 shost_printk(KERN_WARNING, target->scsi_host, srp_cm_handler() 2474 struct srp_target_port *target = ch->target; srp_send_tsk_mgmt() local 2475 struct srp_rport *rport = target->rport; srp_send_tsk_mgmt() 2476 struct ib_device *dev = target->srp_host->srp_dev->dev; srp_send_tsk_mgmt() 2480 if (!ch->connected || target->qp_in_error) srp_send_tsk_mgmt() 2530 struct srp_target_port *target = host_to_target(scmnd->device->host); srp_abort() local 2537 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); srp_abort() 2543 if (WARN_ON_ONCE(ch_idx >= target->ch_count)) srp_abort() 2545 ch = &target->ch[ch_idx]; srp_abort() 2548 shost_printk(KERN_ERR, target->scsi_host, srp_abort() 2553 else if (target->rport->state == SRP_RPORT_LOST) srp_abort() 2566 struct srp_target_port *target = host_to_target(scmnd->device->host); srp_reset_device() local 2570 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); srp_reset_device() 2572 ch = &target->ch[0]; srp_reset_device() 2579 for (i = 0; i < target->ch_count; i++) { srp_reset_device() 2580 ch = &target->ch[i]; srp_reset_device() 2581 for (i = 0; i < target->req_ring_size; ++i) { srp_reset_device() 2593 struct srp_target_port *target = host_to_target(scmnd->device->host); srp_reset_host() local 2595 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); srp_reset_host() 2597 return srp_reconnect_rport(target->rport) == 0 ? 
SUCCESS : FAILED; srp_reset_host() 2603 struct srp_target_port *target = host_to_target(shost); srp_slave_configure() local 2608 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); srp_slave_configure() 2618 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_id_ext() local 2620 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); show_id_ext() 2626 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_ioc_guid() local 2628 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); show_ioc_guid() 2634 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_service_id() local 2636 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id)); show_service_id() 2642 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_pkey() local 2644 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey)); show_pkey() 2650 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_sgid() local 2652 return sprintf(buf, "%pI6\n", target->sgid.raw); show_sgid() 2658 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_dgid() local 2659 struct srp_rdma_ch *ch = &target->ch[0]; show_dgid() 2667 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_orig_dgid() local 2669 return sprintf(buf, "%pI6\n", target->orig_dgid.raw); show_orig_dgid() 2675 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_req_lim() local 2679 for (i = 0; i < target->ch_count; i++) { show_req_lim() 2680 ch = &target->ch[i]; show_req_lim() 2689 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_zero_req_lim() local 2691 return sprintf(buf, "%d\n", target->zero_req_lim); show_zero_req_lim() 2697 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_local_ib_port() local 2699 return sprintf(buf, "%d\n", target->srp_host->port); show_local_ib_port() 2705 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_local_ib_device() local 2707 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); show_local_ib_device() 2713 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_ch_count() local 2715 return sprintf(buf, "%d\n", target->ch_count); show_ch_count() 2721 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_comp_vector() local 2723 return sprintf(buf, "%d\n", target->comp_vector); show_comp_vector() 2729 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_tl_retry_count() local 2731 return sprintf(buf, "%d\n", target->tl_retry_count); show_tl_retry_count() 2737 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_cmd_sg_entries() local 2739 return sprintf(buf, "%u\n", target->cmd_sg_cnt); show_cmd_sg_entries() 2745 struct srp_target_port *target = host_to_target(class_to_shost(dev)); show_allow_ext_sg() local 2747 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); show_allow_ext_sg() 2821 * < 0 upon failure. Caller is responsible for SRP target port cleanup. 2822 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port 2824 * 0 and target->state != SRP_TARGET_REMOVED upon success. 
2826 static int srp_add_target(struct srp_host *host, struct srp_target_port *target) srp_add_target() argument 2831 target->state = SRP_TARGET_SCANNING; srp_add_target() 2832 sprintf(target->target_name, "SRP.T10:%016llX", srp_add_target() 2833 be64_to_cpu(target->id_ext)); srp_add_target() 2835 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) srp_add_target() 2838 memcpy(ids.port_id, &target->id_ext, 8); srp_add_target() 2839 memcpy(ids.port_id + 8, &target->ioc_guid, 8); srp_add_target() 2841 rport = srp_rport_add(target->scsi_host, &ids); srp_add_target() 2843 scsi_remove_host(target->scsi_host); srp_add_target() 2847 rport->lld_data = target; srp_add_target() 2848 target->rport = rport; srp_add_target() 2851 list_add_tail(&target->list, &host->target_list); srp_add_target() 2854 scsi_scan_target(&target->scsi_host->shost_gendev, srp_add_target() 2855 0, target->scsi_id, SCAN_WILD_CARD, 0); srp_add_target() 2857 if (srp_connected_ch(target) < target->ch_count || srp_add_target() 2858 target->qp_in_error) { srp_add_target() 2859 shost_printk(KERN_INFO, target->scsi_host, srp_add_target() 2861 srp_queue_remove_work(target); srp_add_target() 2866 dev_name(&target->scsi_host->shost_gendev), srp_add_target() 2867 srp_sdev_count(target->scsi_host)); srp_add_target() 2869 spin_lock_irq(&target->lock); srp_add_target() 2870 if (target->state == SRP_TARGET_SCANNING) srp_add_target() 2871 target->state = SRP_TARGET_LIVE; srp_add_target() 2872 spin_unlock_irq(&target->lock); srp_add_target() 2892 * srp_conn_unique() - check whether the connection to a target is unique 2894 * @target: SRP target port. 2897 struct srp_target_port *target) srp_conn_unique() 2902 if (target->state == SRP_TARGET_REMOVED) srp_conn_unique() 2909 if (t != target && srp_conn_unique() 2910 target->id_ext == t->id_ext && srp_conn_unique() 2911 target->ioc_guid == t->ioc_guid && srp_conn_unique() 2912 target->initiator_ext == t->initiator_ext) { srp_conn_unique() 2974 static int srp_parse_options(const char *buf, struct srp_target_port *target) srp_parse_options() argument 3004 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); srp_parse_options() 3014 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); srp_parse_options() 3033 &target->orig_dgid.raw[i]) < 1) { srp_parse_options() 3047 target->pkey = cpu_to_be16(token); srp_parse_options() 3056 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); srp_parse_options() 3065 target->scsi_host->max_sectors = token; srp_parse_options() 3073 target->scsi_host->can_queue = token; srp_parse_options() 3074 target->queue_size = token + SRP_RSP_SQ_SIZE + srp_parse_options() 3077 target->scsi_host->cmd_per_lun = token; srp_parse_options() 3086 target->scsi_host->cmd_per_lun = token; srp_parse_options() 3101 target->io_class = token; srp_parse_options() 3110 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); srp_parse_options() 3120 target->cmd_sg_cnt = token; srp_parse_options() 3128 target->allow_ext_sg = !!token; srp_parse_options() 3138 target->sg_tablesize = token; srp_parse_options() 3146 target->comp_vector = token; srp_parse_options() 3155 target->tl_retry_count = token; srp_parse_options() 3159 pr_warn("unknown parameter or missing value '%s' in target creation request\n", srp_parse_options() 3171 pr_warn("target creation request is missing parameter '%s'\n", srp_parse_options() 3174 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue srp_parse_options() 3177 target->scsi_host->cmd_per_lun, 
srp_parse_options() 3178 target->scsi_host->can_queue); srp_parse_options() 3192 struct srp_target_port *target; srp_create_target() local 3210 target = host_to_target(target_host); srp_create_target() 3212 target->io_class = SRP_REV16A_IB_IO_CLASS; srp_create_target() 3213 target->scsi_host = target_host; srp_create_target() 3214 target->srp_host = host; srp_create_target() 3215 target->lkey = host->srp_dev->pd->local_dma_lkey; srp_create_target() 3216 target->global_mr = host->srp_dev->global_mr; srp_create_target() 3217 target->cmd_sg_cnt = cmd_sg_entries; srp_create_target() 3218 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; srp_create_target() 3219 target->allow_ext_sg = allow_ext_sg; srp_create_target() 3220 target->tl_retry_count = 7; srp_create_target() 3221 target->queue_size = SRP_DEFAULT_QUEUE_SIZE; srp_create_target() 3227 scsi_host_get(target->scsi_host); srp_create_target() 3231 ret = srp_parse_options(buf, target); srp_create_target() 3235 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; srp_create_target() 3237 if (!srp_conn_unique(target->srp_host, target)) { srp_create_target() 3238 shost_printk(KERN_INFO, target->scsi_host, srp_create_target() 3239 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", srp_create_target() 3240 be64_to_cpu(target->id_ext), srp_create_target() 3241 be64_to_cpu(target->ioc_guid), srp_create_target() 3242 be64_to_cpu(target->initiator_ext)); srp_create_target() 3247 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && srp_create_target() 3248 target->cmd_sg_cnt < target->sg_tablesize) { srp_create_target() 3250 target->sg_tablesize = target->cmd_sg_cnt; srp_create_target() 3253 target_host->sg_tablesize = target->sg_tablesize; srp_create_target() 3254 target->indirect_size = target->sg_tablesize * srp_create_target() 3256 target->max_iu_len = sizeof (struct srp_cmd) + srp_create_target() 3258 target->cmd_sg_cnt * sizeof (struct srp_direct_buf); srp_create_target() 3260 INIT_WORK(&target->tl_err_work, srp_tl_err_work); srp_create_target() 3261 INIT_WORK(&target->remove_work, srp_remove_work); srp_create_target() 3262 spin_lock_init(&target->lock); srp_create_target() 3263 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL); srp_create_target() 3268 target->ch_count = max_t(unsigned, num_online_nodes(), srp_create_target() 3273 target->ch = kcalloc(target->ch_count, sizeof(*target->ch), srp_create_target() 3275 if (!target->ch) srp_create_target() 3280 const int ch_start = (node_idx * target->ch_count / for_each_online_node() 3282 const int ch_end = ((node_idx + 1) * target->ch_count / for_each_online_node() 3285 num_online_nodes() + target->comp_vector) for_each_online_node() 3288 num_online_nodes() + target->comp_vector) for_each_online_node() 3297 ch = &target->ch[ch_start + cpu_idx]; for_each_online_cpu() 3298 ch->target = target; for_each_online_cpu() 3317 shost_printk(KERN_ERR, target->scsi_host, for_each_online_cpu() 3320 target->ch_count); for_each_online_cpu() 3324 srp_free_ch_ib(target, ch); for_each_online_cpu() 3325 srp_free_req_data(target, ch); for_each_online_cpu() 3326 target->ch_count = ch - target->ch; for_each_online_cpu() 3338 target->scsi_host->nr_hw_queues = target->ch_count; 3340 ret = srp_add_target(host, target); 3344 if (target->state != SRP_TARGET_REMOVED) { 3345 shost_printk(KERN_DEBUG, target->scsi_host, PFX 3346 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", 
3347 be64_to_cpu(target->id_ext), 3348 be64_to_cpu(target->ioc_guid), 3349 be16_to_cpu(target->pkey), 3350 be64_to_cpu(target->service_id), 3351 target->sgid.raw, target->orig_dgid.raw); 3359 scsi_host_put(target->scsi_host); 3361 scsi_host_put(target->scsi_host); 3366 srp_disconnect_target(target); 3368 for (i = 0; i < target->ch_count; i++) { 3369 ch = &target->ch[i]; 3370 srp_free_ch_ib(target, ch); 3371 srp_free_req_data(target, ch); 3374 kfree(target->ch); 3537 struct srp_target_port *target; srp_remove_one() local 3547 * target ports can be created. srp_remove_one() 3552 * Remove all target ports. srp_remove_one() 3555 list_for_each_entry(target, &host->target_list, list) srp_remove_one() 3556 srp_queue_remove_work(target); srp_remove_one() 3560 * Wait for tl_err and target port removal tasks. srp_remove_one() 2896 srp_conn_unique(struct srp_host *host, struct srp_target_port *target) srp_conn_unique() argument
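srp_create_target() above spreads target->ch_count RDMA channels evenly over the online NUMA nodes using the classic integer partition idiom ch_start = node * n / nodes, ch_end = (node + 1) * n / nodes. A stand-alone illustration of that split (plain user-space C, the counts are example values):

    #include <stdio.h>

    int main(void)
    {
            int ch_count = 10, nodes = 3, node;

            for (node = 0; node < nodes; node++) {
                    int start = node * ch_count / nodes;        /* inclusive */
                    int end = (node + 1) * ch_count / nodes;    /* exclusive */
                    printf("node %d -> channels [%d, %d)\n", node, start, end);
            }
            return 0;   /* prints [0,3) [3,6) [6,10): each channel assigned exactly once */
    }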
|
/linux-4.4.14/drivers/scsi/arm/ |
H A D | queue.h | 67 * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag) 68 * Purpose : remove a SCSI command from the queue for a specified target/lun/tag 70 * target - target that we want 75 extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, 79 * Function: queue_remove_all_target(queue, target) 80 * Purpose : remove all SCSI commands from the queue for a specified target 82 * target - target device id 85 extern void queue_remove_all_target(Queue_t *queue, int target); 88 * Function: int queue_probetgtlun (queue, target, lun) 90 * target/lun. 92 * target - target we want to probe 93 * lun - lun on target 96 extern int queue_probetgtlun (Queue_t *queue, int target, int lun);
|
H A D | queue.c | 158 * exclude - bit array of target&lun which is busy 201 * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag) 202 * Purpose : remove a SCSI command from the queue for a specified target/lun/tag 204 * target - target that we want 209 struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun, queue_remove_tgtluntag() argument 219 if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun && queue_remove_tgtluntag() 231 * Function: queue_remove_all_target(queue, target) 232 * Purpose : remove all SCSI commands from the queue for a specified target 234 * target - target device id 237 void queue_remove_all_target(Queue_t *queue, int target) queue_remove_all_target() argument 245 if (q->SCpnt->device->id == target) queue_remove_all_target() 252 * Function: int queue_probetgtlun (queue, target, lun) 254 * target/lun. 256 * target - target we want to probe 257 * lun - lun on target 260 int queue_probetgtlun (Queue_t *queue, int target, int lun) queue_probetgtlun() argument 269 if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun) { queue_probetgtlun()
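queue_remove_tgtluntag() above is a straightforward predicate scan: walk the queue, compare device id, lun and tag, and unlink the first match. The same shape as a stand-alone sketch over a singly linked list (illustrative types, no locking):

    #include <stdio.h>
    #include <stddef.h>

    struct cmd {
            int target, lun, tag;
            struct cmd *next;
    };

    /* Unlink and return the first command matching target/lun/tag. */
    static struct cmd *remove_tgtluntag(struct cmd **head, int target, int lun, int tag)
    {
            struct cmd **pp, *q;

            for (pp = head; (q = *pp) != NULL; pp = &q->next)
                    if (q->target == target && q->lun == lun && q->tag == tag) {
                            *pp = q->next;      /* splice it out */
                            return q;
                    }
            return NULL;
    }

    int main(void)
    {
            struct cmd c2 = { 2, 0, 7, NULL }, c1 = { 1, 0, 3, &c2 };
            struct cmd *head = &c1, *found = remove_tgtluntag(&head, 2, 0, 7);

            printf("%s\n", found ? "removed target 2" : "not found");
            return 0;
    }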
|
/linux-4.4.14/fs/kernfs/ |
H A D | symlink.c | 21 * @target: target node for the symlink to point to 27 struct kernfs_node *target) kernfs_create_link() 37 kn->ns = target->ns; kernfs_create_link() 38 kn->symlink.target_kn = target; kernfs_create_link() 39 kernfs_get(target); /* ref owned by symlink */ kernfs_create_link() 50 struct kernfs_node *target, char *path) kernfs_get_target_path() 59 kn = target->parent; kernfs_get_target_path() 71 /* determine end of target string for reverse fillup */ kernfs_get_target_path() 72 kn = target; kernfs_get_target_path() 85 /* reverse fillup of target string from target to base */ kernfs_get_target_path() 86 kn = target; kernfs_get_target_path() 105 struct kernfs_node *target = kn->symlink.target_kn; kernfs_getlink() local 109 error = kernfs_get_target_path(parent, target, path); kernfs_getlink() 25 kernfs_create_link(struct kernfs_node *parent, const char *name, struct kernfs_node *target) kernfs_create_link() argument 49 kernfs_get_target_path(struct kernfs_node *parent, struct kernfs_node *target, char *path) kernfs_get_target_path() argument
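kernfs_get_target_path() above builds the symlink body by walking up from the link's parent (one "../" per level above the common ancestor) and then filling in the target's path back to front. A simplified user-space sketch of the same idea, operating on '/'-separated strings instead of kernfs_node trees; no overflow or edge-case handling:

    #include <stdio.h>
    #include <string.h>

    /* Write into buf a relative path from directory 'base' to 'target'.
     * Both are absolute with no trailing slash. Sketch only. */
    static void rel_path(const char *base, const char *target, char *buf)
    {
            size_t i = 0;
            const char *p;

            while (base[i] && base[i] == target[i])     /* shared prefix */
                    i++;
            while (i && base[i - 1] != '/')             /* back up to a component boundary */
                    i--;

            buf[0] = '\0';
            for (p = base + i; *p; p++)                 /* one ".." per level left in base */
                    if (*p == '/')
                            strcat(buf, "../");
            if (base[i])
                    strcat(buf, "../");
            strcat(buf, target + i);                    /* then the target's tail */
    }

    int main(void)
    {
            char buf[256];
            rel_path("/sys/devices/foo", "/sys/class/bar", buf);
            printf("%s\n", buf);    /* ../../class/bar */
            return 0;
    }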
|
/linux-4.4.14/net/bridge/netfilter/ |
H A D | ebt_mark.c | 11 /* The mark target can be used in any chain, 25 int action = info->target & -16; ebt_mark_tg() 36 return info->target | ~EBT_VERDICT_BITS; ebt_mark_tg() 44 tmp = info->target | ~EBT_VERDICT_BITS; ebt_mark_tg_check() 49 tmp = info->target & ~EBT_VERDICT_BITS; ebt_mark_tg_check() 58 compat_uint_t target; member in struct:compat_ebt_mark_t_info 67 kern->target = user->target; mark_tg_compat_from_user() 76 put_user(kern->target, &user->target)) mark_tg_compat_to_user() 86 .target = ebt_mark_tg,
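ebt_mark packs two values into info->target: the mark operation in the upper bits (action = info->target & -16) and a verdict in the low four bits, recovered by sign extension as info->target | ~EBT_VERDICT_BITS (ebtables verdicts such as EBT_ACCEPT are small negative numbers). A stand-alone demonstration of that packing, with illustrative constant values:

    #include <stdio.h>

    #define VERDICT_BITS 0x0000000f
    #define ACCEPT (-1)             /* ebtables-style negative verdict */
    #define MARK_SET 0x00000010     /* illustrative action flag */

    int main(void)
    {
            int target = MARK_SET | (ACCEPT & VERDICT_BITS);  /* pack both fields */
            int action = target & -16;                        /* upper bits: the action */
            int verdict = target | ~VERDICT_BITS;             /* sign-extend the low bits */

            printf("action=%#x verdict=%d\n", action, verdict); /* 0x10, -1 */
            return 0;
    }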
|
H A D | ebt_snat.c | 28 if (!(info->target & NAT_ARP_BIT) && ebt_snat_tg() 42 return info->target | ~EBT_VERDICT_BITS; ebt_snat_tg() 50 tmp = info->target | ~EBT_VERDICT_BITS; ebt_snat_tg_check() 56 tmp = info->target | EBT_VERDICT_BITS; ebt_snat_tg_check() 68 .target = ebt_snat_tg,
|
H A D | ebt_dnat.c | 26 return info->target; ebt_dnat_tg() 34 if (BASE_CHAIN && info->target == EBT_RETURN) ebt_dnat_tg_check() 55 .target = ebt_dnat_tg,
|
H A D | ebt_arpreply.c | 57 return info->target; ebt_arpreply_tg() 65 if (BASE_CHAIN && info->target == EBT_RETURN) ebt_arpreply_tg_check() 79 .target = ebt_arpreply_tg, 97 MODULE_DESCRIPTION("Ebtables: ARP reply target");
|
H A D | ebt_redirect.c | 33 return info->target; ebt_redirect_tg() 41 if (BASE_CHAIN && info->target == EBT_RETURN) ebt_redirect_tg_check() 61 .target = ebt_redirect_tg,
|
/linux-4.4.14/net/ipv4/netfilter/ |
H A D | arpt_mangle.c | 9 MODULE_DESCRIPTION("arptables arp payload mangle target"); 12 target(struct sk_buff *skb, const struct xt_action_param *par) target() function 54 return mangle->target; target() 65 if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT && checkentry() 66 mangle->target != XT_CONTINUE) checkentry() 74 .target = target,
|
H A D | ip_tables.c | 226 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { get_chainname_rulenum() 227 /* Head of user chain: ERROR target with chainname */ get_chainname_rulenum() 228 *chainname = t->target.data; get_chainname_rulenum() 234 strcmp(t->target.u.kernel.target->name, get_chainname_rulenum() 237 /* Tail of chains: STANDARD target (return/policy) */ get_chainname_rulenum() 377 IP_NF_ASSERT(t->u.kernel.target); 385 /* Standard target? */ 386 if (!t->u.kernel.target->target) { 420 acpar.target = t->u.kernel.target; 423 verdict = t->u.kernel.target->target(skb, &acpar); 447 const struct ipt_entry *target) find_jump_target() 452 if (iter == target) find_jump_target() 492 (strcmp(t->target.u.user.name, mark_source_chains() 497 if ((strcmp(t->target.u.user.name, mark_source_chains() 542 if (strcmp(t->target.u.user.name, mark_source_chains() 639 .target = t->u.kernel.target, check_target() 650 t->u.kernel.target->name); check_target() 661 struct xt_target *target; find_check_entry() local 685 target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, 687 if (IS_ERR(target)) { 689 ret = PTR_ERR(target); 692 t->u.kernel.target = target; 700 module_put(t->u.kernel.target->me); 771 "use the STANDARD target with " check_entry_size_and_hooks() 798 par.target = t->u.kernel.target; cleanup_entry() 801 if (par.target->destroy != NULL) cleanup_entry() 802 par.target->destroy(&par); cleanup_entry() 803 module_put(par.target->me); cleanup_entry() 998 t->u.kernel.target->name, copy_entries_to_user() 999 strlen(t->u.kernel.target->name)+1) != 0) { copy_entries_to_user() 1043 off += xt_compat_target_offset(t->u.kernel.target); compat_calc_entry() 1439 module_put(t->u.kernel.target->me); compat_release_entry() 1451 struct xt_target *target; check_compat_entry_size_and_hooks() local 1490 target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, 1492 if (IS_ERR(target)) { 1495 ret = PTR_ERR(target); 1498 t->u.kernel.target = target; 1500 off += xt_compat_target_offset(target); 1509 module_put(t->u.kernel.target->me); 1525 struct xt_target *target; compat_copy_entry_from_user() local 1544 target = t->u.kernel.target; compat_copy_entry_from_user() 1887 int target; do_ipt_get_ctl() local 1900 target = 1; do_ipt_get_ctl() 1902 target = 0; do_ipt_get_ctl() 1906 target, &ret), do_ipt_get_ctl() 2034 .target = ipt_error, 446 find_jump_target(const struct xt_table_info *t, const struct ipt_entry *target) find_jump_target() argument
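The fast path above distinguishes the built-in STANDARD target, whose ->target function pointer is NULL so the verdict is read directly from the rule, from extension targets, which are invoked through the pointer. The shape of that dispatch, reduced to a stand-alone sketch with illustrative verdict values:

    #include <stdio.h>

    struct xt_target {
            const char *name;
            /* NULL for the standard target: the verdict is stored, not computed */
            int (*target)(void *skb);
    };

    static int ext_target(void *skb) { (void)skb; return -1; /* e.g. continue */ }

    static int run_rule(const struct xt_target *t, int stored_verdict, void *skb)
    {
            if (!t->target)                 /* standard target? */
                    return stored_verdict;  /* jump/return/policy value from the rule */
            return t->target(skb);          /* extension computes its own verdict */
    }

    int main(void)
    {
            struct xt_target std = { "standard", NULL };
            struct xt_target ext = { "LOG", ext_target };

            printf("%d %d\n", run_rule(&std, 7, NULL), run_rule(&ext, 0, NULL));
            return 0;
    }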
|
H A D | arp_tables.c | 159 dprintf("Source or target device address mismatch.\n"); arp_packet_match() 168 dprintf("Source or target IP address mismatch.\n"); arp_packet_match() 310 /* Standard target? */ arpt_do_table() 311 if (!t->u.kernel.target->target) { arpt_do_table() 339 acpar.target = t->u.kernel.target; arpt_do_table() 341 verdict = t->u.kernel.target->target(skb, &acpar); arpt_do_table() 371 const struct arpt_entry *target) find_jump_target() 376 if (iter == target) find_jump_target() 419 (strcmp(t->target.u.user.name, mark_source_chains() 424 if ((strcmp(t->target.u.user.name, mark_source_chains() 461 if (strcmp(t->target.u.user.name, mark_source_chains() 504 .target = t->u.kernel.target, check_target() 513 t->u.kernel.target->name); check_target() 523 struct xt_target *target; find_check_entry() local 531 target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, find_check_entry() 533 if (IS_ERR(target)) { find_check_entry() 535 ret = PTR_ERR(target); find_check_entry() 538 t->u.kernel.target = target; find_check_entry() 545 module_put(t->u.kernel.target->me); find_check_entry() 609 "use the STANDARD target with " check_entry_size_and_hooks() 629 par.target = t->u.kernel.target; cleanup_entry() 632 if (par.target->destroy != NULL) cleanup_entry() 633 par.target->destroy(&par); cleanup_entry() 634 module_put(par.target->me); cleanup_entry() 815 t->u.kernel.target->name, copy_entries_to_user() 816 strlen(t->u.kernel.target->name)+1) != 0) { copy_entries_to_user() 858 off += xt_compat_target_offset(t->u.kernel.target); compat_calc_entry() 1195 module_put(t->u.kernel.target->me); compat_release_entry() 1206 struct xt_target *target; check_compat_entry_size_and_hooks() local 1237 target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, check_compat_entry_size_and_hooks() 1239 if (IS_ERR(target)) { check_compat_entry_size_and_hooks() 1242 ret = PTR_ERR(target); check_compat_entry_size_and_hooks() 1245 t->u.kernel.target = target; check_compat_entry_size_and_hooks() 1247 off += xt_compat_target_offset(target); check_compat_entry_size_and_hooks() 1256 module_put(t->u.kernel.target->me); check_compat_entry_size_and_hooks() 1267 struct xt_target *target; compat_copy_entry_from_user() local 1282 target = t->u.kernel.target; compat_copy_entry_from_user() 1738 .target = arpt_error, 370 find_jump_target(const struct xt_table_info *t, const struct arpt_entry *target) find_jump_target() argument
|
/linux-4.4.14/tools/build/ |
H A D | fixdep.c | 23 char *target; variable 29 fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n"); usage() 34 * Print out the commandline prefixed with cmd_<target filename> := 38 printf("cmd_%s := %s\n\n", target, cmdline); print_cmdline() 64 /* Is the token we found a target name? */ parse_dep_file() 66 /* Don't write any target names into the dependency file */ parse_dep_file() 86 * process the first target name, which parse_dep_file() 88 * and ignore any other target names, parse_dep_file() 95 target, s); parse_dep_file() 97 target); parse_dep_file() 115 printf("\n%s: $(deps_%s)\n\n", target, target); parse_dep_file() 116 printf("$(deps_%s):\n", target); parse_dep_file() 161 target = argv[2]; main()
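Given a target of foo.o and an illustrative command line of gcc -Wall -c -o foo.o foo.c, the printf calls above emit a make fragment of this shape:

    cmd_foo.o := gcc -Wall -c -o foo.o foo.c

    foo.o: $(deps_foo.o)

    $(deps_foo.o):

with the parsed dependency list assigned to a deps_foo.o variable by the parsing code elided from this excerpt, so a later make run re-executes the command when any dependency, or the saved command line itself, changes.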
|
/linux-4.4.14/arch/tile/lib/ |
H A D | delay.c | 32 cycles_t target = get_cycles(); __ndelay() local 33 target += ns2cycles(nsecs); __ndelay() 34 while (get_cycles() < target) __ndelay() 41 cycles_t target = get_cycles() + cycles; __delay() local 42 while (get_cycles() < target) __delay()
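__ndelay() and __delay() above busy-wait by computing an absolute cycle count up front and spinning until get_cycles() passes it, so loop overhead cannot accumulate into drift. The same pattern in portable user-space C, using CLOCK_MONOTONIC nanoseconds in place of the tile cycle counter:

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static void ndelay(uint64_t nsecs)
    {
            uint64_t target = now_ns() + nsecs;   /* absolute deadline first */
            while (now_ns() < target)
                    ;                             /* spin until the deadline passes */
    }

    int main(void)
    {
            uint64_t t0 = now_ns();
            ndelay(5 * 1000 * 1000);              /* ~5 ms */
            printf("waited %llu ns\n", (unsigned long long)(now_ns() - t0));
            return 0;
    }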
|
/linux-4.4.14/drivers/staging/lustre/lustre/fld/ |
H A D | fld_request.c | 120 struct lu_fld_target *target; fld_rrb_scan() local 134 list_for_each_entry(target, &fld->lcf_targets, ft_chain) { fld_rrb_scan() 135 if (target->ft_idx == hash) fld_rrb_scan() 136 return target; fld_rrb_scan() 140 /* It is possible the remote target(MDT) are not connected to fld_rrb_scan() 147 CERROR("%s: Can't find target by hash %d (seq %#llx). Targets (%d):\n", fld_rrb_scan() 150 list_for_each_entry(target, &fld->lcf_targets, ft_chain) { fld_rrb_scan() 151 const char *srv_name = target->ft_srv != NULL ? fld_rrb_scan() 152 target->ft_srv->lsf_name : "<null>"; fld_rrb_scan() 153 const char *exp_name = target->ft_exp != NULL ? fld_rrb_scan() 154 (char *)target->ft_exp->exp_obd->obd_uuid.uuid : fld_rrb_scan() 158 target->ft_exp, exp_name, target->ft_srv, fld_rrb_scan() 159 srv_name, target->ft_idx); fld_rrb_scan() 163 * If target is not found, there is logical error anyway, so here is fld_rrb_scan() 184 struct lu_fld_target *target; fld_client_get_target() local 189 target = fld->lcf_hash->fh_scan_func(fld, seq); fld_client_get_target() 192 if (target != NULL) { fld_client_get_target() 193 CDEBUG(D_INFO, "%s: Found target (idx %llu) by seq %#llx\n", fld_client_get_target() 194 fld->lcf_name, target->ft_idx, seq); fld_client_get_target() 197 return target; fld_client_get_target() 208 struct lu_fld_target *target, *tmp; fld_client_add_target() local 216 CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n", fld_client_add_target() 220 CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n", fld_client_add_target() 223 target = kzalloc(sizeof(*target), GFP_NOFS); fld_client_add_target() 224 if (!target) fld_client_add_target() 231 kfree(target); fld_client_add_target() 238 target->ft_exp = tar->ft_exp; fld_client_add_target() 239 if (target->ft_exp != NULL) fld_client_add_target() 240 class_export_get(target->ft_exp); fld_client_add_target() 241 target->ft_srv = tar->ft_srv; fld_client_add_target() 242 target->ft_idx = tar->ft_idx; fld_client_add_target() 244 list_add_tail(&target->ft_chain, fld_client_add_target() 257 struct lu_fld_target *target, *tmp; fld_client_del_target() local 260 list_for_each_entry_safe(target, tmp, fld_client_del_target() 262 if (target->ft_idx == idx) { fld_client_del_target() 264 list_del(&target->ft_chain); fld_client_del_target() 267 if (target->ft_exp != NULL) fld_client_del_target() 268 class_export_put(target->ft_exp); fld_client_del_target() 270 kfree(target); fld_client_del_target() 375 struct lu_fld_target *target, *tmp; fld_client_fini() local 378 list_for_each_entry_safe(target, tmp, fld_client_fini() 381 list_del(&target->ft_chain); fld_client_fini() 382 if (target->ft_exp != NULL) fld_client_fini() 383 class_export_put(target->ft_exp); fld_client_fini() 384 kfree(target); fld_client_fini() 453 struct lu_fld_target *target; fld_client_lookup() local 465 target = fld_client_get_target(fld, seq); fld_client_lookup() 466 LASSERT(target != NULL); fld_client_lookup() 468 CDEBUG(D_INFO, "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n", fld_client_lookup() 469 fld->lcf_name, seq, fld_target_name(target), target->ft_idx); fld_client_lookup() 473 rc = fld_client_rpc(target->ft_exp, &res, FLD_LOOKUP); fld_client_lookup()
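fld_rrb_scan() above selects the target whose ft_idx equals a hash of the sequence number; the hash function itself is outside this excerpt, so a simple modulo over the target count is assumed here purely for illustration. A minimal sketch of that selection over an array instead of a kernel list:

    #include <stdio.h>
    #include <stdint.h>

    struct fld_target { uint64_t ft_idx; const char *name; };

    static struct fld_target *rrb_scan(struct fld_target *tgts, int count, uint64_t seq)
    {
            uint64_t hash = seq % count;    /* assumed round-robin hash */
            int i;

            for (i = 0; i < count; i++)
                    if (tgts[i].ft_idx == hash)
                            return &tgts[i];
            return NULL;                    /* not found: logical error upstream */
    }

    int main(void)
    {
            struct fld_target tgts[] = { {0, "MDT0"}, {1, "MDT1"}, {2, "MDT2"} };
            printf("%s\n", rrb_scan(tgts, 3, 0x1007)->name);  /* 0x1007 % 3 = 2 -> MDT2 */
            return 0;
    }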
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
H A D | twsi.c | 85 static void i2c_wait_for_writes(struct hfi1_devdata *dd, u32 target) i2c_wait_for_writes() argument 92 hfi1_gpio_mod(dd, target, 0, 0, 0); i2c_wait_for_writes() 107 static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit) scl_out() argument 116 hfi1_gpio_mod(dd, target, 0, bit ? 0 : mask, mask); scl_out() 128 if (mask & hfi1_gpio_mod(dd, target, 0, 0, 0)) scl_out() 136 i2c_wait_for_writes(dd, target); scl_out() 139 static void sda_out(struct hfi1_devdata *dd, u32 target, u8 bit) sda_out() argument 146 hfi1_gpio_mod(dd, target, 0, bit ? 0 : mask, mask); sda_out() 148 i2c_wait_for_writes(dd, target); sda_out() 152 static u8 sda_in(struct hfi1_devdata *dd, u32 target, int wait) sda_in() argument 158 hfi1_gpio_mod(dd, target, 0, 0, mask); sda_in() 159 read_val = hfi1_gpio_mod(dd, target, 0, 0, 0); sda_in() 161 i2c_wait_for_writes(dd, target); sda_in() 169 static int i2c_ackrcv(struct hfi1_devdata *dd, u32 target) i2c_ackrcv() argument 175 ack_received = sda_in(dd, target, 1); i2c_ackrcv() 176 scl_out(dd, target, 1); i2c_ackrcv() 177 ack_received = sda_in(dd, target, 1) == 0; i2c_ackrcv() 178 scl_out(dd, target, 0); i2c_ackrcv() 182 static void stop_cmd(struct hfi1_devdata *dd, u32 target); 190 static int rd_byte(struct hfi1_devdata *dd, u32 target, int last) rd_byte() argument 198 scl_out(dd, target, 1); rd_byte() 199 data |= sda_in(dd, target, 0); rd_byte() 200 scl_out(dd, target, 0); rd_byte() 203 scl_out(dd, target, 1); rd_byte() 204 stop_cmd(dd, target); rd_byte() 206 sda_out(dd, target, 0); rd_byte() 207 scl_out(dd, target, 1); rd_byte() 208 scl_out(dd, target, 0); rd_byte() 209 sda_out(dd, target, 1); rd_byte() 221 static int wr_byte(struct hfi1_devdata *dd, u32 target, u8 data) wr_byte() argument 228 sda_out(dd, target, bit); wr_byte() 229 scl_out(dd, target, 1); wr_byte() 230 scl_out(dd, target, 0); wr_byte() 232 return (!i2c_ackrcv(dd, target)) ? 
1 : 0; wr_byte() 239 static void start_seq(struct hfi1_devdata *dd, u32 target) start_seq() argument 241 sda_out(dd, target, 1); start_seq() 242 scl_out(dd, target, 1); start_seq() 243 sda_out(dd, target, 0); start_seq() 245 scl_out(dd, target, 0); start_seq() 254 static void stop_seq(struct hfi1_devdata *dd, u32 target) stop_seq() argument 256 scl_out(dd, target, 0); stop_seq() 257 sda_out(dd, target, 0); stop_seq() 258 scl_out(dd, target, 1); stop_seq() 259 sda_out(dd, target, 1); stop_seq() 268 static void stop_cmd(struct hfi1_devdata *dd, u32 target) stop_cmd() argument 270 stop_seq(dd, target); stop_cmd() 279 int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target) hfi1_twsi_reset() argument 295 hfi1_gpio_mod(dd, target, 0, 0, mask); hfi1_twsi_reset() 308 scl_out(dd, target, 0); hfi1_twsi_reset() 309 scl_out(dd, target, 1); hfi1_twsi_reset() 311 was_high |= sda_in(dd, target, 0); hfi1_twsi_reset() 320 pins = hfi1_gpio_mod(dd, target, 0, 0, 0); hfi1_twsi_reset() 326 sda_out(dd, target, 0); hfi1_twsi_reset() 329 sda_out(dd, target, 1); hfi1_twsi_reset() 343 static int twsi_wr(struct hfi1_devdata *dd, u32 target, int data, int flags) twsi_wr() argument 348 start_seq(dd, target); twsi_wr() 351 ret = wr_byte(dd, target, data); twsi_wr() 354 stop_cmd(dd, target); twsi_wr() 372 int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr, hfi1_twsi_blk_rd() argument 383 ret = twsi_wr(dd, target, addr, HFI1_TWSI_START); hfi1_twsi_blk_rd() 386 ret = twsi_wr(dd, target, dev | WRITE_CMD, HFI1_TWSI_START); hfi1_twsi_blk_rd() 388 stop_cmd(dd, target); hfi1_twsi_blk_rd() 399 ret = twsi_wr(dd, target, addr, 0); hfi1_twsi_blk_rd() 409 ret = twsi_wr(dd, target, dev | READ_CMD, HFI1_TWSI_START); hfi1_twsi_blk_rd() 412 stop_cmd(dd, target); hfi1_twsi_blk_rd() 429 *bp++ = rd_byte(dd, target, !len); hfi1_twsi_blk_rd() 449 int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr, hfi1_twsi_blk_wr() argument 459 if (twsi_wr(dd, target, (addr << 1) | WRITE_CMD, hfi1_twsi_blk_wr() 465 if (twsi_wr(dd, target, hfi1_twsi_blk_wr() 468 ret = twsi_wr(dd, target, addr, 0); hfi1_twsi_blk_wr() 482 if (twsi_wr(dd, target, *bp++, 0)) hfi1_twsi_blk_wr() 485 stop_cmd(dd, target); hfi1_twsi_blk_wr() 499 while (twsi_wr(dd, target, hfi1_twsi_blk_wr() 501 stop_cmd(dd, target); hfi1_twsi_blk_wr() 506 rd_byte(dd, target, 1); hfi1_twsi_blk_wr() 513 stop_cmd(dd, target); hfi1_twsi_blk_wr()
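rd_byte() and wr_byte() above bit-bang I2C: bits move MSB first, one SCL pulse per bit, with an ACK sampled on a ninth clock. A compact, runnable sketch of the read side; scl_out()/sda_in() here are simulated stand-ins for the hfi1_gpio_mod() calls, replaying a canned bit pattern:

    #include <stdio.h>
    #include <stdint.h>

    /* Simulated bus for the sketch: SDA replays a canned bit pattern. */
    static const int sda_bits[8] = { 1, 0, 1, 0, 0, 1, 0, 1 };  /* 0xa5 */
    static int bit_pos;

    static void scl_out(int bit) { (void)bit; /* would drive the SCL line */ }
    static int sda_in(void) { return sda_bits[bit_pos++]; }

    /* Clock in one byte, MSB first: raise SCL, sample SDA, lower SCL. */
    static uint8_t i2c_rd_byte(void)
    {
            uint8_t data = 0;
            int i;

            for (i = 0; i < 8; i++) {
                    data <<= 1;
                    scl_out(1);             /* clock high: slave presents the bit */
                    data |= (uint8_t)(sda_in() & 1);
                    scl_out(0);             /* clock low */
            }
            return data;
    }

    int main(void)
    {
            printf("0x%02x\n", i2c_rd_byte());      /* 0xa5 */
            return 0;
    }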
|
H A D | qsfp.c | 67 static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, __i2c_write() argument 75 ret = hfi1_twsi_reset(dd, target); __i2c_write() 86 ret = hfi1_twsi_blk_wr(dd, target, i2c_addr, offset, __i2c_write() 102 int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, i2c_write() argument 110 ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len); i2c_write() 120 static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, __i2c_read() argument 129 ret = hfi1_twsi_reset(dd, target); __i2c_read() 142 ret = hfi1_twsi_blk_rd(dd, target, i2c_addr, offset, __i2c_read() 174 int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, i2c_read() argument 182 ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len); i2c_read() 189 int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, qsfp_write() argument 209 ret = __i2c_write(ppd, target, QSFP_DEV, qsfp_write() 226 ret = __i2c_write(ppd, target, QSFP_DEV, offset, bp + count, qsfp_write() 242 int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, qsfp_read() argument 261 ret = __i2c_write(ppd, target, QSFP_DEV, qsfp_read() 278 ret = __i2c_read(ppd, target, QSFP_DEV, offset, bp + count, qsfp_read() 301 u32 target = ppd->dd->hfi1_id; refresh_qsfp_cache() local 314 ret = qsfp_read(ppd, target, 0, cache, 256); refresh_qsfp_cache() 331 ret = qsfp_read(ppd, target, 384, cache + 256, 128); refresh_qsfp_cache() 336 ret = qsfp_read(ppd, target, 640, cache + 384, 128); refresh_qsfp_cache() 341 ret = qsfp_read(ppd, target, 896, cache + 512, 128); refresh_qsfp_cache() 348 ret = qsfp_read(ppd, target, 640, cache + 384, 128); refresh_qsfp_cache() 353 ret = qsfp_read(ppd, target, 896, cache + 512, 128); refresh_qsfp_cache() 360 ret = qsfp_read(ppd, target, 384, cache + 256, 128); refresh_qsfp_cache() 365 ret = qsfp_read(ppd, target, 896, cache + 512, 128); refresh_qsfp_cache() 372 ret = qsfp_read(ppd, target, 896, cache + 512, 128); refresh_qsfp_cache()
|
/linux-4.4.14/fs/sysfs/ |
H A D | symlink.c | 25 struct kernfs_node *kn, *target = NULL; sysfs_do_create_link_sd() local 36 target = target_kobj->sd; sysfs_do_create_link_sd() 37 kernfs_get(target); sysfs_do_create_link_sd() 41 if (!target) sysfs_do_create_link_sd() 44 kn = kernfs_create_link(parent, name, target); sysfs_do_create_link_sd() 45 kernfs_put(target); sysfs_do_create_link_sd() 58 * @target: object we're pointing to. 61 int sysfs_create_link_sd(struct kernfs_node *kn, struct kobject *target, sysfs_create_link_sd() argument 64 return sysfs_do_create_link_sd(kn, target, name, 1); sysfs_create_link_sd() 67 static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target, sysfs_do_create_link() argument 80 return sysfs_do_create_link_sd(parent, target, name, warn); sysfs_do_create_link() 86 * @target: object we're pointing to. 89 int sysfs_create_link(struct kobject *kobj, struct kobject *target, sysfs_create_link() argument 92 return sysfs_do_create_link(kobj, target, name, 1); sysfs_create_link() 99 * @target: object we're pointing to. 105 int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, sysfs_create_link_nowarn() argument 108 return sysfs_do_create_link(kobj, target, name, 0); sysfs_create_link_nowarn() 126 * We don't own @target and it may be removed at any time. sysfs_delete_link()
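sysfs_create_link() above is the usual way a driver exposes an alias: it resolves both kobjects to their kernfs nodes and delegates to kernfs_create_link(). A typical call site, sketched for a hypothetical driver that owns two devices dev_a and dev_b:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* Point /sys/.../dev_a/peer at dev_b; removed again on teardown. */
    static int link_peers(struct device *dev_a, struct device *dev_b)
    {
            int ret;

            ret = sysfs_create_link(&dev_a->kobj, &dev_b->kobj, "peer");
            if (ret)
                    return ret;
            /* ... later: sysfs_remove_link(&dev_a->kobj, "peer"); */
            return 0;
    }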
|
H A D | group.c | 330 * @target: The target kobject of the symlink to create. 334 struct kobject *target, const char *link_name) sysfs_add_link_to_group() 343 error = sysfs_create_link_sd(parent, target, link_name); sysfs_add_link_to_group() 373 * @target_kobj: The target kobject. 374 * @target_name: The name of the target group or attribute. 380 struct kernfs_node *target; __compat_only_sysfs_link_entry_to_kobj() local 390 target = target_kobj->sd; __compat_only_sysfs_link_entry_to_kobj() 391 if (target) __compat_only_sysfs_link_entry_to_kobj() 392 kernfs_get(target); __compat_only_sysfs_link_entry_to_kobj() 394 if (!target) __compat_only_sysfs_link_entry_to_kobj() 399 kernfs_put(target); __compat_only_sysfs_link_entry_to_kobj() 408 kernfs_put(target); __compat_only_sysfs_link_entry_to_kobj() 333 sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name) sysfs_add_link_to_group() argument
|
H A D | sysfs.h | 40 int sysfs_create_link_sd(struct kernfs_node *kn, struct kobject *target,
|
/linux-4.4.14/arch/mips/kernel/ |
H A D | jump_label.c | 25 * - the ISA bit of the target, either 0 or 1 respectively, 27 * - the amount the jump target address is shifted right to fit in the 33 * - the jump target alignment, either 4 or 2 bytes. 49 BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK)); arch_jump_label_transform() 52 BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); arch_jump_label_transform() 56 insn.j_format.target = e->target >> J_RANGE_SHIFT; arch_jump_label_transform()
|
H A D | ptrace.c | 51 static void init_fp_ctx(struct task_struct *target) init_fp_ctx() argument 53 /* If FP has been used then the target already has context */ init_fp_ctx() 54 if (tsk_used_math(target)) init_fp_ctx() 58 memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr)); init_fp_ctx() 63 * Record that the target has "used" math, such that the context init_fp_ctx() 67 set_stopped_child_used_math(target); init_fp_ctx() 290 static int gpr32_get(struct task_struct *target, gpr32_get() argument 295 struct pt_regs *regs = task_pt_regs(target); gpr32_get() 318 static int gpr32_set(struct task_struct *target, gpr32_set() argument 323 struct pt_regs *regs = task_pt_regs(target); gpr32_set() 369 static int gpr64_get(struct task_struct *target, gpr64_get() argument 374 struct pt_regs *regs = task_pt_regs(target); gpr64_get() 397 static int gpr64_set(struct task_struct *target, gpr64_set() argument 402 struct pt_regs *regs = task_pt_regs(target); gpr64_set() 442 static int fpr_get(struct task_struct *target, fpr_get() argument 453 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) fpr_get() 455 &target->thread.fpu, fpr_get() 459 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); fpr_get() 470 static int fpr_set(struct task_struct *target, fpr_set() argument 481 init_fp_ctx(target); fpr_set() 483 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) fpr_set() 485 &target->thread.fpu, fpr_set() 494 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); fpr_set()
|
/linux-4.4.14/drivers/thermal/ |
H A D | gov_bang_bang.c | 45 if (instance->target == THERMAL_NO_TARGET) thermal_zone_trip_update() 46 instance->target = 0; thermal_zone_trip_update() 49 if (instance->target != 0 && instance->target != 1) { thermal_zone_trip_update() 51 instance->name, instance->target); thermal_zone_trip_update() 52 instance->target = 1; thermal_zone_trip_update() 59 if (instance->target == 0 && tz->temperature >= trip_temp) thermal_zone_trip_update() 60 instance->target = 1; thermal_zone_trip_update() 61 else if (instance->target == 1 && thermal_zone_trip_update() 63 instance->target = 0; thermal_zone_trip_update() 65 dev_dbg(&instance->cdev->device, "target=%d\n", thermal_zone_trip_update() 66 (int)instance->target); thermal_zone_trip_update()
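gov_bang_bang.c toggles the cooling device between exactly two states. A user-space sketch of that decision; the listing truncates the switch-off condition, so the hysteresis term below is an assumption based on the governor's two-point behaviour:

/* Bang-bang decision: on at the trip point, off once the temperature has
 * fallen back through the hysteresis band; otherwise hold state. */
static int bang_bang_next_state(int cur_state, int temp, int trip_temp, int hyst)
{
        if (cur_state == 0 && temp >= trip_temp)
                return 1;       /* too hot: cooling fully on */
        if (cur_state == 1 && temp <= trip_temp - hyst)
                return 0;       /* cooled past the band: cooling off */
        return cur_state;       /* inside the band: no change */
}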
|
H A D | step_wise.c | 63 next_target = instance->target; get_target_state() 160 old_target = instance->target; thermal_zone_trip_update() 161 instance->target = get_target_state(instance, trend, throttle); thermal_zone_trip_update() 162 dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n", thermal_zone_trip_update() 163 old_target, (int)instance->target); thermal_zone_trip_update() 165 if (instance->initialized && old_target == instance->target) thermal_zone_trip_update() 170 instance->target != THERMAL_NO_TARGET) thermal_zone_trip_update() 174 instance->target == THERMAL_NO_TARGET) thermal_zone_trip_update()
|
/linux-4.4.14/arch/sparc/kernel/ |
H A D | ptrace_64.c | 123 static int get_from_target(struct task_struct *target, unsigned long uaddr, get_from_target() argument 126 if (target == current) { get_from_target() 130 int len2 = access_process_vm(target, uaddr, kbuf, len, 0); get_from_target() 137 static int set_to_target(struct task_struct *target, unsigned long uaddr, set_to_target() argument 140 if (target == current) { set_to_target() 144 int len2 = access_process_vm(target, uaddr, kbuf, len, 1); set_to_target() 151 static int regwindow64_get(struct task_struct *target, regwindow64_get() argument 161 if (get_from_target(target, rw_addr, &win32, sizeof(win32))) regwindow64_get() 169 if (get_from_target(target, rw_addr, wbuf, sizeof(*wbuf))) regwindow64_get() 176 static int regwindow64_set(struct task_struct *target, regwindow64_set() argument 191 if (set_to_target(target, rw_addr, &win32, sizeof(win32))) regwindow64_set() 195 if (set_to_target(target, rw_addr, wbuf, sizeof(*wbuf))) regwindow64_set() 207 static int genregs64_get(struct task_struct *target, genregs64_get() argument 212 const struct pt_regs *regs = task_pt_regs(target); genregs64_get() 215 if (target == current) genregs64_get() 224 if (regwindow64_get(target, regs, &window)) genregs64_get() 257 static int genregs64_set(struct task_struct *target, genregs64_set() argument 262 struct pt_regs *regs = task_pt_regs(target); genregs64_set() 265 if (target == current) genregs64_set() 274 if (regwindow64_get(target, regs, &window)) genregs64_set() 283 regwindow64_set(target, regs, &window)) genregs64_set() 331 static int fpregs64_get(struct task_struct *target, fpregs64_get() argument 336 const unsigned long *fpregs = task_thread_info(target)->fpregs; fpregs64_get() 340 if (target == current) fpregs64_get() 343 fprs = task_thread_info(target)->fpsaved[0]; fpregs64_get() 369 fsr = task_thread_info(target)->xfsr[0]; fpregs64_get() 370 gsr = task_thread_info(target)->gsr[0]; fpregs64_get() 398 static int fpregs64_set(struct task_struct *target, fpregs64_set() argument 403 unsigned long *fpregs = task_thread_info(target)->fpregs; fpregs64_set() 407 if (target == current) fpregs64_set() 415 task_thread_info(target)->xfsr, fpregs64_set() 420 task_thread_info(target)->gsr, fpregs64_set() 424 fprs = task_thread_info(target)->fpsaved[0]; fpregs64_set() 433 task_thread_info(target)->fpsaved[0] = fprs; fpregs64_set() 475 static int genregs32_get(struct task_struct *target, genregs32_get() argument 480 const struct pt_regs *regs = task_pt_regs(target); genregs32_get() 486 if (target == current) genregs32_get() 498 if (target == current) { genregs32_get() 505 if (access_process_vm(target, genregs32_get() 523 if (target == current) { genregs32_get() 531 if (access_process_vm(target, genregs32_get() 537 if (access_process_vm(target, genregs32_get() 584 static int genregs32_set(struct task_struct *target, genregs32_set() argument 589 struct pt_regs *regs = task_pt_regs(target); genregs32_set() 595 if (target == current) genregs32_set() 607 if (target == current) { genregs32_set() 614 if (access_process_vm(target, genregs32_set() 634 if (target == current) { genregs32_set() 642 if (access_process_vm(target, genregs32_set() 648 if (access_process_vm(target, genregs32_set() 703 static int fpregs32_get(struct task_struct *target, fpregs32_get() argument 708 const unsigned long *fpregs = task_thread_info(target)->fpregs; fpregs32_get() 714 if (target == current) fpregs32_get() 717 fprs = task_thread_info(target)->fpsaved[0]; fpregs32_get() 719 fsr = task_thread_info(target)->xfsr[0]; fpregs32_get() 757 static int fpregs32_set(struct task_struct *target, fpregs32_set() argument 762 unsigned long *fpregs = task_thread_info(target)->fpregs; fpregs32_set() 766 if (target == current) fpregs32_set() 769 fprs = task_thread_info(target)->fpsaved[0]; fpregs32_set() 787 val = task_thread_info(target)->xfsr[0]; fpregs32_set() 790 task_thread_info(target)->xfsr[0] = val; fpregs32_set() 795 task_thread_info(target)->fpsaved[0] = fprs; fpregs32_set()
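get_from_target() and set_to_target() above choose the access path by identity: ordinary uaccess copies when the target is current, access_process_vm() when it is a stopped tracee. A condensed sketch of the read side, with error handling simplified:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

static int read_target_mem(struct task_struct *target, unsigned long uaddr,
                           void *kbuf, int len)
{
        if (target == current)
                return copy_from_user(kbuf, (void __user *)uaddr, len) ?
                        -EFAULT : 0;

        /* Another task: reach its address space via access_process_vm(). */
        if (access_process_vm(target, uaddr, kbuf, len, 0) != len)
                return -EIO;
        return 0;
}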
|
H A D | jump_label.c | 20 s32 off = (s32)entry->target - (s32)entry->code; arch_jump_label_transform()
|
H A D | ptrace_32.c | 48 static int genregs32_get(struct task_struct *target, genregs32_get() argument 53 const struct pt_regs *regs = target->thread.kregs; genregs32_get() 59 if (target == current) genregs32_get() 126 static int genregs32_set(struct task_struct *target, genregs32_set() argument 131 struct pt_regs *regs = target->thread.kregs; genregs32_set() 137 if (target == current) genregs32_set() 210 static int fpregs32_get(struct task_struct *target, fpregs32_get() argument 215 const unsigned long *fpregs = target->thread.float_regs; fpregs32_get() 219 if (target == current) fpregs32_get() 233 &target->thread.fsr, fpregs32_get() 254 static int fpregs32_set(struct task_struct *target, fpregs32_set() argument 259 unsigned long *fpregs = target->thread.float_regs; fpregs32_set() 263 if (target == current) fpregs32_set() 275 &target->thread.fsr, fpregs32_set()
|
H A D | vio.c | 174 u64 target; mdesc_for_each_arc() local 176 target = mdesc_arc_target(hp, a); mdesc_for_each_arc() 178 irq = mdesc_get_property(hp, target, "tx-ino", NULL); mdesc_for_each_arc() 182 irq = mdesc_get_property(hp, target, "rx-ino", NULL); mdesc_for_each_arc() 188 chan_id = mdesc_get_property(hp, target, "id", NULL); mdesc_for_each_arc() 231 u64 target; mdesc_for_each_arc() local 233 target = mdesc_arc_target(hp, a); mdesc_for_each_arc() 234 cfg_handle = mdesc_get_property(hp, target, mdesc_for_each_arc() 378 u64 target = mdesc_arc_target(hp, a); mdesc_for_each_arc() local 379 const char *name = mdesc_node_name(hp, target); mdesc_for_each_arc()
|
/linux-4.4.14/include/linux/netfilter_arp/ |
H A D | arp_tables.h | 20 struct xt_standard_target target; member in struct:arpt_standard 25 struct xt_error_target target; member in struct:arpt_error 37 .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ 39 .target.verdict = -(__verdict) - 1, \ 45 .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ 47 .target.errorname = "ERROR", \
|
/linux-4.4.14/include/linux/netfilter_ipv4/ |
H A D | ip_tables.h | 35 struct xt_standard_target target; member in struct:ipt_standard 40 struct xt_error_target target; member in struct:ipt_error 52 .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ 54 .target.verdict = -(__verdict) - 1, \ 60 .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ 62 .target.errorname = "ERROR", \
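Both table headers initialize standard targets with .target.verdict = -(__verdict) - 1: kernel verdicts (NF_DROP = 0, NF_ACCEPT = 1, ...) are stored as negative values so that non-negative numbers remain free to act as jump offsets within the table. A standalone illustration of the encoding:

#include <stdio.h>

static int xt_encode_verdict(int verdict) { return -verdict - 1; }
static int xt_decode_verdict(int stored)  { return -stored - 1; }

int main(void)
{
        /* NF_DROP is 0 and NF_ACCEPT is 1 in the uapi headers. */
        printf("NF_DROP   -> %d\n", xt_encode_verdict(0)); /* -1 */
        printf("NF_ACCEPT -> %d\n", xt_encode_verdict(1)); /* -2 */
        printf("stored -2 -> verdict %d\n", xt_decode_verdict(-2));
        return 0;
}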
|
/linux-4.4.14/arch/x86/kernel/ |
H A D | pci-calgary_64.c | 523 unsigned long target = ((unsigned long)bar) | offset; calgary_reg() local 524 return (void __iomem*)target; calgary_reg() 548 void __iomem *target; calgary_tce_cache_blast() local 551 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET); calgary_tce_cache_blast() 552 aer = readl(target); calgary_tce_cache_blast() 553 writel(0, target); calgary_tce_cache_blast() 556 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET); calgary_tce_cache_blast() 557 val = readl(target); calgary_tce_cache_blast() 560 target = calgary_reg(bbar, split_queue_offset(tbl->it_busno)); calgary_tce_cache_blast() 562 val = readq(target); calgary_tce_cache_blast() 569 target = calgary_reg(bbar, tar_offset(tbl->it_busno)); calgary_tce_cache_blast() 570 writeq(tbl->tar_val, target); calgary_tce_cache_blast() 573 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET); calgary_tce_cache_blast() 574 writel(aer, target); calgary_tce_cache_blast() 575 (void)readl(target); /* flush */ calgary_tce_cache_blast() 581 void __iomem *target; calioc2_tce_cache_blast() local 593 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL); calioc2_tce_cache_blast() 594 val = be32_to_cpu(readl(target)); calioc2_tce_cache_blast() 595 printk(KERN_DEBUG "1a. read 0x%x [LE] from %p\n", val, target); calioc2_tce_cache_blast() 597 printk(KERN_DEBUG "1b. writing 0x%x [LE] to %p\n", val, target); calioc2_tce_cache_blast() 598 writel(cpu_to_be32(val), target); calioc2_tce_cache_blast() local 602 target = calgary_reg(bbar, split_queue_offset(bus)); calioc2_tce_cache_blast() 604 val64 = readq(target); calioc2_tce_cache_blast() 611 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG); calioc2_tce_cache_blast() 612 val = be32_to_cpu(readl(target)); calioc2_tce_cache_blast() 613 printk(KERN_DEBUG "3. read 0x%x [LE] from %p\n", val, target); calioc2_tce_cache_blast() 626 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL); calioc2_tce_cache_blast() 627 printk(KERN_DEBUG "5a. slamming into HardStop by reading %p\n", target); calioc2_tce_cache_blast() 628 val = be32_to_cpu(readl(target)); calioc2_tce_cache_blast() 629 printk(KERN_DEBUG "5b. read 0x%x [LE] from %p\n", val, target); calioc2_tce_cache_blast() 630 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG); calioc2_tce_cache_blast() 631 val = be32_to_cpu(readl(target)); calioc2_tce_cache_blast() 632 printk(KERN_DEBUG "5c. read 0x%x [LE] from %p (debug)\n", val, target); calioc2_tce_cache_blast() 636 target = calgary_reg(bbar, tar_offset(bus)); calioc2_tce_cache_blast() 637 writeq(tbl->tar_val, target); calioc2_tce_cache_blast() 641 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL); calioc2_tce_cache_blast() 642 val = be32_to_cpu(readl(target)); calioc2_tce_cache_blast() 643 printk(KERN_DEBUG "7b. read 0x%x [LE] from %p\n", val, target); calioc2_tce_cache_blast() 647 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL); calioc2_tce_cache_blast() 649 printk(KERN_DEBUG "8b. writing 0x%x [LE] to %p\n", val, target); calioc2_tce_cache_blast() 650 writel(cpu_to_be32(val), target); calioc2_tce_cache_blast() local 651 val = be32_to_cpu(readl(target)); calioc2_tce_cache_blast() 652 printk(KERN_DEBUG "8c. read 0x%x [LE] from %p\n", val, target); calioc2_tce_cache_blast() 669 void __iomem *target; calgary_reserve_peripheral_mem_1() local 677 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW); calgary_reserve_peripheral_mem_1() 678 low = be32_to_cpu(readl(target)); calgary_reserve_peripheral_mem_1() 679 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH); calgary_reserve_peripheral_mem_1() 680 high = be32_to_cpu(readl(target)); calgary_reserve_peripheral_mem_1() 681 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE); calgary_reserve_peripheral_mem_1() 682 sizelow = be32_to_cpu(readl(target)); calgary_reserve_peripheral_mem_1() 692 void __iomem *target; calgary_reserve_peripheral_mem_2() local 701 target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET); calgary_reserve_peripheral_mem_2() 702 val32 = be32_to_cpu(readl(target)); calgary_reserve_peripheral_mem_2() 706 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW); calgary_reserve_peripheral_mem_2() 707 low = be32_to_cpu(readl(target)); calgary_reserve_peripheral_mem_2() 708 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH); calgary_reserve_peripheral_mem_2() 709 high = be32_to_cpu(readl(target)); calgary_reserve_peripheral_mem_2() 710 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW); calgary_reserve_peripheral_mem_2() 711 sizelow = be32_to_cpu(readl(target)); calgary_reserve_peripheral_mem_2() 712 target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH); calgary_reserve_peripheral_mem_2() 713 sizehigh = be32_to_cpu(readl(target)); calgary_reserve_peripheral_mem_2() 757 void __iomem *target; calgary_setup_tar() local 784 target = calgary_reg(bbar, tar_offset(dev->bus->number)); calgary_setup_tar() 785 val64 = be64_to_cpu(readq(target)); calgary_setup_tar() 798 writeq(tbl->tar_val, target); calgary_setup_tar() 799 readq(target); /* flush */ calgary_setup_tar() 808 void __iomem *target; calgary_free_bus() local 811 target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number)); calgary_free_bus() 812 val64 = be64_to_cpu(readq(target)); calgary_free_bus() 814 writeq(cpu_to_be64(val64), target); calgary_free_bus() local 815 readq(target); /* flush */ calgary_free_bus() 832 void __iomem *target; calgary_dump_error_regs() local 835 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET); calgary_dump_error_regs() 836 csr = be32_to_cpu(readl(target)); calgary_dump_error_regs() 838 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET); calgary_dump_error_regs() 839 plssr = be32_to_cpu(readl(target)); calgary_dump_error_regs() 850 void __iomem *target; calioc2_dump_error_regs() local 857 target = calgary_reg(bbar, phboff | PHB_CSR_OFFSET); calioc2_dump_error_regs() 858 csr = be32_to_cpu(readl(target)); calioc2_dump_error_regs() 860 target = calgary_reg(bbar, phboff | PHB_PLSSR_OFFSET); calioc2_dump_error_regs() 861 plssr = be32_to_cpu(readl(target)); calioc2_dump_error_regs() 863 target = calgary_reg(bbar, phboff | 0x290); calioc2_dump_error_regs() 864 csmr = be32_to_cpu(readl(target)); calioc2_dump_error_regs() 866 target = calgary_reg(bbar, phboff | 0x800); calioc2_dump_error_regs() 867 mck = be32_to_cpu(readl(target)); calioc2_dump_error_regs() 879 target = calgary_reg(bbar, phboff | erroff); calioc2_dump_error_regs() 880 errregs[i] = be32_to_cpu(readl(target)); calioc2_dump_error_regs() 886 target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS); calioc2_dump_error_regs() 887 rcstat = be32_to_cpu(readl(target)); calioc2_dump_error_regs() 898 void __iomem *target; calgary_watchdog() local 900 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET); calgary_watchdog() 901 val32 = be32_to_cpu(readl(target)); calgary_watchdog() 908 writel(0, target); calgary_watchdog() 911 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | calgary_watchdog() 913 val32 = be32_to_cpu(readl(target)); calgary_watchdog() 915 writel(cpu_to_be32(val32), target); calgary_watchdog() local 916 readl(target); /* flush */ calgary_watchdog() 927 void __iomem *target; calgary_set_split_completion_timeout() local 944 target = calgary_reg(bbar, CALGARY_CONFIG_REG); calgary_set_split_completion_timeout() 945 val64 = be64_to_cpu(readq(target)); calgary_set_split_completion_timeout() 951 writeq(cpu_to_be64(val64), target); calgary_set_split_completion_timeout() local 952 readq(target); /* flush */ calgary_set_split_completion_timeout() 959 void __iomem *target; calioc2_handle_quirks() local 965 target = calgary_reg(bbar, phb_offset(busnum) | PHB_SAVIOR_L2); calioc2_handle_quirks() 966 val = cpu_to_be32(readl(target)); calioc2_handle_quirks() 968 writel(cpu_to_be32(val), target); calioc2_handle_quirks() local 988 void __iomem *target; calgary_enable_translation() local 997 target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET); calgary_enable_translation() 998 val32 = be32_to_cpu(readl(target)); calgary_enable_translation() 1007 writel(cpu_to_be32(val32), target); calgary_enable_translation() local 1008 readl(target); /* flush */ calgary_enable_translation() 1020 void __iomem *target; calgary_disable_translation() local 1029 target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET); calgary_disable_translation() 1030 val32 = be32_to_cpu(readl(target)); calgary_disable_translation() 1034 writel(cpu_to_be32(val32), target); calgary_disable_translation() local 1035 readl(target); /* flush */ calgary_disable_translation() 1089 void __iomem *target; calgary_locate_bbars() local 1108 target = calgary_reg(bbar, offset); calgary_locate_bbars() 1110 val = be32_to_cpu(readl(target)); calgary_locate_bbars() 1327 void __iomem *target; get_tce_space_from_tar() local 1345 target = calgary_reg(bus_info[bus].bbar, get_tce_space_from_tar() 1347 tce_space = be64_to_cpu(readq(target)); get_tce_space_from_tar()
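Nearly every Calgary access above follows one idiom: the PHB registers are big-endian, so reads go through be32_to_cpu(readl(...)), writes through writel(cpu_to_be32(...)), and a read-back flushes the posted write. A sketch of that read-modify-write pattern; the register pointer and mask are placeholders:

#include <linux/io.h>

static void phb_set_bits(void __iomem *reg, u32 mask)
{
        u32 val = be32_to_cpu(readl(reg));      /* registers are big-endian */

        val |= mask;
        writel(cpu_to_be32(val), reg);
        (void)readl(reg);                       /* read back to flush the posted write */
}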
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ |
H A D | fan.c | 32 nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target) nvkm_fan_update() argument 41 /* update target fan speed, restricting to allowed range */ nvkm_fan_update() 43 if (target < 0) nvkm_fan_update() 44 target = fan->percent; nvkm_fan_update() 45 target = max_t(u8, target, fan->bios.min_duty); nvkm_fan_update() 46 target = min_t(u8, target, fan->bios.max_duty); nvkm_fan_update() 47 if (fan->percent != target) { nvkm_fan_update() 48 nvkm_debug(subdev, "FAN target: %d\n", target); nvkm_fan_update() 49 fan->percent = target; nvkm_fan_update() 52 /* check that we're not already at the target duty cycle */ nvkm_fan_update() 54 if (duty == target) { nvkm_fan_update() 65 if (duty < target) nvkm_fan_update() 66 duty = min(duty + 3, target); nvkm_fan_update() 67 else if (duty > target) nvkm_fan_update() 68 duty = max(duty - 3, target); nvkm_fan_update() 70 duty = target; nvkm_fan_update() 85 /* schedule next fan update, if not at target speed already */ nvkm_fan_update() 86 if (list_empty(&fan->alarm.head) && target != duty) { nvkm_fan_update() 91 if (duty > target) nvkm_fan_update() 93 else if (duty == target) nvkm_fan_update()
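nvkm_fan_update() clamps the requested speed to the BIOS duty range and, when the fan needs a gradual ramp, moves at most three points per update (non-linear fans jump straight to the target). A user-space sketch of a single step:

/* One slewing step toward a clamped target duty cycle. */
static int fan_next_duty(int duty, int target, int min_duty, int max_duty)
{
        if (target < min_duty)
                target = min_duty;
        if (target > max_duty)
                target = max_duty;

        if (duty < target)
                return (duty + 3 > target) ? target : duty + 3;
        if (duty > target)
                return (duty - 3 < target) ? target : duty - 3;
        return duty;            /* already at the target duty cycle */
}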
|
/linux-4.4.14/net/sched/ |
H A D | act_ipt.c | 2 * net/sched/act_ipt.c iptables target interface 36 struct xt_target *target; ipt_init_target() local 39 target = xt_request_find_target(AF_INET, t->u.user.name, ipt_init_target() 41 if (IS_ERR(target)) ipt_init_target() 42 return PTR_ERR(target); ipt_init_target() 44 t->u.kernel.target = target; ipt_init_target() 47 par.target = target; ipt_init_target() 54 module_put(t->u.kernel.target->me); ipt_init_target() 63 .target = t->u.kernel.target, ipt_destroy_target() 66 if (par.target->destroy != NULL) ipt_destroy_target() 67 par.target->destroy(&par); ipt_destroy_target() 68 module_put(par.target->me); ipt_destroy_target() 196 par.target = ipt->tcfi_t->u.kernel.target; tcf_ipt() 198 ret = par.target->target(skb, &par); tcf_ipt() 231 * user name = target name tcf_ipt_dump() 241 strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); tcf_ipt_dump() 284 MODULE_DESCRIPTION("Iptables target actions");
|
H A D | sch_pie.c | 41 psched_time_t target; /* user specified target delay in pschedtime */ member in struct:pie_params 85 params->target = PSCHED_NS2TICKS(20 * NSEC_PER_MSEC); /* 20 ms */ pie_params_init() 109 /* If current delay is less than half of target, and drop_early() 112 if ((q->vars.qdelay < q->params.target / 2) drop_early() 200 /* target is in us */ pie_change() 201 u32 target = nla_get_u32(tb[TCA_PIE_TARGET]); pie_change() local 204 q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC); pie_change() 364 delta += alpha * ((qdelay - q->params.target)); calculate_probability() 417 if ((q->vars.qdelay < q->params.target / 2) && calculate_probability() 418 (q->vars.qdelay_old < q->params.target / 2) && calculate_probability() 470 /* convert target from pschedtime to us */ pie_dump() 472 ((u32) PSCHED_TICKS2NS(q->params.target)) / pie_dump()
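sch_pie.c stores the target delay in psched ticks while netlink supplies microseconds. A standalone sketch of that conversion, assuming the usual PSCHED_SHIFT of 6 (one psched tick = 64 ns):

#include <stdint.h>

#define NSEC_PER_USEC 1000ULL
#define PSCHED_SHIFT  6         /* assumed value from include/net/pkt_sched.h */

static uint64_t pie_target_us_to_ticks(uint32_t us)
{
        return (us * NSEC_PER_USEC) >> PSCHED_SHIFT;    /* PSCHED_NS2TICKS */
}

static uint64_t pie_target_ticks_to_us(uint64_t ticks)
{
        return (ticks << PSCHED_SHIFT) / NSEC_PER_USEC; /* PSCHED_TICKS2NS, ns -> us */
}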
|
/linux-4.4.14/tools/perf/ui/browsers/ |
H A D | map.c | 45 char target[512]; map_browser__search() local 49 target, "ENTER: OK, ESC: Cancel", 0); map_browser__search() 53 if (target[0] == '0' && tolower(target[1]) == 'x') { map_browser__search() 54 u64 addr = strtoull(target, NULL, 16); map_browser__search() 57 sym = map__find_symbol_by_name(browser->map, target, NULL); map_browser__search() 65 ui_helpline__fpush("%s not found!", target); map_browser__search()
|
/linux-4.4.14/drivers/of/ |
H A D | overlay.c | 28 * @target: target of the overlay operation 35 struct device_node *target; member in struct:of_overlay_info 57 struct device_node *target, const struct device_node *overlay); 60 struct device_node *target, struct property *prop) of_overlay_apply_single_property() 65 tprop = of_find_property(target, prop->name, NULL); of_overlay_apply_single_property() 79 return of_changeset_add_property(&ov->cset, target, propn); of_overlay_apply_single_property() 82 return of_changeset_update_property(&ov->cset, target, propn); of_overlay_apply_single_property() 86 struct device_node *target, struct device_node *child) of_overlay_apply_single_device_node() 97 tchild = of_get_child_by_name(target, cname); of_overlay_apply_single_device_node() 103 /* create empty tree as a target */ of_overlay_apply_single_device_node() 104 tchild = __of_node_dup(child, "%s/%s", target->full_name, cname); of_overlay_apply_single_device_node() 109 tchild->parent = target; of_overlay_apply_single_device_node() 126 * Note that the in case of an error the target node is left 131 struct device_node *target, const struct device_node *overlay) of_overlay_apply_one() 138 ret = of_overlay_apply_single_property(ov, target, prop); for_each_property_of_node() 141 __func__, target->full_name, prop->name); for_each_property_of_node() 147 ret = of_overlay_apply_single_device_node(ov, target, child); for_each_child_of_node() 150 __func__, target->full_name, for_each_child_of_node() 177 err = of_overlay_apply_one(ov, ovinfo->target, ovinfo->overlay); of_overlay_apply() 180 __func__, ovinfo->target->full_name); of_overlay_apply() 189 * Find the target node using a number of different strategies 192 * "target" property containing the phandle of the target 193 * "target-path" property containing the path of the target 201 /* first try to go by using the target as a phandle */ find_target_node() 202 ret = of_property_read_u32(info_node, "target", &val); find_target_node() 207 ret = of_property_read_string(info_node, "target-path", &path); find_target_node() 211 pr_err("%s: Failed to find target for node %p (%s)\n", __func__, find_target_node() 224 * from a device node. This device node must have a target property 225 * which contains a phandle of the overlay target node, and an 227 * Both ovinfo->target & ovinfo->overlay have their references taken. 238 ovinfo->target = find_target_node(info_node); of_fill_overlay_info() 239 if (ovinfo->target == NULL) of_fill_overlay_info() 245 of_node_put(ovinfo->target); of_fill_overlay_info() 318 of_node_put(ovinfo->target); of_free_overlay_info() 59 of_overlay_apply_single_property(struct of_overlay *ov, struct device_node *target, struct property *prop) of_overlay_apply_single_property() argument 85 of_overlay_apply_single_device_node(struct of_overlay *ov, struct device_node *target, struct device_node *child) of_overlay_apply_single_device_node() argument 130 of_overlay_apply_one(struct of_overlay *ov, struct device_node *target, const struct device_node *overlay) of_overlay_apply_one() argument
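find_target_node() above tries a "target" phandle first and falls back to a "target-path" string. A condensed sketch of those two lookup strategies:

#include <linux/of.h>

static struct device_node *resolve_overlay_target(struct device_node *info)
{
        const char *path;
        u32 ph;

        if (!of_property_read_u32(info, "target", &ph))
                return of_find_node_by_phandle(ph);

        if (!of_property_read_string(info, "target-path", &path))
                return of_find_node_by_path(path);

        return NULL;    /* neither property present: no target */
}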
|
/linux-4.4.14/fs/ncpfs/ |
H A D | ncplib_kernel.c | 153 ncp_negotiate_buffersize(struct ncp_server *server, int size, int *target) ncp_negotiate_buffersize() argument 164 *target = min_t(unsigned int, ncp_reply_be16(server, 0), size); ncp_negotiate_buffersize() 205 int n, struct ncp_volume_info* target) { ncp_get_volume_info_with_number() 215 target->total_blocks = ncp_reply_dword_lh(server, 0); ncp_get_volume_info_with_number() 216 target->free_blocks = ncp_reply_dword_lh(server, 4); ncp_get_volume_info_with_number() 217 target->purgeable_blocks = ncp_reply_dword_lh(server, 8); ncp_get_volume_info_with_number() 218 target->not_yet_purgeable_blocks = ncp_reply_dword_lh(server, 12); ncp_get_volume_info_with_number() 219 target->total_dir_entries = ncp_reply_dword_lh(server, 16); ncp_get_volume_info_with_number() 220 target->available_dir_entries = ncp_reply_dword_lh(server, 20); ncp_get_volume_info_with_number() 221 target->sectors_per_block = ncp_reply_byte(server, 28); ncp_get_volume_info_with_number() 223 memset(&(target->volume_name), 0, sizeof(target->volume_name)); ncp_get_volume_info_with_number() 231 memcpy(&(target->volume_name), ncp_reply_data(server, 30), len); ncp_get_volume_info_with_number() 239 struct ncp_volume_info* target) { ncp_get_directory_info() 249 target->total_blocks = ncp_reply_dword_lh(server, 0); ncp_get_directory_info() 250 target->free_blocks = ncp_reply_dword_lh(server, 4); ncp_get_directory_info() 251 target->purgeable_blocks = 0; ncp_get_directory_info() 252 target->not_yet_purgeable_blocks = 0; ncp_get_directory_info() 253 target->total_dir_entries = ncp_reply_dword_lh(server, 8); ncp_get_directory_info() 254 target->available_dir_entries = ncp_reply_dword_lh(server, 12); ncp_get_directory_info() 255 target->sectors_per_block = ncp_reply_byte(server, 20); ncp_get_directory_info() 257 memset(&(target->volume_name), 0, sizeof(target->volume_name)); ncp_get_directory_info() 265 memcpy(&(target->volume_name), ncp_reply_data(server, 22), len); ncp_get_directory_info() 352 void ncp_extract_file_info(const void *structure, struct nw_info_struct *target) ncp_extract_file_info() argument 357 memcpy(target, structure, info_struct_size); ncp_extract_file_info() 359 target->nameLen = *name_len; ncp_extract_file_info() 360 memcpy(target->entryName, name_len + 1, *name_len); ncp_extract_file_info() 361 target->entryName[*name_len] = '\0'; ncp_extract_file_info() 362 target->volNumber = le32_to_cpu(target->volNumber); ncp_extract_file_info() 368 struct nw_nfs_info *target) ncp_extract_nfs_info() 370 target->mode = DVAL_LH(structure); ncp_extract_nfs_info() 371 target->rdev = DVAL_LH(structure + 8); ncp_extract_nfs_info() 376 struct nw_info_struct *target) ncp_obtain_nfs_info() 381 __u32 volnum = target->volNumber; ncp_obtain_nfs_info() 390 ncp_add_dword(server, target->dirEntNum); ncp_obtain_nfs_info() 396 ncp_extract_nfs_info(ncp_reply_data(server, 0), &target->nfs); ncp_obtain_nfs_info() 398 target->entryName, target->nfs.mode, ncp_obtain_nfs_info() 399 target->nfs.rdev); ncp_obtain_nfs_info() 401 target->nfs.mode = 0; ncp_obtain_nfs_info() 402 target->nfs.rdev = 0; ncp_obtain_nfs_info() 409 target->nfs.mode = 0; ncp_obtain_nfs_info() 410 target->nfs.rdev = 0; ncp_obtain_nfs_info() 420 struct nw_info_struct *target) ncp_obtain_info() 426 if (target == NULL) { ncp_obtain_info() 440 ncp_extract_file_info(ncp_reply_data(server, 0), target); ncp_obtain_info() 443 result = ncp_obtain_nfs_info(server, target); ncp_obtain_info() 624 const char *volname, struct nw_info_struct *target) ncp_lookup_volume() 628 memset(target, 0, sizeof(*target)); ncp_lookup_volume() 630 &target->volNumber, &target->dirEntNum, &target->DosDirNum); ncp_lookup_volume() 634 ncp_update_known_namespace(server, target->volNumber, NULL); ncp_lookup_volume() 635 target->nameLen = strlen(volname); ncp_lookup_volume() 636 memcpy(target->entryName, volname, target->nameLen+1); ncp_lookup_volume() 637 target->attributes = aDIR; ncp_lookup_volume() 639 target->creationTime = target->modifyTime = cpu_to_le16(0x0000); ncp_lookup_volume() 640 target->creationDate = target->modifyDate = target->lastAccessDate = cpu_to_le16(0x0C21); ncp_lookup_volume() 641 target->nfs.mode = 0; ncp_lookup_volume() 774 /* If both dir and name are NULL, then in target there's already a 781 struct ncp_entry_info *target) ncp_open_create_file_or_subdir() 809 target->opened = 1; ncp_open_create_file_or_subdir() 811 /* in target there's a new finfo to fill */ ncp_open_create_file_or_subdir() 812 ncp_extract_file_info(ncp_reply_data(server, 6), &(target->i)); ncp_open_create_file_or_subdir() 813 target->volume = target->i.volNumber; ncp_open_create_file_or_subdir() 816 target->file_handle); ncp_open_create_file_or_subdir() 820 (void)ncp_obtain_nfs_info(server, &(target->i)); ncp_open_create_file_or_subdir() 830 struct nw_search_sequence *target) ncp_initialize_search() 845 memcpy(target, ncp_reply_data(server, 0), sizeof(*target)); ncp_initialize_search() 968 __u32 offset, __u16 to_read, char *target, int *bytes_read) ncp_read_kernel() 985 memcpy(target, source, *bytes_read); ncp_read_kernel() 204 ncp_get_volume_info_with_number(struct ncp_server* server, int n, struct ncp_volume_info* target) ncp_get_volume_info_with_number() argument 238 ncp_get_directory_info(struct ncp_server* server, __u8 n, struct ncp_volume_info* target) ncp_get_directory_info() argument 367 ncp_extract_nfs_info(const unsigned char *structure, struct nw_nfs_info *target) ncp_extract_nfs_info() argument 375 ncp_obtain_nfs_info(struct ncp_server *server, struct nw_info_struct *target) ncp_obtain_nfs_info() argument 419 ncp_obtain_info(struct ncp_server *server, struct inode *dir, const char *path, struct nw_info_struct *target) ncp_obtain_info() argument 623 ncp_lookup_volume(struct ncp_server *server, const char *volname, struct nw_info_struct *target) ncp_lookup_volume() argument 776 ncp_open_create_file_or_subdir(struct ncp_server *server, struct inode *dir, const char *name, int open_create_mode, __le32 create_attributes, __le16 desired_acc_rights, struct ncp_entry_info *target) ncp_open_create_file_or_subdir() argument 829 ncp_initialize_search(struct ncp_server *server, struct inode *dir, struct nw_search_sequence *target) ncp_initialize_search() argument 967 ncp_read_kernel(struct ncp_server *server, const char *file_id, __u32 offset, __u16 to_read, char *target, int *bytes_read) ncp_read_kernel() argument
|
/linux-4.4.14/include/uapi/linux/netfilter/ |
H A D | xt_set.h | 28 /* match and target infos */ 38 /* Revision 1 match and target */ 46 /* match and target infos */ 56 /* Revision 2 target */ 74 /* Revision 3 target */
|
H A D | xt_CHECKSUM.h | 1 /* Header file for iptables ipt_CHECKSUM target
|
H A D | xt_TPROXY.h | 6 /* TPROXY target is capable of marking the packet to perform
|
H A D | xt_AUDIT.h | 2 * Header file for iptables xt_AUDIT target
|
H A D | xt_DSCP.h | 16 /* target info */
|
H A D | xt_NFQUEUE.h | 13 /* target info */
|
H A D | x_tables.h | 46 struct xt_target *target; member in struct:xt_entry_target::__anon14279::__anon14281 58 .target.u.user = { \ 65 struct xt_entry_target target; member in struct:xt_standard_target 70 struct xt_entry_target target; member in struct:xt_error_target 84 /* For standard target */
|
/linux-4.4.14/net/netfilter/ |
H A D | nft_compat.c | 52 par->target = xt; nft_compat_set_par() 62 struct xt_target *target = expr->ops->data; nft_target_eval_xt() local 66 nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info); nft_target_eval_xt() 68 ret = target->target(skb, &pkt->xt); nft_target_eval_xt() 88 struct xt_target *target = expr->ops->data; nft_target_eval_bridge() local 92 nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info); nft_target_eval_bridge() 94 ret = target->target(skb, &pkt->xt); nft_target_eval_bridge() 127 struct xt_target *target, void *info, nft_target_set_tgchk_param() 152 par->target = target; nft_target_set_tgchk_param() 211 struct xt_target *target = expr->ops->data; nft_target_init() local 219 ret = nft_compat_chain_validate_dependency(target->table, ctx->chain); nft_target_init() 223 target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); nft_target_init() 231 nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); nft_target_init() 237 /* The standard target cannot be used */ nft_target_init() 238 if (target->target == NULL) { nft_target_init() 245 module_put(target->me); nft_target_init() 252 struct xt_target *target = expr->ops->data; nft_target_destroy() local 257 par.target = target; nft_target_destroy() 260 if (par.target->destroy != NULL) nft_target_destroy() 261 par.target->destroy(&par); nft_target_destroy() 263 module_put(target->me); nft_target_destroy() 268 const struct xt_target *target = expr->ops->data; nft_target_dump() local 271 if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) || nft_target_dump() 272 nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) || nft_target_dump() 273 nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(target->targetsize), info)) nft_target_dump() 286 struct xt_target *target = expr->ops->data; nft_target_validate() local 296 if (!(hook_mask & target->hooks)) nft_target_validate() 299 ret = nft_compat_chain_validate_dependency(target->table, nft_target_validate() 492 int rev, int target) nfnl_compat_fill_info() 510 nla_put_be32(skb, NFTA_COMPAT_TYPE, htonl(target))) nfnl_compat_fill_info() 526 int ret = 0, target; nfnl_compat_get() local 540 target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE])); nfnl_compat_get() 564 rev, target, &ret), nfnl_compat_get() 580 name, ret, target) <= 0) { nfnl_compat_get() 714 struct xt_target *target; nft_target_select_ops() local 727 /* Re-use the existing target if it's already loaded. */ nft_target_select_ops() 729 struct xt_target *target = nft_target->ops.data; nft_target_select_ops() local 731 if (nft_target_cmp(target, tg_name, rev, family)) { nft_target_select_ops() 732 if (!try_module_get(target->me)) nft_target_select_ops() 739 target = xt_request_find_target(family, tg_name, rev); nft_target_select_ops() 740 if (IS_ERR(target)) nft_target_select_ops() 743 /* This is the first time we use this target, allocate operations */ nft_target_select_ops() 749 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); nft_target_select_ops() 754 nft_target->ops.data = target; nft_target_select_ops() 775 .name = "target", 828 MODULE_ALIAS_NFT_EXPR("target"); 125 nft_target_set_tgchk_param(struct xt_tgchk_param *par, const struct nft_ctx *ctx, struct xt_target *target, void *info, union nft_entry *entry, u16 proto, bool inv) nft_target_set_tgchk_param() argument 490 nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, int event, u16 family, const char *name, int rev, int target) nfnl_compat_fill_info() argument
|
H A D | x_tables.c | 51 struct list_head target; member in struct:xt_af 71 int xt_register_target(struct xt_target *target) xt_register_target() argument 73 u_int8_t af = target->family; xt_register_target() 76 list_add(&target->list, &xt[af].target); xt_register_target() 83 xt_unregister_target(struct xt_target *target) xt_unregister_target() argument 85 u_int8_t af = target->family; xt_unregister_target() 88 list_del(&target->list); xt_unregister_target() 94 xt_register_targets(struct xt_target *target, unsigned int n) xt_register_targets() argument 100 err = xt_register_target(&target[i]); xt_register_targets() 108 xt_unregister_targets(target, i); xt_register_targets() 114 xt_unregister_targets(struct xt_target *target, unsigned int n) xt_unregister_targets() argument 117 xt_unregister_target(&target[n]); xt_unregister_targets() 221 /* Find target, grabs ref. Returns ERR_PTR() on error. */ xt_find_target() 228 list_for_each_entry(t, &xt[af].target, list) { xt_find_target() 251 struct xt_target *target; xt_request_find_target() local 253 target = xt_find_target(af, name, revision); xt_request_find_target() 254 if (IS_ERR(target)) { xt_request_find_target() 256 target = xt_find_target(af, name, revision); xt_request_find_target() 259 return target; xt_request_find_target() 288 list_for_each_entry(t, &xt[af].target, list) { target_revfn() 304 int xt_find_revision(u8 af, const char *name, u8 revision, int target, xt_find_revision() argument 310 if (target == 1) xt_find_revision() 418 /** xt_check_entry_match - check that matches end before start of target 421 * @target: beginning of this rules target (alleged end of matches) 424 * Validates that all matches add up to the beginning of the target, 429 static int xt_check_entry_match(const char *match, const char *target, xt_check_entry_match() argument 433 int length = target - match; xt_check_entry_match() 638 * match sizes (if any) align with the target offset. 643 * the target structure begins. 654 * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry 663 * This is where matches (if any) and the target reside. 664 * target_offset: beginning of target. 681 /* target start is within the ip/ip6/arpt_entry struct */ xt_check_entry_offsets() 709 if (XT_ALIGN(par->target->targetsize) != size) { xt_check_target() 710 pr_err("%s_tables: %s.%u target: invalid size " xt_check_target() 712 xt_prefix[par->family], par->target->name, xt_check_target() 713 par->target->revision, xt_check_target() 714 XT_ALIGN(par->target->targetsize), size); xt_check_target() 717 if (par->target->table != NULL && xt_check_target() 718 strcmp(par->target->table, par->table) != 0) { xt_check_target() 719 pr_err("%s_tables: %s target: only valid in %s table, not %s\n", xt_check_target() 720 xt_prefix[par->family], par->target->name, xt_check_target() 721 par->target->table, par->table); xt_check_target() 724 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) { xt_check_target() 727 pr_err("%s_tables: %s target: used from hooks %s, but only " xt_check_target() 729 xt_prefix[par->family], par->target->name, xt_check_target() 732 textify_hooks(allow, sizeof(allow), par->target->hooks, xt_check_target() 736 if (par->target->proto && (par->target->proto != proto || inv_proto)) { xt_check_target() 737 pr_err("%s_tables: %s target: only valid for protocol %u\n", xt_check_target() 738 xt_prefix[par->family], par->target->name, xt_check_target() 739 par->target->proto); xt_check_target() 742 if (par->target->checkentry != NULL) { xt_check_target() 743 ret = par->target->checkentry(par); xt_check_target() 829 int xt_compat_target_offset(const struct xt_target *target) xt_compat_target_offset() argument 831 u_int16_t csize = target->compatsize ? : target->targetsize; xt_compat_target_offset() 832 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); xt_compat_target_offset() 839 const struct xt_target *target = t->u.kernel.target; xt_compat_target_from_user() local 841 int pad, off = xt_compat_target_offset(target); xt_compat_target_from_user() 847 if (target->compat_from_user) xt_compat_target_from_user() 848 target->compat_from_user(t->data, ct->data); xt_compat_target_from_user() 851 pad = XT_ALIGN(target->targetsize) - target->targetsize; xt_compat_target_from_user() 853 memset(t->data + target->targetsize, 0, pad); xt_compat_target_from_user() 857 strlcpy(name, target->name, sizeof(name)); xt_compat_target_from_user() 858 module_put(target->me); xt_compat_target_from_user() 869 const struct xt_target *target = t->u.kernel.target; xt_compat_target_to_user() local 871 int off = xt_compat_target_offset(target); xt_compat_target_to_user() 876 copy_to_user(ct->u.user.name, t->u.kernel.target->name, xt_compat_target_to_user() 877 strlen(t->u.kernel.target->name) + 1)) xt_compat_target_to_user() 880 if (target->compat_to_user) { xt_compat_target_to_user() 881 if (target->compat_to_user((void __user *)ct->data, t->data)) xt_compat_target_to_user() 992 * TEE target. xt_jumpstack_alloc() 1245 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match; xt_mttg_seq_next() 1254 &xt[trav->nfproto].target : &xt[trav->nfproto].match; xt_mttg_seq_next() 1364 const struct xt_target *target; xt_target_seq_show() local 1371 target = list_entry(trav->curr, struct xt_target, list); xt_target_seq_show() 1372 if (*target->name) xt_target_seq_show() 1373 seq_printf(seq, "%s\n", target->name); xt_target_seq_show() 1567 INIT_LIST_HEAD(&xt[i].target);
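xt_register_target() above just links a target into the per-family list under a mutex, so a target module fills in a struct xt_target and registers it at init time. A minimal, purely illustrative module skeleton; the "EXAMPLE" target does nothing:

#include <linux/module.h>
#include <linux/netfilter/x_tables.h>

static unsigned int example_tg(struct sk_buff *skb,
                               const struct xt_action_param *par)
{
        return XT_CONTINUE;     /* no-op: keep traversing the ruleset */
}

static struct xt_target example_tg_reg __read_mostly = {
        .name     = "EXAMPLE",  /* illustrative name */
        .revision = 0,
        .family   = NFPROTO_UNSPEC,
        .target   = example_tg,
        .me       = THIS_MODULE,
};

static int __init example_tg_init(void)
{
        return xt_register_target(&example_tg_reg);
}

static void __exit example_tg_exit(void)
{
        xt_unregister_target(&example_tg_reg);
}

module_init(example_tg_init);
module_exit(example_tg_exit);
MODULE_LICENSE("GPL");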
|
H A D | xt_NFQUEUE.c | 79 if (par->target->revision == 2 && info->flags > 1) nfqueue_tg_check() 81 if (par->target->revision == 3 && info->flags & ~NFQ_FLAG_MASK) nfqueue_tg_check() 116 .target = nfqueue_tg, 125 .target = nfqueue_tg_v1, 134 .target = nfqueue_tg_v2, 143 .target = nfqueue_tg_v3,
|
H A D | xt_TRACE.c | 25 .target = trace_tg,
|
H A D | xt_HL.c | 2 * TTL modification target for IP tables 5 * Hop Limit modification target for ip6tables 25 MODULE_DESCRIPTION("Xtables: Hoplimit/TTL Limit field modification target"); 138 .target = ttl_tg, 148 .target = hl_tg6,
|
H A D | xt_nat.c | 23 par->target->name); xt_nat_checkentry_v0() 109 .target = xt_snat_target_v0, 121 .target = xt_dnat_target_v0, 132 .target = xt_snat_target_v1, 142 .target = xt_dnat_target_v1,
|
H A D | xt_REDIRECT.c | 10 * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6 75 .target = redirect_tg6, 86 .target = redirect_tg4,
|
H A D | xt_CONNSECMARK.c | 5 * with the SECMARK target and state match. 28 MODULE_DESCRIPTION("Xtables: target for copying between connection and security mark"); 94 pr_info("target only valid in the \'mangle\' " connsecmark_tg_check() 127 .target = connsecmark_tg,
|
H A D | xt_DSCP.c | 121 .target = dscp_tg, 130 .target = dscp_tg6, 140 .target = tos_tg, 149 .target = tos_tg6,
|
H A D | xt_set.c | 11 /* Kernel module which implements the set match and SET target 25 MODULE_DESCRIPTION("Xtables: IP set match and target module"); 284 pr_warn("Cannot find add_set index %u as target\n", set_target_v0_checkentry() 293 pr_warn("Cannot find del_set index %u as target\n", set_target_v0_checkentry() 302 pr_warn("Protocol error: SET target dimension is over the limit!\n"); set_target_v0_checkentry() 328 /* Revision 1 target */ 357 pr_warn("Cannot find add_set index %u as target\n", set_target_v1_checkentry() 366 pr_warn("Cannot find del_set index %u as target\n", set_target_v1_checkentry() 375 pr_warn("Protocol error: SET target dimension is over the limit!\n"); set_target_v1_checkentry() 397 /* Revision 2 target */ 424 /* Revision 3 target */ 478 pr_warn("Cannot find add_set index %u as target\n", set_target_v3_checkentry() 488 pr_warn("Cannot find del_set index %u as target\n", set_target_v3_checkentry() 513 pr_warn("Cannot find map_set index %u as target\n", set_target_v3_checkentry() 528 pr_warn("Protocol error: SET target dimension is over the limit!\n"); set_target_v3_checkentry() 655 .target = set_target_v0, 665 .target = set_target_v1, 675 .target = set_target_v1, 686 .target = set_target_v2, 696 .target = set_target_v2, 707 .target = set_target_v3, 717 .target = set_target_v3,
|
H A D | xt_CLASSIFY.c | 47 .target = classify_tg, 56 .target = classify_tg,
|
/linux-4.4.14/include/linux/netfilter_bridge/ |
H A D | ebtables.h | 43 unsigned int (*target)(struct sk_buff *skb, member in struct:ebt_watcher 45 unsigned int hook_num, const struct xt_target *target, 48 const struct xt_target *target, void *targinfo, 50 void (*destroy)(const struct xt_target *target, void *targinfo); 61 unsigned int (*target)(struct sk_buff *skb, member in struct:ebt_target 63 unsigned int hook_num, const struct xt_target *target, 66 const struct xt_target *target, void *targinfo, 68 void (*destroy)(const struct xt_target *target, void *targinfo); 125 /* True if the target is not a standard target */ 126 #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
|
/linux-4.4.14/include/linux/ |
H A D | nfs.h | 32 static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source) nfs_copy_fh() argument 34 target->size = source->size; nfs_copy_fh() 35 memcpy(target->data, source->data, source->size); nfs_copy_fh()
|
H A D | device-mapper.h | 29 * In the constructor the target parameter will already have the 32 typedef int (*dm_ctr_fn) (struct dm_target *target, 44 * = 0: The target will handle the io by resubmitting it later 46 * = 2: The target wants to push back the io 62 * multipath target might want to requeue a failed io). 63 * 2 : The target wants to push back the io 102 * target until it encounters a non-zero return code, which it then returns. 114 * 0: The target can handle the next I/O immediately. 115 * 1: The target can't handle the next I/O immediately. 138 * Information about a target type 175 * Any table that contains an instance of this target must have only one. 181 * Indicates that a target does not support read-only devices. 188 * Any device that contains a table with an instance of this target may never 189 * have tables containing any different target type. 198 * target requires. 206 /* target limits */ 210 /* If non-zero, maximum size of I/O submitted to a target. */ 215 * to the target for the purpose of flushing cache. 218 * It is a responsibility of the target driver to remap these bios 224 * The number of discard bios that will be submitted to the target. 230 * The number of WRITE SAME bios that will be submitted to the target. 237 * target to use. dm_per_bio_data returns the data location. 243 * duplicate bios should be sent to the target when writing 248 /* target specific data */ 255 * Set if this target needs to receive flushes regardless of 261 * Set if this target needs to receive discards regardless of 267 * Set if the target required discard bios to be split 273 * Set if this target does not return zeroes on discarded blocks. 278 /* Each target can link one of these into the table */ 287 * This structure shouldn't be touched directly by target drivers. 429 * Then call this once for each target. 549 * Definitions of return values from target end_io function. 555 * Definitions of return values from target map function. 591 * Sector offset taken relative to the start of the target instead of
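device-mapper.h above spells out the hooks a target type provides (ctr/dtr/map) and the DM_MAPIO_* return conventions. A hedged skeleton of a no-op target built on those hooks; a real target would parse its table arguments in ctr and remap the bio rather than complete it:

#include <linux/device-mapper.h>
#include <linux/module.h>

static int noop_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        return 0;       /* a real target parses argv and opens its devices */
}

static int noop_map(struct dm_target *ti, struct bio *bio)
{
        /* A real target would rewrite bio->bi_bdev / sector and return
         * DM_MAPIO_REMAPPED; here the target completes the io itself. */
        bio_endio(bio);
        return DM_MAPIO_SUBMITTED;
}

static struct target_type noop_target = {
        .name    = "noop-example",      /* illustrative name */
        .version = {1, 0, 0},
        .module  = THIS_MODULE,
        .ctr     = noop_ctr,
        .map     = noop_map,
};

static int __init dm_noop_init(void)
{
        return dm_register_target(&noop_target);
}

static void __exit dm_noop_exit(void)
{
        dm_unregister_target(&noop_target);
}

module_init(dm_noop_init);
module_exit(dm_noop_exit);
MODULE_LICENSE("GPL");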
|
H A D | mbus.h | 19 * The 4-bit MBUS target ID of the DRAM controller. 72 int mvebu_mbus_add_window_remap_by_id(unsigned int target, 76 int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
|
H A D | regset.h | 26 * @target: thread being examined 39 typedef int user_regset_active_fn(struct task_struct *target, 44 * @target: thread being examined 58 typedef int user_regset_get_fn(struct task_struct *target, 65 * @target: thread being examined 79 typedef int user_regset_set_fn(struct task_struct *target, 86 * @target: thread being examined 105 typedef int user_regset_writeback_fn(struct task_struct *target, 130 * have called wait_task_inactive() on. (The target thread always might 324 * @target: thread to be examined 331 static inline int copy_regset_to_user(struct task_struct *target, 345 return regset->get(target, regset, offset, size, NULL, data); 350 * @target: thread to be examined 357 static inline int copy_regset_from_user(struct task_struct *target, 371 return regset->set(target, regset, offset, size, NULL, data);
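regset.h above defines the get/set signatures and notes that exactly one of kbuf/ubuf is used per call. A sketch of a minimal ->get handler in that style; backing the regset with struct pt_regs is an assumption made for illustration:

#include <linux/regset.h>
#include <linux/sched.h>

static int example_regs_get(struct task_struct *target,
                            const struct user_regset *regset,
                            unsigned int pos, unsigned int count,
                            void *kbuf, void __user *ubuf)
{
        const struct pt_regs *regs = task_pt_regs(target);

        /* Copy the requested slice; the helper picks kbuf or ubuf. */
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   regs, 0, sizeof(*regs));
}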
|
/linux-4.4.14/arch/s390/include/asm/ |
H A D | ftrace.h | 74 unsigned long target; ftrace_generate_call_insn() local 77 target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR; ftrace_generate_call_insn() 79 insn->disp = (target - ip) / 2; ftrace_generate_call_insn()
|
H A D | jump_label.h | 47 jump_label_t target; member in struct:jump_entry
|
/linux-4.4.14/net/vmw_vsock/ |
H A D | vmci_transport_notify.h | 50 int (*poll_in) (struct sock *sk, size_t target, 52 int (*poll_out) (struct sock *sk, size_t target, 59 int (*recv_init) (struct sock *sk, size_t target, 61 int (*recv_pre_block) (struct sock *sk, size_t target, 63 int (*recv_pre_dequeue) (struct sock *sk, size_t target, 65 int (*recv_post_dequeue) (struct sock *sk, size_t target,
|
/linux-4.4.14/fs/ocfs2/dlm/ |
H A D | dlmthread.c | 301 struct dlm_lock *lock, *target; dlm_shuffle_lists() local 322 target = list_entry(res->converting.next, struct dlm_lock, list); dlm_shuffle_lists() 323 if (target->ml.convert_type == LKM_IVMODE) { dlm_shuffle_lists() 329 if (lock==target) dlm_shuffle_lists() 332 target->ml.convert_type)) { dlm_shuffle_lists() 340 if (lock->ml.highest_blocked < target->ml.convert_type) dlm_shuffle_lists() 342 target->ml.convert_type; dlm_shuffle_lists() 347 if (lock==target) dlm_shuffle_lists() 350 target->ml.convert_type)) { dlm_shuffle_lists() 356 if (lock->ml.highest_blocked < target->ml.convert_type) dlm_shuffle_lists() 358 target->ml.convert_type; dlm_shuffle_lists() 364 spin_lock(&target->spinlock); dlm_shuffle_lists() 365 BUG_ON(target->ml.highest_blocked != LKM_IVMODE); dlm_shuffle_lists() 370 dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)), dlm_shuffle_lists() 371 dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)), dlm_shuffle_lists() 372 target->ml.type, dlm_shuffle_lists() 373 target->ml.convert_type, target->ml.node); dlm_shuffle_lists() 375 target->ml.type = target->ml.convert_type; dlm_shuffle_lists() 376 target->ml.convert_type = LKM_IVMODE; dlm_shuffle_lists() 377 list_move_tail(&target->list, &res->granted); dlm_shuffle_lists() 379 BUG_ON(!target->lksb); dlm_shuffle_lists() 380 target->lksb->status = DLM_NORMAL; dlm_shuffle_lists() 382 spin_unlock(&target->spinlock); dlm_shuffle_lists() 385 __dlm_queue_ast(dlm, target); dlm_shuffle_lists() 393 target = list_entry(res->blocked.next, struct dlm_lock, list); dlm_shuffle_lists() 396 if (lock==target) dlm_shuffle_lists() 398 if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { dlm_shuffle_lists() 404 if (lock->ml.highest_blocked < target->ml.type) dlm_shuffle_lists() 405 lock->ml.highest_blocked = target->ml.type; dlm_shuffle_lists() 410 if (lock==target) dlm_shuffle_lists() 412 if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { dlm_shuffle_lists() 418 if (lock->ml.highest_blocked < target->ml.type) dlm_shuffle_lists() 419 lock->ml.highest_blocked = target->ml.type; dlm_shuffle_lists() 426 spin_lock(&target->spinlock); dlm_shuffle_lists() 427 BUG_ON(target->ml.highest_blocked != LKM_IVMODE); dlm_shuffle_lists() 432 dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)), dlm_shuffle_lists() 433 dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)), dlm_shuffle_lists() 434 target->ml.type, target->ml.node); dlm_shuffle_lists() 436 /* target->ml.type is already correct */ dlm_shuffle_lists() 437 list_move_tail(&target->list, &res->granted); dlm_shuffle_lists() 439 BUG_ON(!target->lksb); dlm_shuffle_lists() 440 target->lksb->status = DLM_NORMAL; dlm_shuffle_lists() 442 spin_unlock(&target->spinlock); dlm_shuffle_lists() 445 __dlm_queue_ast(dlm, target); dlm_shuffle_lists()
|
/linux-4.4.14/drivers/char/agp/ |
H A D | isoch.c | 82 struct isoch_data *master, target; agp_3_5_isochronous_node_enable() local 106 * We don't exactly do this, we divide target resources by ndevs agp_3_5_isochronous_node_enable() 121 /* Extract power-on defaults from the target */ agp_3_5_isochronous_node_enable() 122 target.maxbw = (tnistat >> 16) & 0xff; agp_3_5_isochronous_node_enable() 123 target.n = (tnistat >> 8) & 0xff; agp_3_5_isochronous_node_enable() 124 target.y = (tnistat >> 6) & 0x3; agp_3_5_isochronous_node_enable() 125 target.l = (tnistat >> 3) & 0x7; agp_3_5_isochronous_node_enable() 126 target.rq = (tstatus >> 24) & 0xff; agp_3_5_isochronous_node_enable() 128 y_max = target.y; agp_3_5_isochronous_node_enable() 155 if (tot_bw > target.maxbw) { 163 target.y = y_max; 166 * Write the calculated payload size into the target's NICMD 168 * in the target's NISTAT register, so we need to do this now 173 tnicmd |= target.y << 6; 176 /* Reread the target's ISOCH_N */ 178 target.n = (tnistat >> 8) & 0xff; 182 master[cdev].y = target.y; 189 * than the target can handle. */ 190 if (tot_n > target.n) { 199 /* Calculate left over ISOCH_N capability in the target. We'll give 201 rem = target.n - tot_n; 225 * target is providing. */ 226 rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n; 227 rq_async = target.rq - rq_isoch; 229 /* Exit if the minimal RQ needs of the masters exceeds what the target 240 /* Calculate asynchronous RQ capability in the target (per master) as 283 * target by ndevs. Distribute this many slots to each AGP 3.0 device, 329 /* Extract some power-on defaults from the target */ agp_3_5_enable() 442 * Call functions to divide target resources amongst the AGP 3.0
|
/linux-4.4.14/include/xen/ |
H A D | balloon.h | 8 /* We aim for 'current allocation' == 'target allocation'. */ 24 void balloon_set_new_target(unsigned long target);
|
/linux-4.4.14/tools/testing/selftests/timers/ |
H A D | nsleep-lat.c | 111 struct timespec start, end, target; nanosleep_lat_test() local 115 target.tv_sec = ns/NSEC_PER_SEC; nanosleep_lat_test() 116 target.tv_nsec = ns%NSEC_PER_SEC; nanosleep_lat_test() 120 if (clock_nanosleep(clockid, 0, &target, NULL)) nanosleep_lat_test() 128 clock_nanosleep(clockid, 0, &target, NULL); nanosleep_lat_test() 139 target = timespec_add(start, ns); nanosleep_lat_test() 140 clock_nanosleep(clockid, TIMER_ABSTIME, &target, NULL); nanosleep_lat_test() 142 latency += timespec_sub(target, end); nanosleep_lat_test()
|
H A D | nanosleep.c | 114 struct timespec now, target, rel; nanosleep_test() local 119 target = timespec_add(now, ns); nanosleep_test() 121 if (clock_nanosleep(clockid, TIMER_ABSTIME, &target, NULL)) nanosleep_test() 125 if (!in_order(target, now)) nanosleep_test() 133 target = timespec_add(now, ns); nanosleep_test() 137 if (!in_order(target, now)) nanosleep_test()
|
H A D | mqueue-lat.c | 74 struct timespec start, end, now, target; mqueue_lat_test() local 92 target = now; mqueue_lat_test() 93 target = timespec_add(now, TARGET_TIMEOUT); /* 100ms */ mqueue_lat_test() 95 ret = mq_timedreceive(q, buf, sizeof(buf), NULL, &target); mqueue_lat_test()
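All three timer selftests above build absolute deadlines with a timespec_add() helper. A sketch of that helper, carrying nanosecond overflow into tv_sec:

#include <time.h>

#define NSEC_PER_SEC 1000000000LL

static struct timespec timespec_add(struct timespec ts, unsigned long long ns)
{
        ts.tv_sec  += ns / NSEC_PER_SEC;
        ts.tv_nsec += ns % NSEC_PER_SEC;
        if (ts.tv_nsec >= NSEC_PER_SEC) {
                ts.tv_nsec -= NSEC_PER_SEC;
                ts.tv_sec++;
        }
        return ts;
}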
|
/linux-4.4.14/arch/blackfin/include/asm/ |
H A D | elf.h | 70 #define R_BFIN_PCREL10 3 /* type 3, if cc jump <target> */ 71 #define R_BFIN_PCREL12_JUMP 4 /* type 4, jump <target> */ 72 #define R_BFIN_RIMM16 5 /* type 0x5, rN = <target> */ 73 #define R_BFIN_LUIMM16 6 /* # 0x6, preg.l=<target> Load imm 16 to lower half */ 74 #define R_BFIN_HUIMM16 7 /* # 0x7, preg.h=<target> Load imm 16 to upper half */ 75 #define R_BFIN_PCREL12_JUMP_S 8 /* # 0x8 jump.s <target> */ 76 #define R_BFIN_PCREL24_JUMP_X 9 /* # 0x9 jump.x <target> */ 77 #define R_BFIN_PCREL24 10 /* # 0xa call <target> , not expandable */ 80 #define R_BFIN_PCREL24_JUMP_L 13 /* 0xd jump.l <target> */ 81 #define R_BFIN_PCREL24_CALL_X 14 /* 0xE, call.x <target> if <target> is above 24 bit limit call through P1 */
|
/linux-4.4.14/arch/x86/kernel/fpu/ |
H A D | regset.c | 13 int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset) regset_fpregs_active() argument 15 struct fpu *target_fpu = &target->thread.fpu; regset_fpregs_active() 20 int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset) regset_xregset_fpregs_active() argument 22 struct fpu *target_fpu = &target->thread.fpu; regset_xregset_fpregs_active() 27 int xfpregs_get(struct task_struct *target, const struct user_regset *regset, xfpregs_get() argument 31 struct fpu *fpu = &target->thread.fpu; xfpregs_get() 43 int xfpregs_set(struct task_struct *target, const struct user_regset *regset, xfpregs_set() argument 47 struct fpu *fpu = &target->thread.fpu; xfpregs_set() 74 int xstateregs_get(struct task_struct *target, const struct user_regset *regset, xstateregs_get() argument 78 struct fpu *fpu = &target->thread.fpu; xstateregs_get() 103 int xstateregs_set(struct task_struct *target, const struct user_regset *regset, xstateregs_set() argument 107 struct fpu *fpu = &target->thread.fpu; xstateregs_set() 269 int fpregs_get(struct task_struct *target, const struct user_regset *regset, fpregs_get() argument 273 struct fpu *fpu = &target->thread.fpu; fpregs_get() 279 return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); fpregs_get() 289 convert_from_fxsr(kbuf, target); fpregs_get() 293 convert_from_fxsr(&env, target); fpregs_get() 298 int fpregs_set(struct task_struct *target, const struct user_regset *regset, fpregs_set() argument 302 struct fpu *fpu = &target->thread.fpu; fpregs_set() 310 return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf); fpregs_set() 318 convert_from_fxsr(&env, target); fpregs_set() 322 convert_to_fxsr(target, &env); fpregs_set()
|
/linux-4.4.14/drivers/nfc/pn544/ |
H A D | pn544.c | 456 struct nfc_target *target, u8 comm_mode, pn544_hci_dep_link_up() 462 r = nfc_hci_get_param(hdev, target->hci_reader_gate, pn544_hci_dep_link_up() 478 r = nfc_dep_link_is_up(hdev->ndev, target->idx, comm_mode, pn544_hci_dep_link_up() 493 struct nfc_target *target) pn544_hci_target_from_gate() 497 target->supported_protocols = NFC_PROTO_FELICA_MASK; pn544_hci_target_from_gate() 500 target->supported_protocols = NFC_PROTO_JEWEL_MASK; pn544_hci_target_from_gate() 501 target->sens_res = 0x0c00; pn544_hci_target_from_gate() 504 target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; pn544_hci_target_from_gate() 515 struct nfc_target *target) pn544_hci_complete_target_discovered() 523 if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) { pn544_hci_complete_target_discovered() 530 target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE; pn544_hci_complete_target_discovered() 531 } else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) { pn544_hci_complete_target_discovered() 532 if (target->nfcid1_len != 4 && target->nfcid1_len != 7 && pn544_hci_complete_target_discovered() 533 target->nfcid1_len != 10) pn544_hci_complete_target_discovered() 538 target->nfcid1, target->nfcid1_len, NULL); pn544_hci_complete_target_discovered() 539 } else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) { pn544_hci_complete_target_discovered() 560 target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; pn544_hci_complete_target_discovered() 561 target->hci_reader_gate = pn544_hci_complete_target_discovered() 569 } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) { pn544_hci_complete_target_discovered() 574 if (target->sens_res == 0x4403) /* Type 4 Mifare DESFire */ pn544_hci_complete_target_discovered() 615 struct nfc_target *target, pn544_hci_im_transceive() 622 target->hci_reader_gate); pn544_hci_im_transceive() 624 switch (target->hci_reader_gate) { pn544_hci_im_transceive() 626 if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) { pn544_hci_im_transceive() 645 target->hci_reader_gate, pn544_hci_im_transceive() 659 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, pn544_hci_im_transceive() 664 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, pn544_hci_im_transceive() 670 return nfc_hci_send_event(hdev, target->hci_reader_gate, pn544_hci_im_transceive() 694 struct nfc_target *target) pn544_hci_check_presence() 696 pr_debug("supported protocol %d\b", target->supported_protocols); pn544_hci_check_presence() 697 if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK | pn544_hci_check_presence() 699 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, pn544_hci_check_presence() 702 } else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) { pn544_hci_check_presence() 703 if (target->nfcid1_len != 4 && target->nfcid1_len != 7 && pn544_hci_check_presence() 704 target->nfcid1_len != 10) pn544_hci_check_presence() 709 target->nfcid1, target->nfcid1_len, NULL); pn544_hci_check_presence() 710 } else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK | pn544_hci_check_presence() 713 } else if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) { pn544_hci_check_presence() 714 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, pn544_hci_check_presence() 455 pn544_hci_dep_link_up(struct nfc_hci_dev *hdev, struct nfc_target *target, u8 comm_mode, u8 *gb, size_t gb_len) pn544_hci_dep_link_up() argument 492 pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) pn544_hci_target_from_gate() argument 513 pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) pn544_hci_complete_target_discovered() argument 614 pn544_hci_im_transceive(struct nfc_hci_dev *hdev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) pn544_hci_im_transceive() argument 693 pn544_hci_check_presence(struct nfc_hci_dev *hdev, struct nfc_target *target) pn544_hci_check_presence() argument
|
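The pn544 callbacks excerpted above reach the NFC HCI core through a struct nfc_hci_ops table. A minimal sketch of that wiring, using the field names of include/net/nfc/hci.h (the driver's real table also fills open/close/xmit and the remaining hooks):

    /* Sketch only: how pn544 exposes the excerpted callbacks to the
     * HCI core; remaining hooks and error handling omitted. */
    static struct nfc_hci_ops pn544_hci_ops = {
            .dep_link_up                = pn544_hci_dep_link_up,
            .target_from_gate           = pn544_hci_target_from_gate,
            .complete_target_discovered = pn544_hci_complete_target_discovered,
            .im_transceive              = pn544_hci_im_transceive,
            .check_presence             = pn544_hci_check_presence,
    };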
/linux-4.4.14/arch/sh/kernel/ |
H A D | ptrace_32.c | 138 static int genregs_get(struct task_struct *target, genregs_get() argument 143 const struct pt_regs *regs = task_pt_regs(target); genregs_get() 162 static int genregs_set(struct task_struct *target, genregs_set() argument 167 struct pt_regs *regs = task_pt_regs(target); genregs_set() 186 int fpregs_get(struct task_struct *target, fpregs_get() argument 193 ret = init_fpu(target); fpregs_get() 199 &target->thread.xstate->hardfpu, 0, -1); fpregs_get() 202 &target->thread.xstate->softfpu, 0, -1); fpregs_get() 205 static int fpregs_set(struct task_struct *target, fpregs_set() argument 212 ret = init_fpu(target); fpregs_set() 216 set_stopped_child_used_math(target); fpregs_set() 220 &target->thread.xstate->hardfpu, 0, -1); fpregs_set() 223 &target->thread.xstate->softfpu, 0, -1); fpregs_set() 226 static int fpregs_active(struct task_struct *target, fpregs_active() argument 229 return tsk_used_math(target) ? regset->n : 0; fpregs_active() 234 static int dspregs_get(struct task_struct *target, dspregs_get() argument 240 (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs; dspregs_get() 252 static int dspregs_set(struct task_struct *target, dspregs_set() argument 258 (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs; dspregs_set() 270 static int dspregs_active(struct task_struct *target, dspregs_active() argument 273 struct pt_regs *regs = task_pt_regs(target); dspregs_active()
|
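genregs_get()/genregs_set() and the FP/DSP handlers above only take effect once they are listed in a struct user_regset table that ptrace and core-dump code walk. A sketch under the 4.4-era regset API (register counts illustrative, not the exact SH values):

    /* Sketch: publishing the handlers above as regsets.  NT_PRSTATUS
     * and NT_PRFPREG are the ELF core-note types the entries map to. */
    static const struct user_regset sh_regsets[] = {
            {
                    .core_note_type = NT_PRSTATUS,
                    .n              = ELF_NGREG,
                    .size           = sizeof(long),
                    .align          = sizeof(long),
                    .get            = genregs_get,
                    .set            = genregs_set,
            }, {
                    .core_note_type = NT_PRFPREG,
                    .n              = sizeof(struct user_fpu_struct) / sizeof(long),
                    .size           = sizeof(long),
                    .align          = sizeof(long),
                    .get            = fpregs_get,
                    .set            = fpregs_set,
                    .active         = fpregs_active,
            },
    };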
H A D | ptrace_64.c | 148 static int genregs_get(struct task_struct *target, genregs_get() argument 153 const struct pt_regs *regs = task_pt_regs(target); genregs_get() 181 static int genregs_set(struct task_struct *target, genregs_set() argument 186 struct pt_regs *regs = task_pt_regs(target); genregs_set() 216 int fpregs_get(struct task_struct *target, fpregs_get() argument 223 ret = init_fpu(target); fpregs_get() 228 &target->thread.xstate->hardfpu, 0, -1); fpregs_get() 231 static int fpregs_set(struct task_struct *target, fpregs_set() argument 238 ret = init_fpu(target); fpregs_set() 242 set_stopped_child_used_math(target); fpregs_set() 245 &target->thread.xstate->hardfpu, 0, -1); fpregs_set() 248 static int fpregs_active(struct task_struct *target, fpregs_active() argument 251 return tsk_used_math(target) ? regset->n : 0; fpregs_active()
|
/linux-4.4.14/include/uapi/linux/netfilter_arp/ |
H A D | arpt_mangle.h | 17 int target; member in struct:arpt_mangle
|
/linux-4.4.14/net/nfc/ |
H A D | digital_technology.c | 173 struct nfc_target *target); 226 struct nfc_target *target = arg; digital_in_recv_ats() local 249 rc = digital_target_found(ddev, target, NFC_PROTO_ISO14443); digital_in_recv_ats() 253 kfree(target); digital_in_recv_ats() 260 struct nfc_target *target) digital_in_send_rats() 273 target); digital_in_send_rats() 283 struct nfc_target *target = arg; digital_in_recv_sel_res() local 310 rc = digital_in_send_sdd_req(ddev, target); digital_in_recv_sel_res() 317 target->sel_res = sel_res; digital_in_recv_sel_res() 324 rc = digital_in_send_rats(ddev, target); digital_in_recv_sel_res() 337 rc = digital_target_found(ddev, target, nfc_proto); digital_in_recv_sel_res() 340 kfree(target); digital_in_recv_sel_res() 350 struct nfc_target *target, digital_in_send_sel_req() 365 if (target->nfcid1_len <= 4) digital_in_send_sel_req() 367 else if (target->nfcid1_len < 10) digital_in_send_sel_req() 387 target); digital_in_send_sel_req() 398 struct nfc_target *target = arg; digital_in_recv_sdd_res() local 435 memcpy(target->nfcid1 + target->nfcid1_len, sdd_res->nfcid1 + offset, digital_in_recv_sdd_res() 437 target->nfcid1_len += size; digital_in_recv_sdd_res() 439 rc = digital_in_send_sel_req(ddev, target, sdd_res); digital_in_recv_sdd_res() 445 kfree(target); digital_in_recv_sdd_res() 451 struct nfc_target *target) digital_in_send_sdd_req() 466 if (target->nfcid1_len == 0) digital_in_send_sdd_req() 468 else if (target->nfcid1_len == 3) digital_in_send_sdd_req() 477 target); digital_in_send_sdd_req() 483 struct nfc_target *target = NULL; digital_in_recv_sens_res() local 497 target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL); digital_in_recv_sens_res() 498 if (!target) { digital_in_recv_sens_res() 503 target->sens_res = __le16_to_cpu(*(__le16 *)resp->data); digital_in_recv_sens_res() 505 if (!DIGITAL_SENS_RES_IS_VALID(target->sens_res)) { digital_in_recv_sens_res() 511 if (DIGITAL_SENS_RES_IS_T1T(target->sens_res)) digital_in_recv_sens_res() 512 rc = digital_target_found(ddev, target, NFC_PROTO_JEWEL); digital_in_recv_sens_res() 514 rc = digital_in_send_sdd_req(ddev, target); digital_in_recv_sens_res() 520 kfree(target); digital_in_recv_sens_res() 582 struct nfc_target *target = arg; digital_in_recv_attrib_res() local 606 rc = digital_target_found(ddev, target, NFC_PROTO_ISO14443_B); digital_in_recv_attrib_res() 610 kfree(target); digital_in_recv_attrib_res() 617 struct nfc_target *target, digital_in_send_attrib_req() 643 target); digital_in_send_attrib_req() 653 struct nfc_target *target = NULL; digital_in_recv_sensb_res() local 696 target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL); digital_in_recv_sensb_res() 697 if (!target) { digital_in_recv_sensb_res() 702 rc = digital_in_send_attrib_req(ddev, target, sensb_res); digital_in_recv_sensb_res() 708 kfree(target); digital_in_recv_sensb_res() 753 struct nfc_target target; digital_in_recv_sensf_res() local 777 memset(&target, 0, sizeof(struct nfc_target)); digital_in_recv_sensf_res() 781 memcpy(target.sensf_res, sensf_res, resp->len); digital_in_recv_sensf_res() 782 target.sensf_res_len = resp->len; digital_in_recv_sensf_res() 784 memcpy(target.nfcid2, sensf_res->nfcid2, NFC_NFCID2_MAXSIZE); digital_in_recv_sensf_res() 785 target.nfcid2_len = NFC_NFCID2_MAXSIZE; digital_in_recv_sensf_res() 787 if (target.nfcid2[0] == DIGITAL_SENSF_NFCID2_NFC_DEP_B1 && digital_in_recv_sensf_res() 788 target.nfcid2[1] == DIGITAL_SENSF_NFCID2_NFC_DEP_B2) digital_in_recv_sensf_res() 793 rc = digital_target_found(ddev, &target, proto); 
digital_in_recv_sensf_res() 850 struct nfc_target *target = NULL; digital_in_recv_iso15693_inv_res() local 872 target = kzalloc(sizeof(*target), GFP_KERNEL); digital_in_recv_iso15693_inv_res() 873 if (!target) { digital_in_recv_iso15693_inv_res() 878 target->is_iso15693 = 1; digital_in_recv_iso15693_inv_res() 879 target->iso15693_dsfid = res->dsfid; digital_in_recv_iso15693_inv_res() 880 memcpy(target->iso15693_uid, &res->uid, sizeof(target->iso15693_uid)); digital_in_recv_iso15693_inv_res() 882 rc = digital_target_found(ddev, target, NFC_PROTO_ISO15693); digital_in_recv_iso15693_inv_res() 884 kfree(target); digital_in_recv_iso15693_inv_res() 259 digital_in_send_rats(struct nfc_digital_dev *ddev, struct nfc_target *target) digital_in_send_rats() argument 349 digital_in_send_sel_req(struct nfc_digital_dev *ddev, struct nfc_target *target, struct digital_sdd_res *sdd_res) digital_in_send_sel_req() argument 450 digital_in_send_sdd_req(struct nfc_digital_dev *ddev, struct nfc_target *target) digital_in_send_sdd_req() argument 616 digital_in_send_attrib_req(struct nfc_digital_dev *ddev, struct nfc_target *target, struct digital_sensb_res *sensb_res) digital_in_send_attrib_req() argument
|
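The Type A callbacks above chain asynchronously: each _send_ helper registers the matching _recv_ handler as its completion callback. The resulting poll sequence, condensed from the excerpt (comment sketch, not verbatim code):

    /*
     * NFC-A anticollision flow implemented by the callbacks above:
     *
     *   SENS_REQ -> digital_in_recv_sens_res()
     *                 |- T1T sens_res -------> digital_target_found(JEWEL)
     *                 `- else ---------------> digital_in_send_sdd_req()
     *   SDD_REQ  -> digital_in_recv_sdd_res() -> digital_in_send_sel_req()
     *   SEL_REQ  -> digital_in_recv_sel_res()
     *                 |- cascade (nfcid1 incomplete) -> digital_in_send_sdd_req()
     *                 |- ISO14443 sel_res ----> digital_in_send_rats()
     *                 `- else ---------------> digital_target_found(...)
     *   RATS     -> digital_in_recv_ats() ---> digital_target_found(ISO14443)
     */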
H A D | digital_core.c | 308 struct nfc_target *target, u8 protocol) digital_target_found() 389 target->supported_protocols = (1 << protocol); digital_target_found() 394 rc = nfc_targets_found(ddev->nfc_dev, target, 1); digital_target_found() 493 pr_err("A target is already active\n"); digital_start_poll() 590 struct nfc_target *target, digital_dep_link_up() 596 rc = digital_in_send_atr_req(ddev, target, comm_mode, gb, gb_len); digital_dep_link_up() 614 struct nfc_target *target, __u32 protocol) digital_activate_target() 619 pr_err("Can't activate a target while polling\n"); digital_activate_target() 624 pr_err("A target is already active\n"); digital_activate_target() 634 struct nfc_target *target, digital_deactivate_target() 640 pr_err("No active target\n"); digital_deactivate_target() 692 static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target, digital_in_send() argument 710 rc = digital_in_send_dep_req(ddev, target, skb, data_exch); digital_in_send() 307 digital_target_found(struct nfc_digital_dev *ddev, struct nfc_target *target, u8 protocol) digital_target_found() argument 589 digital_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, __u8 comm_mode, __u8 *gb, size_t gb_len) digital_dep_link_up() argument 613 digital_activate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, __u32 protocol) digital_activate_target() argument 633 digital_deactivate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 mode) digital_deactivate_target() argument
|
H A D | core.c | 206 * The device remains polling for targets until a target is found or 213 pr_debug("dev_name %s initiator protocols 0x%x target protocols 0x%x\n", nfc_start_poll() 296 struct nfc_target *target; nfc_dep_link_up() local 321 target = nfc_find_target(dev, target_index); nfc_dep_link_up() 322 if (target == NULL) { nfc_dep_link_up() 327 rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len); nfc_dep_link_up() 329 dev->active_target = target; nfc_dep_link_up() 380 struct nfc_target *target; nfc_dep_link_is_up() local 382 target = nfc_find_target(dev, target_idx); nfc_dep_link_is_up() 383 if (target == NULL) nfc_dep_link_is_up() 386 dev->active_target = target; nfc_dep_link_is_up() 399 * nfc_activate_target - prepare the target for data exchange 401 * @dev: The nfc device that found the target 402 * @target_idx: index of the target that must be activated 408 struct nfc_target *target; nfc_activate_target() local 425 target = nfc_find_target(dev, target_idx); nfc_activate_target() 426 if (target == NULL) { nfc_activate_target() 431 rc = dev->ops->activate_target(dev, target, protocol); nfc_activate_target() 433 dev->active_target = target; nfc_activate_target() 447 * nfc_deactivate_target - deactivate a nfc target 449 * @dev: The nfc device that found the target 450 * @target_idx: index of the target that must be deactivated 490 * @dev: The nfc device that found the target 491 * @target_idx: index of the target 667 /* Only LLCP target mode for now */ nfc_tm_data_received() 821 * nfc_target_lost - inform that an activated target went out of field 823 * @dev: The nfc device that had the activated target in field 824 * @target_idx: the nfc index of the target 826 * The device driver must call this function when the activated target
|
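Condensed from nfc_activate_target() above: the core resolves the index to a struct nfc_target, delegates to the driver, and only records the active target on success (error value illustrative):

    target = nfc_find_target(dev, target_idx);
    if (target == NULL)
            return -ENOTCONN;       /* illustrative error code */
    rc = dev->ops->activate_target(dev, target, protocol);
    if (rc == 0)
            dev->active_target = target;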
/linux-4.4.14/arch/powerpc/include/asm/ |
H A D | code-patching.h | 17 * "b" == create_branch(addr, target, 0); 18 * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE); 19 * "bl" == create_branch(addr, target, BRANCH_SET_LINK); 20 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK); 26 unsigned long target, int flags); 28 unsigned long target, int flags); 29 int patch_branch(unsigned int *addr, unsigned long target, int flags);
|
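The header comment above maps PPC branch mnemonics to flag combinations. A usage sketch (addr and func are hypothetical; create_branch() yields 0 when the displacement cannot be encoded, which is why the combined patch_branch() is the usual entry point):

    #include <asm/code-patching.h>

    /* Sketch: turn the instruction at addr into "bl func". */
    unsigned int instr = create_branch(addr, (unsigned long)func,
                                       BRANCH_SET_LINK);
    if (instr)
            patch_instruction(addr, instr);

    /* one-step equivalent: */
    patch_branch(addr, (unsigned long)func, BRANCH_SET_LINK);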
/linux-4.4.14/drivers/nfc/st21nfca/ |
H A D | core.c | 588 struct nfc_target *target) st21nfca_get_iso15693_inventory() 607 memcpy(target->iso15693_uid, inventory_skb->data, inventory_skb->len); st21nfca_get_iso15693_inventory() 608 target->iso15693_dsfid = inventory_skb->data[1]; st21nfca_get_iso15693_inventory() 609 target->is_iso15693 = 1; st21nfca_get_iso15693_inventory() 616 struct nfc_target *target, u8 comm_mode, st21nfca_hci_dep_link_up() 621 info->dep_info.idx = target->idx; st21nfca_hci_dep_link_up() 636 struct nfc_target *target) st21nfca_hci_target_from_gate() 645 target->supported_protocols = NFC_PROTO_FELICA_MASK; st21nfca_hci_target_from_gate() 653 target->supported_protocols = NFC_PROTO_JEWEL_MASK; st21nfca_hci_target_from_gate() 654 target->sens_res = 0x0c00; st21nfca_hci_target_from_gate() 664 target->supported_protocols = st21nfca_hci_target_from_gate() 666 if (target->supported_protocols == 0xffffffff) st21nfca_hci_target_from_gate() 669 target->sens_res = atqa; st21nfca_hci_target_from_gate() 670 target->sel_res = sak; st21nfca_hci_target_from_gate() 671 memcpy(target->nfcid1, uid, len); st21nfca_hci_target_from_gate() 672 target->nfcid1_len = len; st21nfca_hci_target_from_gate() 677 target->supported_protocols = NFC_PROTO_ISO15693_MASK; st21nfca_hci_target_from_gate() 678 r = st21nfca_get_iso15693_inventory(hdev, target); st21nfca_hci_target_from_gate() 691 struct nfc_target *target) st21nfca_hci_complete_target_discovered() 716 memcpy(target->sensf_res, nfcid_skb->data, st21nfca_hci_complete_target_discovered() 718 target->sensf_res_len = nfcid_skb->len; st21nfca_hci_complete_target_discovered() 720 if (target->sensf_res[0] == 0x01 && st21nfca_hci_complete_target_discovered() 721 target->sensf_res[1] == 0xfe) st21nfca_hci_complete_target_discovered() 722 target->supported_protocols = st21nfca_hci_complete_target_discovered() 725 target->supported_protocols = st21nfca_hci_complete_target_discovered() 740 memcpy(target->sensf_res, nfcid_skb->data, st21nfca_hci_complete_target_discovered() 742 target->sensf_res_len = nfcid_skb->len; st21nfca_hci_complete_target_discovered() 743 target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; st21nfca_hci_complete_target_discovered() 745 target->hci_reader_gate = ST21NFCA_RF_READER_F_GATE; st21nfca_hci_complete_target_discovered() 778 struct nfc_target *target, st21nfca_hci_im_transceive() 785 target->hci_reader_gate, skb->len); st21nfca_hci_im_transceive() 787 switch (target->hci_reader_gate) { st21nfca_hci_im_transceive() 789 if (target->supported_protocols == NFC_PROTO_NFC_DEP_MASK) st21nfca_hci_im_transceive() 793 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, st21nfca_hci_im_transceive() 799 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, st21nfca_hci_im_transceive() 809 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, st21nfca_hci_im_transceive() 826 struct nfc_target *target) st21nfca_hci_check_presence() 830 switch (target->hci_reader_gate) { st21nfca_hci_check_presence() 840 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, st21nfca_hci_check_presence() 843 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, st21nfca_hci_check_presence() 587 st21nfca_get_iso15693_inventory(struct nfc_hci_dev *hdev, struct nfc_target *target) st21nfca_get_iso15693_inventory() argument 615 st21nfca_hci_dep_link_up(struct nfc_hci_dev *hdev, struct nfc_target *target, u8 comm_mode, u8 *gb, size_t gb_len) st21nfca_hci_dep_link_up() argument 635 st21nfca_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) 
st21nfca_hci_target_from_gate() argument 689 st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) st21nfca_hci_complete_target_discovered() argument 777 st21nfca_hci_im_transceive(struct nfc_hci_dev *hdev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) st21nfca_hci_im_transceive() argument 825 st21nfca_hci_check_presence(struct nfc_hci_dev *hdev, struct nfc_target *target) st21nfca_hci_check_presence() argument
|
/linux-4.4.14/arch/s390/numa/ |
H A D | toptree.c | 105 * @target: Pointer to the node to which @cand will added as a child 113 static int toptree_insert(struct toptree *cand, struct toptree *target) toptree_insert() argument 115 if (!cand || !target) toptree_insert() 117 if (target->level != (cand->level + 1)) toptree_insert() 119 list_add_tail(&cand->sibling, &target->children); toptree_insert() 120 cand->parent = target; toptree_insert() 121 toptree_update_mask(target); toptree_insert() 128 * @target: Pointer to the node to which @cand's children will be attached 132 static void toptree_move_children(struct toptree *cand, struct toptree *target) toptree_move_children() argument 137 toptree_move(child, target); toptree_move_children() 176 * @target: Pointer to the node where @cand should go 178 * In the easiest case @cand is exactly on the level below @target 179 * and will be immediately moved to the target. 181 * If @target's level is not the direct parent level of @cand, 183 * @cand and @target. The "stacking" nodes' IDs are taken from 189 void toptree_move(struct toptree *cand, struct toptree *target) toptree_move() argument 193 if (cand->level + 1 == target->level) { toptree_move() 195 toptree_insert(cand, target); toptree_move() 211 } while (stack_target->level < (target->level - 1)); toptree_move() 215 toptree_insert(stack_target, target); toptree_move()
|
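The toptree_move() kernel-doc above describes fabricating "stacking" nodes when @cand sits more than one level below @target. A worked illustration with hypothetical level numbering:

    /* Hypothetical levels: CORE = 0, MC = 1, BOOK = 2, NODE = 3.
     *
     * toptree_move(core, node) with core->level == 0, node->level == 3:
     * toptree_insert() would refuse (3 != 0 + 1), so the move path
     * creates intermediate MC and BOOK stacking nodes (IDs copied from
     * core's former parents) and attaches node -> book -> mc -> core.
     */
    toptree_move(core, node);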
/linux-4.4.14/security/apparmor/ |
H A D | file.c | 85 if (sa->aad->fs.target) { file_audit_cb() 86 audit_log_format(ab, " target="); file_audit_cb() 87 audit_log_untrustedstring(ab, sa->aad->fs.target); file_audit_cb() 99 * @target: name of target (MAYBE NULL) 108 const char *target, kuid_t ouid, const char *info, int error) aa_audit_file() 118 aad.fs.target = target; aa_audit_file() 313 * @target: target permission set 315 * test target x permissions are equal OR a subset of link x permissions 317 * a subset of permissions that the target has. 321 static inline bool xindex_is_subset(u32 link, u32 target) xindex_is_subset() argument 323 if (((link & ~AA_X_UNSAFE) != (target & ~AA_X_UNSAFE)) || xindex_is_subset() 324 ((link & AA_X_UNSAFE) && !(target & AA_X_UNSAFE))) xindex_is_subset() 333 * @old_dentry: the target dentry (NOT NULL) 337 * Handle the permission test for a link & target pair. Permission 339 * first, and if allowed, the target is tested. The target test 341 * making the target permission dependent on the link permission match. 344 * on link are a subset of the permission granted to target. 352 struct path target = { new_dir->mnt, old_dentry }; aa_path_link() local 373 error = aa_path_name(&target, profile->path_flags, &buffer2, &tname, aa_path_link() 386 /* test to see if target can be paired with link */ aa_path_link() 398 info = "target restricted"; aa_path_link() 407 * subset of the allowed permissions on target. aa_path_link() 423 info = "link not subset of target"; aa_path_link() 106 aa_audit_file(struct aa_profile *profile, struct file_perms *perms, gfp_t gfp, int op, u32 request, const char *name, const char *target, kuid_t ouid, const char *info, int error) aa_audit_file() argument
|
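xindex_is_subset() above reduces "link x permissions must be a subset of the target's" to two bit tests: the non-AA_X_UNSAFE bits must match exactly, and the link may not carry AA_X_UNSAFE unless the target does. A worked example with illustrative bit values:

    /* Illustrative values only -- not the real AA_X_* encoding:
     *   AA_X_UNSAFE = 0x8, link = 0xb, target = 0x3
     *
     *   link & ~0x8 == 0x3, target & ~0x8 == 0x3  -> plain bits match
     *   link & 0x8 set, target & 0x8 clear        -> fails
     *
     * xindex_is_subset(0xb, 0x3) == false, and aa_path_link() reports
     * "link not subset of target", as in the excerpt above.
     */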
H A D | ipc.c | 28 audit_log_format(ab, " target="); audit_cb() 29 audit_log_untrustedstring(ab, sa->aad->target); audit_cb() 35 * @target: profile being traced (NOT NULL) 41 struct aa_profile *target, int error) aa_audit_ptrace() 48 aad.target = target; aa_audit_ptrace() 40 aa_audit_ptrace(struct aa_profile *profile, struct aa_profile *target, int error) aa_audit_ptrace() argument
|
H A D | domain.c | 291 * x_to_profile - get target profile for a given xindex 349 const char *name = NULL, *target = NULL, *info = NULL; apparmor_bprm_set_creds() local 450 target = new_profile->base.hname; apparmor_bprm_set_creds() 501 target = new_profile->base.hname; apparmor_bprm_set_creds() 515 name, target, cond.uid, info, error); apparmor_bprm_set_creds() 612 const char *target = NULL, *info = NULL; aa_change_hat() local 669 target = name; aa_change_hat() 679 target = hat->base.hname; aa_change_hat() 681 info = "target not hat"; aa_change_hat() 707 target = previous_profile->base.hname; aa_change_hat() 718 target, GLOBAL_ROOT_UID, info, error); aa_change_hat() 747 struct aa_profile *profile, *target = NULL; aa_change_profile() local 810 target = aa_lookup_profile(ns, hname); aa_change_profile() 811 if (!target) { aa_change_profile() 817 target = aa_new_null_profile(profile, 0); aa_change_profile() 818 if (!target) { aa_change_profile() 825 /* check if tracing task is allowed to trace target domain */ aa_change_profile() 826 error = may_change_ptraced_domain(target); aa_change_profile() 836 error = aa_set_current_onexec(target); aa_change_profile() 838 error = aa_replace_current_profile(target); aa_change_profile() 846 aa_put_profile(target); aa_change_profile()
|
/linux-4.4.14/drivers/uwb/ |
H A D | uwb-debug.c | 90 struct uwb_dev *target; cmd_rsv_establish() local 93 memcpy(&macaddr, cmd->target, sizeof(macaddr)); cmd_rsv_establish() 94 target = uwb_dev_get_by_macaddr(rc, &macaddr); cmd_rsv_establish() 95 if (target == NULL) cmd_rsv_establish() 100 uwb_dev_put(target); cmd_rsv_establish() 104 rsv->target.type = UWB_RSV_TARGET_DEV; cmd_rsv_establish() 105 rsv->target.dev = target; cmd_rsv_establish() 218 char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; reservations_print() local 222 if (rsv->target.type == UWB_RSV_TARGET_DEV) { reservations_print() 223 devaddr = rsv->target.dev->dev_addr; reservations_print() 226 devaddr = rsv->target.devaddr; reservations_print() 229 uwb_dev_addr_print(target, sizeof(target), &devaddr); reservations_print() 233 owner, target, uwb_rsv_state_str(rsv->state)); reservations_print()
|
/linux-4.4.14/net/nfc/nci/ |
H A D | ntf.c | 193 struct nfc_target *target, nci_add_new_protocol() 223 pr_err("the target found does not have the desired protocol\n"); nci_add_new_protocol() 230 target->sens_res = nfca_poll->sens_res; nci_add_new_protocol() 231 target->sel_res = nfca_poll->sel_res; nci_add_new_protocol() 232 target->nfcid1_len = nfca_poll->nfcid1_len; nci_add_new_protocol() 233 if (target->nfcid1_len > 0) { nci_add_new_protocol() 234 memcpy(target->nfcid1, nfca_poll->nfcid1, nci_add_new_protocol() 235 target->nfcid1_len); nci_add_new_protocol() 240 target->sensb_res_len = nfcb_poll->sensb_res_len; nci_add_new_protocol() 241 if (target->sensb_res_len > 0) { nci_add_new_protocol() 242 memcpy(target->sensb_res, nfcb_poll->sensb_res, nci_add_new_protocol() 243 target->sensb_res_len); nci_add_new_protocol() 248 target->sensf_res_len = nfcf_poll->sensf_res_len; nci_add_new_protocol() 249 if (target->sensf_res_len > 0) { nci_add_new_protocol() 250 memcpy(target->sensf_res, nfcf_poll->sensf_res, nci_add_new_protocol() 251 target->sensf_res_len); nci_add_new_protocol() 256 target->is_iso15693 = 1; nci_add_new_protocol() 257 target->iso15693_dsfid = nfcv_poll->dsfid; nci_add_new_protocol() 258 memcpy(target->iso15693_uid, nfcv_poll->uid, NFC_ISO15693_UID_MAXSIZE); nci_add_new_protocol() 264 target->supported_protocols |= protocol; nci_add_new_protocol() 274 struct nfc_target *target; nci_add_new_target() local 278 target = &ndev->targets[i]; nci_add_new_target() 279 if (target->logical_idx == ntf->rf_discovery_id) { nci_add_new_target() 280 /* This target already exists, add the new protocol */ nci_add_new_target() 281 nci_add_new_protocol(ndev, target, ntf->rf_protocol, nci_add_new_target() 288 /* This is a new target, check if we've enough room */ nci_add_new_target() 290 pr_debug("not enough room, ignoring new target...\n"); nci_add_new_target() 294 target = &ndev->targets[ndev->n_targets]; nci_add_new_target() 296 rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol, nci_add_new_target() 300 target->logical_idx = ntf->rf_discovery_id; nci_add_new_target() 303 pr_debug("logical idx %d, n_targets %d\n", target->logical_idx, nci_add_new_target() 454 struct nfc_target *target; nci_target_auto_activated() local 457 target = &ndev->targets[ndev->n_targets]; nci_target_auto_activated() 459 rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol, nci_target_auto_activated() 465 target->logical_idx = ntf->rf_discovery_id; nci_target_auto_activated() 469 target->logical_idx, ndev->n_targets); nci_target_auto_activated() 650 /* A single target was found and activated nci_rf_intf_activated_ntf_packet() 656 /* A selected target was activated, so complete the nci_rf_intf_activated_ntf_packet() 192 nci_add_new_protocol(struct nci_dev *ndev, struct nfc_target *target, __u8 rf_protocol, __u8 rf_tech_and_mode, void *params) nci_add_new_protocol() argument
|
/linux-4.4.14/drivers/acpi/acpica/ |
H A D | rsdump.c | 209 u8 *target = NULL; acpi_rs_dump_descriptor() local 219 previous_target = target; acpi_rs_dump_descriptor() 220 target = ACPI_ADD_PTR(u8, resource, table->offset); acpi_rs_dump_descriptor() 243 acpi_rs_out_string(name, ACPI_CAST_PTR(char, target)); acpi_rs_dump_descriptor() 254 [*target])); acpi_rs_dump_descriptor() 256 acpi_rs_out_integer8(name, ACPI_GET8(target)); acpi_rs_dump_descriptor() 262 acpi_rs_out_integer16(name, ACPI_GET16(target)); acpi_rs_dump_descriptor() 267 acpi_rs_out_integer32(name, ACPI_GET32(target)); acpi_rs_dump_descriptor() 272 acpi_rs_out_integer64(name, ACPI_GET64(target)); acpi_rs_dump_descriptor() 281 pointer[*target & acpi_rs_dump_descriptor() 289 pointer[*target & acpi_rs_dump_descriptor() 297 pointer[*target & acpi_rs_dump_descriptor() 309 target); acpi_rs_dump_descriptor() 323 (u8, target))); acpi_rs_dump_descriptor() 335 target); acpi_rs_dump_descriptor() local 347 target)); acpi_rs_dump_descriptor() 359 (u16, target))); acpi_rs_dump_descriptor() 369 target)); acpi_rs_dump_descriptor() 379 target)); acpi_rs_dump_descriptor()
|
H A D | rsmisc.c | 78 char *target; acpi_rs_convert_aml_to_resource() local 179 target = ACPI_ADD_PTR(void, aml, info->value); acpi_rs_convert_aml_to_resource() 180 item_count = ACPI_GET16(target) - ACPI_GET16(source); acpi_rs_convert_aml_to_resource() 201 target = ACPI_ADD_PTR(void, aml, (info->value + 2)); acpi_rs_convert_aml_to_resource() 202 if (ACPI_GET16(target)) { acpi_rs_convert_aml_to_resource() 206 target = ACPI_ADD_PTR(void, aml, info->value); acpi_rs_convert_aml_to_resource() 208 ACPI_GET16(target) - ACPI_GET16(source); acpi_rs_convert_aml_to_resource() 263 target = (char *)ACPI_ADD_PTR(void, resource, acpi_rs_convert_aml_to_resource() 266 *(u16 **)destination = ACPI_CAST_PTR(u16, target); acpi_rs_convert_aml_to_resource() 271 acpi_rs_move_data(target, source, item_count, acpi_rs_convert_aml_to_resource() 279 target = (char *)ACPI_ADD_PTR(void, resource, acpi_rs_convert_aml_to_resource() 282 *(u8 **)destination = ACPI_CAST_PTR(u8, target); acpi_rs_convert_aml_to_resource() 287 acpi_rs_move_data(target, source, item_count, acpi_rs_convert_aml_to_resource() 295 target = (char *)ACPI_ADD_PTR(void, resource, acpi_rs_convert_aml_to_resource() 298 *(u8 **)destination = ACPI_CAST_PTR(u8, target); acpi_rs_convert_aml_to_resource() 303 acpi_rs_move_data(target, source, item_count, acpi_rs_convert_aml_to_resource() 311 target = (char *)ACPI_ADD_PTR(void, resource, acpi_rs_convert_aml_to_resource() 314 *(u8 **)destination = ACPI_CAST_PTR(u8, target); acpi_rs_convert_aml_to_resource() 321 acpi_rs_move_data(target, source, item_count, acpi_rs_convert_aml_to_resource() 332 target = ACPI_ADD_PTR(char, resource, info->value); acpi_rs_convert_aml_to_resource() 333 memcpy(destination, source, ACPI_GET16(target)); acpi_rs_convert_aml_to_resource() 361 target = ACPI_ADD_PTR(char, resource, acpi_rs_convert_aml_to_resource() 372 target); acpi_rs_convert_aml_to_resource() 386 target = ACPI_ADD_PTR(char, resource, info->value); acpi_rs_convert_aml_to_resource() 387 ACPI_SET8(target, item_count); acpi_rs_convert_aml_to_resource() 402 target = ACPI_ADD_PTR(char, resource, info->value); acpi_rs_convert_aml_to_resource() 403 ACPI_SET8(target, item_count); acpi_rs_convert_aml_to_resource() 476 char *target; acpi_rs_convert_resource_to_aml() local 571 target = ACPI_ADD_PTR(void, aml, info->value); acpi_rs_convert_resource_to_aml() 572 ACPI_SET16(target, aml_length); acpi_rs_convert_resource_to_aml() 596 target = ACPI_ADD_PTR(void, aml, info->value); acpi_rs_convert_resource_to_aml() 601 ACPI_SET16(target, aml_length); acpi_rs_convert_resource_to_aml()
|
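Both rsdump.c and rsmisc.c above lean on the same idiom: ACPI_ADD_PTR computes a byte-offset pointer into a resource or AML buffer, and ACPI_GETn/ACPI_SETn move fixed-width values through it. Spelled out (macro behaviour assumed from ACPICA's headers, not shown in the excerpt):

    /* target = ACPI_ADD_PTR(void, aml, info->value);
     * is byte-granular pointer arithmetic, roughly: */
    target = (void *)((u8 *)aml + info->value);

    /* ACPI_GET16(target) / ACPI_SET16(target, v) then read or write
     * a u16 through the cast pointer at that offset. */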
H A D | exstoren.c | 58 * target_type - Current type of the target 167 * converting the source type to the target type (implicit 169 * the target. 177 * target object type then stored in the object. This means 178 * that the target object type (for an initialized target) will 218 * of the target as per the ACPI specification. acpi_ex_store_object_to_object()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/dma/ |
H A D | user.c | 81 nvif_ioctl(parent, "create dma vers %d target %d access %d " nvkm_dmaobj_ctor() 83 args->v0.version, args->v0.target, args->v0.access, nvkm_dmaobj_ctor() 85 dmaobj->target = args->v0.target; nvkm_dmaobj_ctor() 98 switch (dmaobj->target) { nvkm_dmaobj_ctor() 100 dmaobj->target = NV_MEM_TARGET_VM; nvkm_dmaobj_ctor() 109 dmaobj->target = NV_MEM_TARGET_VRAM; nvkm_dmaobj_ctor() 114 dmaobj->target = NV_MEM_TARGET_PCI; nvkm_dmaobj_ctor() 120 dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; nvkm_dmaobj_ctor()
|
/linux-4.4.14/arch/powerpc/kernel/ |
H A D | ptrace.c | 264 static int gpr_get(struct task_struct *target, const struct user_regset *regset, gpr_get() argument 270 if (target->thread.regs == NULL) gpr_get() 273 if (!FULL_REGS(target->thread.regs)) { gpr_get() 276 target->thread.regs->gpr[i] = NV_REG_POISON; gpr_get() 280 target->thread.regs, gpr_get() 283 unsigned long msr = get_user_msr(target); gpr_get() 295 &target->thread.regs->orig_gpr3, gpr_get() 305 static int gpr_set(struct task_struct *target, const struct user_regset *regset, gpr_set() argument 312 if (target->thread.regs == NULL) gpr_set() 315 CHECK_FULL_REGS(target->thread.regs); gpr_set() 318 target->thread.regs, gpr_set() 326 ret = set_user_msr(target, reg); gpr_set() 334 &target->thread.regs->orig_gpr3, gpr_set() 349 ret = set_user_trap(target, reg); gpr_set() 360 static int fpr_get(struct task_struct *target, const struct user_regset *regset, fpr_get() argument 368 flush_fp_to_thread(target); fpr_get() 373 buf[i] = target->thread.TS_FPR(i); fpr_get() 374 buf[32] = target->thread.fp_state.fpscr; fpr_get() 382 &target->thread.fp_state, 0, -1); fpr_get() 386 static int fpr_set(struct task_struct *target, const struct user_regset *regset, fpr_set() argument 394 flush_fp_to_thread(target); fpr_set() 402 target->thread.TS_FPR(i) = buf[i]; fpr_set() 403 target->thread.fp_state.fpscr = buf[32]; fpr_set() 410 &target->thread.fp_state, 0, -1); fpr_set() 428 static int vr_active(struct task_struct *target, vr_active() argument 431 flush_altivec_to_thread(target); vr_active() 432 return target->thread.used_vr ? regset->n : 0; vr_active() 435 static int vr_get(struct task_struct *target, const struct user_regset *regset, vr_get() argument 441 flush_altivec_to_thread(target); vr_get() 447 &target->thread.vr_state, 0, vr_get() 458 vrsave.word = target->thread.vrsave; vr_get() 466 static int vr_set(struct task_struct *target, const struct user_regset *regset, vr_set() argument 472 flush_altivec_to_thread(target); vr_set() 478 &target->thread.vr_state, 0, vr_set() 489 vrsave.word = target->thread.vrsave; vr_set() 493 target->thread.vrsave = vrsave.word; vr_set() 507 static int vsr_active(struct task_struct *target, vsr_active() argument 510 flush_vsx_to_thread(target); vsr_active() 511 return target->thread.used_vsr ? regset->n : 0; vsr_active() 514 static int vsr_get(struct task_struct *target, const struct user_regset *regset, vsr_get() argument 521 flush_vsx_to_thread(target); vsr_get() 524 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; vsr_get() 531 static int vsr_set(struct task_struct *target, const struct user_regset *regset, vsr_set() argument 538 flush_vsx_to_thread(target); vsr_set() 543 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; vsr_set() 562 static int evr_active(struct task_struct *target, evr_active() argument 565 flush_spe_to_thread(target); evr_active() 566 return target->thread.used_spe ? 
regset->n : 0; evr_active() 569 static int evr_get(struct task_struct *target, const struct user_regset *regset, evr_get() argument 575 flush_spe_to_thread(target); evr_get() 578 &target->thread.evr, evr_get() 579 0, sizeof(target->thread.evr)); evr_get() 586 &target->thread.acc, evr_get() 587 sizeof(target->thread.evr), -1); evr_get() 592 static int evr_set(struct task_struct *target, const struct user_regset *regset, evr_set() argument 598 flush_spe_to_thread(target); evr_set() 601 &target->thread.evr, evr_set() 602 0, sizeof(target->thread.evr)); evr_set() 609 &target->thread.acc, evr_set() 610 sizeof(target->thread.evr), -1); evr_set() 676 static int gpr32_get(struct task_struct *target, gpr32_get() argument 681 const unsigned long *regs = &target->thread.regs->gpr[0]; gpr32_get() 687 if (target->thread.regs == NULL) gpr32_get() 690 if (!FULL_REGS(target->thread.regs)) { gpr32_get() 693 target->thread.regs->gpr[i] = NV_REG_POISON; gpr32_get() 708 reg = get_user_msr(target); gpr32_get() 733 static int gpr32_set(struct task_struct *target, gpr32_set() argument 738 unsigned long *regs = &target->thread.regs->gpr[0]; gpr32_set() 743 if (target->thread.regs == NULL) gpr32_set() 746 CHECK_FULL_REGS(target->thread.regs); gpr32_set() 767 set_user_msr(target, reg); gpr32_set() 793 set_user_trap(target, reg); gpr32_set()
|
H A D | jump_label.c | 21 patch_branch(addr, entry->target, 0); arch_jump_label_transform()
|
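The one-line excerpt above is the enable half; the full powerpc handler is small enough to sketch in full (assuming PPC_INST_NOP and the 4.4 jump-label type names):

    void arch_jump_label_transform(struct jump_entry *entry,
                                   enum jump_label_type type)
    {
            u32 *addr = (u32 *)(unsigned long)entry->code;

            if (type == JUMP_LABEL_JMP)
                    patch_branch(addr, entry->target, 0);   /* b <target> */
            else
                    patch_instruction(addr, PPC_INST_NOP);  /* nop */
    }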
/linux-4.4.14/drivers/misc/ |
H A D | vmw_balloon.c | 240 unsigned int target; member in struct:vmballoon_stats 260 bool is_2m_pages, unsigned int *target); 262 bool is_2m_pages, unsigned int *target); 282 unsigned int target; member in struct:vmballoon 408 unsigned long target; vmballoon_send_get_target() local 427 STATS_INC(b->stats.target); vmballoon_send_get_target() 429 status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target); vmballoon_send_get_target() 431 *new_target = target; vmballoon_send_get_target() 446 unsigned int *hv_status, unsigned int *target) vmballoon_send_lock_page() 457 *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target); vmballoon_send_lock_page() 467 unsigned int num_pages, bool is_2m_pages, unsigned int *target) vmballoon_send_batched_lock() 476 *target); vmballoon_send_batched_lock() 479 *target); vmballoon_send_batched_lock() 494 unsigned int *target) vmballoon_send_unlock_page() 505 status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target); vmballoon_send_unlock_page() 515 unsigned int num_pages, bool is_2m_pages, unsigned int *target) vmballoon_send_batched_unlock() 524 *target); vmballoon_send_batched_unlock() 527 *target); vmballoon_send_batched_unlock() 596 bool is_2m_pages, unsigned int *target) vmballoon_lock_page() 605 target); vmballoon_lock_page() 639 unsigned int num_pages, bool is_2m_pages, unsigned int *target) vmballoon_lock_batched_page() 645 target); vmballoon_lock_batched_page() 698 bool is_2m_pages, unsigned int *target) vmballoon_unlock_page() 705 if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) { vmballoon_unlock_page() 722 unsigned int *target) vmballoon_unlock_batched_page() 729 target); vmballoon_unlock_batched_page() 793 * Inflate the balloon towards its target size. Note that we try to limit 806 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); vmballoon_inflate() 812 * free pages in the guest quickly (if the balloon target is high). 
vmballoon_inflate() 814 * the guest to start swapping if balloon target is not met yet, vmballoon_inflate() 837 __func__, b->target - b->size, rate, b->rate_alloc); vmballoon_inflate() 841 < b->target) { vmballoon_inflate() 854 b->ops->lock(b, num_pages, true, &b->target); vmballoon_inflate() 901 &b->target); vmballoon_inflate() 916 b->ops->lock(b, num_pages, is_2m_pages, &b->target); vmballoon_inflate() 941 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); vmballoon_deflate() 943 /* free pages to reach target */ vmballoon_deflate() 953 (b->target > 0 && vmballoon_deflate() 956 < b->target + vmballoon_page_size(true))) vmballoon_deflate() 966 is_2m_pages, &b->target); vmballoon_deflate() 976 b->ops->unlock(b, num_pages, is_2m_pages, &b->target); vmballoon_deflate() 1119 unsigned int target; vmballoon_work() local 1129 if (!b->reset_required && vmballoon_send_get_target(b, &target)) { vmballoon_work() 1130 /* update target, adjust size */ vmballoon_work() 1131 b->target = target; vmballoon_work() 1133 if (b->size < target) vmballoon_work() 1135 else if (target == 0 || vmballoon_work() 1136 b->size > target + vmballoon_page_size(true)) vmballoon_work() 1168 "target: %8d pages\n" vmballoon_debug_show() 1170 b->target, b->size); vmballoon_debug_show() 1187 "target: %8u (%4u failed)\n" vmballoon_debug_show() 1207 stats->target, stats->target_fail, vmballoon_debug_show() 445 vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, unsigned int *hv_status, unsigned int *target) vmballoon_send_lock_page() argument 466 vmballoon_send_batched_lock(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) vmballoon_send_batched_lock() argument 493 vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn, unsigned int *target) vmballoon_send_unlock_page() argument 514 vmballoon_send_batched_unlock(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) vmballoon_send_batched_unlock() argument 595 vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) vmballoon_lock_page() argument 638 vmballoon_lock_batched_page(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) vmballoon_lock_batched_page() argument 697 vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) vmballoon_unlock_page() argument 720 vmballoon_unlock_batched_page(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) vmballoon_unlock_batched_page() argument
|
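vmballoon_work() above turns the hypervisor-supplied target into an inflate/deflate decision with one huge page of slack so the size does not ping-pong around the target. Condensed, with a worked example (assuming vmballoon_page_size(true) == 512 small pages, i.e. one 2 MB page):

    /* Example: size = 1200 pages, target = 600:
     *   1200 > 600 + 512             -> deflate
     * at size = 1100 it would idle:  1100 <= 600 + 512. */
    if (b->size < target)
            vmballoon_inflate(b);
    else if (target == 0 ||
             b->size > target + vmballoon_page_size(true))
            vmballoon_deflate(b);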
/linux-4.4.14/fs/cifs/ |
H A D | cifs_unicode.c | 46 convert_sfu_char(const __u16 src_char, char *target) convert_sfu_char() argument 55 *target = ':'; convert_sfu_char() 58 *target = '*'; convert_sfu_char() 61 *target = '?'; convert_sfu_char() 64 *target = '|'; convert_sfu_char() 67 *target = '>'; convert_sfu_char() 70 *target = '<'; convert_sfu_char() 80 convert_sfm_char(const __u16 src_char, char *target) convert_sfm_char() argument 84 *target = ':'; convert_sfm_char() 87 *target = '*'; convert_sfm_char() 90 *target = '?'; convert_sfm_char() 93 *target = '|'; convert_sfm_char() 96 *target = '>'; convert_sfm_char() 99 *target = '<'; convert_sfm_char() 102 *target = '\\'; convert_sfm_char() 113 * @target - where converted character should be copied 119 * responsibility of the caller to ensure that the target buffer is large 123 cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp, cifs_mapchar() argument 131 if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target)) cifs_mapchar() 134 convert_sfu_char(src_char, target)) cifs_mapchar() 138 len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE); cifs_mapchar() 148 len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6); cifs_mapchar() 154 *target = '?'; cifs_mapchar() 444 cifsConvertToUTF16(__le16 *target, const char *source, int srclen, cifsConvertToUTF16() argument 457 return cifs_strtoUTF16(target, source, PATH_MAX, cp); cifsConvertToUTF16() 511 put_unaligned(dst_char, &target[j]); cifsConvertToUTF16() 516 put_unaligned(dst_char, &target[j]); cifsConvertToUTF16() 519 put_unaligned(dst_char, &target[j]); cifsConvertToUTF16() 522 put_unaligned(dst_char, &target[j]); cifsConvertToUTF16() 525 put_unaligned(dst_char, &target[j]); cifsConvertToUTF16() 528 put_unaligned(dst_char, &target[j]); cifsConvertToUTF16() 540 * but will take exactly two bytes in the target string cifsConvertToUTF16() 543 put_unaligned(dst_char, &target[j]); cifsConvertToUTF16() 547 put_unaligned(0, &target[j]); /* Null terminate target unicode string */ cifsConvertToUTF16()
|
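cifs_mapchar() above is the per-character step of UTF-16-to-local decoding. A sketch of the surrounding loop, assuming the excerpted signature (buffer names hypothetical; the real cifs_from_utf16() adds bounds checks and surrogate-pair handling):

    for (i = 0, outlen = 0; i < fromwords; i++) {
            /* emits 1..6 bytes at "to + outlen", returns the count */
            rc = cifs_mapchar(to + outlen, from + i, codepage, maptype);
            if (rc > 0)
                    outlen += rc;
    }
    to[outlen] = '\0';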
H A D | ioctl.c | 53 cifs_dbg(FYI, "file target not open for write\n"); cifs_ioctl_clone() 57 /* check if target volume is readonly and take reference */ cifs_ioctl_clone() 88 /* check source and target on same server (or volume if dup_extents) */ cifs_ioctl_clone() 90 cifs_dbg(VFS, "source and target of copy not on same share\n"); cifs_ioctl_clone() 95 cifs_dbg(VFS, "source and target of copy not on same server\n"); cifs_ioctl_clone() 107 * server could even support copy of range where source = target cifs_ioctl_clone() 132 /* force revalidate of size and timestamps of target file now cifs_ioctl_clone() 133 that target is updated on the server */ cifs_ioctl_clone()
|
/linux-4.4.14/include/scsi/ |
H A D | scsi_transport_srp.h | 32 * struct srp_rport - SRP initiator or target port 34 * Fields that are relevant for SRP initiator and SRP target drivers: 37 * @roles: Role of this port - initiator or target. 53 /* for initiator and target drivers */ 86 * @reconnect: Callback function for reconnecting to the target. See also 92 * Fields that are only relevant for SRP target drivers: 106 /* for target drivers */ 130 * @rport: SRP target port pointer.
|
/linux-4.4.14/arch/s390/kernel/ |
H A D | ptrace.c | 876 static int s390_regs_get(struct task_struct *target, s390_regs_get() argument 881 if (target == current) s390_regs_get() 882 save_access_regs(target->thread.acrs); s390_regs_get() 887 *k++ = __peek_user(target, pos); s390_regs_get() 894 if (__put_user(__peek_user(target, pos), u++)) s390_regs_get() 903 static int s390_regs_set(struct task_struct *target, s390_regs_set() argument 910 if (target == current) s390_regs_set() 911 save_access_regs(target->thread.acrs); s390_regs_set() 916 rc = __poke_user(target, pos, *k++); s390_regs_set() 927 rc = __poke_user(target, pos, word); s390_regs_set() 933 if (rc == 0 && target == current) s390_regs_set() 934 restore_access_regs(target->thread.acrs); s390_regs_set() 939 static int s390_fpregs_get(struct task_struct *target, s390_fpregs_get() argument 945 if (target == current) s390_fpregs_get() 948 fp_regs.fpc = target->thread.fpu.fpc; s390_fpregs_get() 949 fpregs_store(&fp_regs, &target->thread.fpu); s390_fpregs_get() 955 static int s390_fpregs_set(struct task_struct *target, s390_fpregs_set() argument 963 if (target == current) s390_fpregs_set() 968 u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; s390_fpregs_set() 975 target->thread.fpu.fpc = ufpc[0]; s390_fpregs_set() 985 convert_fp_to_vx(target->thread.fpu.vxrs, fprs); s390_fpregs_set() 987 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); s390_fpregs_set() 992 static int s390_last_break_get(struct task_struct *target, s390_last_break_get() argument 1000 *k = task_thread_info(target)->last_break; s390_last_break_get() 1003 if (__put_user(task_thread_info(target)->last_break, u)) s390_last_break_get() 1010 static int s390_last_break_set(struct task_struct *target, s390_last_break_set() argument 1018 static int s390_tdb_get(struct task_struct *target, s390_tdb_get() argument 1023 struct pt_regs *regs = task_pt_regs(target); s390_tdb_get() 1028 data = target->thread.trap_tdb; s390_tdb_get() 1032 static int s390_tdb_set(struct task_struct *target, s390_tdb_set() argument 1040 static int s390_vxrs_low_get(struct task_struct *target, s390_vxrs_low_get() argument 1050 if (target == current) s390_vxrs_low_get() 1053 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); s390_vxrs_low_get() 1057 static int s390_vxrs_low_set(struct task_struct *target, s390_vxrs_low_set() argument 1067 if (target == current) s390_vxrs_low_set() 1073 *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i]; s390_vxrs_low_set() 1078 static int s390_vxrs_high_get(struct task_struct *target, s390_vxrs_high_get() argument 1087 if (target == current) s390_vxrs_high_get() 1089 memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs)); s390_vxrs_high_get() 1094 static int s390_vxrs_high_set(struct task_struct *target, s390_vxrs_high_set() argument 1103 if (target == current) s390_vxrs_high_set() 1107 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1); s390_vxrs_high_set() 1111 static int s390_system_call_get(struct task_struct *target, s390_system_call_get() argument 1116 unsigned int *data = &task_thread_info(target)->system_call; s390_system_call_get() 1121 static int s390_system_call_set(struct task_struct *target, s390_system_call_set() argument 1126 unsigned int *data = &task_thread_info(target)->system_call; s390_system_call_set() 1198 static int s390_compat_regs_get(struct task_struct *target, s390_compat_regs_get() argument 1203 if (target == current) s390_compat_regs_get() 1204 save_access_regs(target->thread.acrs); s390_compat_regs_get() 1209 *k++ = __peek_user_compat(target, pos); 
s390_compat_regs_get() 1216 if (__put_user(__peek_user_compat(target, pos), u++)) s390_compat_regs_get() 1225 static int s390_compat_regs_set(struct task_struct *target, s390_compat_regs_set() argument 1232 if (target == current) s390_compat_regs_set() 1233 save_access_regs(target->thread.acrs); s390_compat_regs_set() 1238 rc = __poke_user_compat(target, pos, *k++); s390_compat_regs_set() 1249 rc = __poke_user_compat(target, pos, word); s390_compat_regs_set() 1255 if (rc == 0 && target == current) s390_compat_regs_set() 1256 restore_access_regs(target->thread.acrs); s390_compat_regs_set() 1261 static int s390_compat_regs_high_get(struct task_struct *target, s390_compat_regs_high_get() argument 1269 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)]; s390_compat_regs_high_get() 1289 static int s390_compat_regs_high_set(struct task_struct *target, s390_compat_regs_high_set() argument 1298 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)]; s390_compat_regs_high_set() 1322 static int s390_compat_last_break_get(struct task_struct *target, s390_compat_last_break_get() argument 1330 last_break = task_thread_info(target)->last_break; s390_compat_last_break_get() 1343 static int s390_compat_last_break_set(struct task_struct *target, s390_compat_last_break_set() argument
|
/linux-4.4.14/fs/xfs/ |
H A D | xfs_buf.h | 38 XBRW_READ = 1, /* transfer into target memory */ 39 XBRW_WRITE = 2, /* transfer from target memory */ 40 XBRW_ZERO = 3, /* Zero target memory */ 167 xfs_buftarg_t *b_target; /* buffer target (device) */ 194 struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, 200 struct xfs_buftarg *target, xfs_incore() 206 return _xfs_buf_find(target, &map, 1, flags, NULL); xfs_incore() 209 struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target, 215 struct xfs_buftarg *target, xfs_buf_alloc() 221 return _xfs_buf_alloc(target, &map, 1, flags); xfs_buf_alloc() 224 struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target, 227 struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target, 231 void xfs_buf_readahead_map(struct xfs_buftarg *target, 237 struct xfs_buftarg *target, xfs_buf_get() 243 return xfs_buf_get_map(target, &map, 1, flags); xfs_buf_get() 248 struct xfs_buftarg *target, xfs_buf_read() 255 return xfs_buf_read_map(target, &map, 1, flags, ops); xfs_buf_read() 260 struct xfs_buftarg *target, xfs_buf_readahead() 266 return xfs_buf_readahead_map(target, &map, 1, ops); xfs_buf_readahead() 269 struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks); 273 struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, 275 int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr, 199 xfs_incore( struct xfs_buftarg *target, xfs_daddr_t blkno, size_t numblks, xfs_buf_flags_t flags) xfs_incore() argument 214 xfs_buf_alloc( struct xfs_buftarg *target, xfs_daddr_t blkno, size_t numblks, xfs_buf_flags_t flags) xfs_buf_alloc() argument 236 xfs_buf_get( struct xfs_buftarg *target, xfs_daddr_t blkno, size_t numblks, xfs_buf_flags_t flags) xfs_buf_get() argument 247 xfs_buf_read( struct xfs_buftarg *target, xfs_daddr_t blkno, size_t numblks, xfs_buf_flags_t flags, const struct xfs_buf_ops *ops) xfs_buf_read() argument 259 xfs_buf_readahead( struct xfs_buftarg *target, xfs_daddr_t blkno, size_t numblks, const struct xfs_buf_ops *ops) xfs_buf_readahead() argument
|
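Every single-extent helper above is a thin wrapper that builds a one-entry map and calls the _map variant. What xfs_buf_read() expands to, approximately (DEFINE_SINGLE_BUF_MAP's layout assumed from this header):

    struct xfs_buf_map map = { .bm_bn = blkno, .bm_len = numblks };
    struct xfs_buf *bp = xfs_buf_read_map(target, &map, 1, flags, ops);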
/linux-4.4.14/drivers/scsi/ |
H A D | nsp32.c | 459 unsigned char target = scmd_id(SCpnt); nsp32_selection_autopara() local 524 /* syncreg, ackwidth, target id, SREQ sampling rate */ nsp32_selection_autopara() 527 param->target_id = BIT(host_id) | BIT(target); nsp32_selection_autopara() 590 unsigned char target = scmd_id(SCpnt); nsp32_selection_autoscsi() local 635 * set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID nsp32_selection_autoscsi() 637 nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target)); nsp32_selection_autoscsi() 806 * reselection target id&lun must be already set. 847 data->cur_target = &(data->target[newid]); nsp32_reselection() 912 nsp32_target *target; nsp32_queuecommand_lck() local 917 "enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x " nsp32_queuecommand_lck() 930 /* check target ID is not same as this initiator ID */ nsp32_queuecommand_lck() 932 nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "target==host???"); nsp32_queuecommand_lck() 938 /* check target LUN is allowable value */ nsp32_queuecommand_lck() 982 * If target is the first time to transfer after the reset nsp32_queuecommand_lck() 983 * (target don't have SDTR_DONE and SDTR_INITIATOR), sync nsp32_queuecommand_lck() 986 target = &data->target[scmd_id(SCpnt)]; nsp32_queuecommand_lck() 987 data->cur_target = target; nsp32_queuecommand_lck() 989 if (!(target->sync_flag & (SDTR_DONE | SDTR_INITIATOR | SDTR_TARGET))) { nsp32_queuecommand_lck() 993 nsp32_set_max_sync(data, target, &period, &offset); nsp32_queuecommand_lck() 995 target->sync_flag |= SDTR_INITIATOR; nsp32_queuecommand_lck() 997 nsp32_set_async(data, target); nsp32_queuecommand_lck() 998 target->sync_flag |= SDTR_DONE; nsp32_queuecommand_lck() 1003 target->limit_entry, period, offset); nsp32_queuecommand_lck() 1004 } else if (target->sync_flag & SDTR_INITIATOR) { nsp32_queuecommand_lck() 1006 * It was negotiating SDTR with target, sending from the nsp32_queuecommand_lck() 1010 nsp32_set_async(data, target); nsp32_queuecommand_lck() 1011 target->sync_flag &= ~SDTR_INITIATOR; nsp32_queuecommand_lck() 1012 target->sync_flag |= SDTR_DONE; nsp32_queuecommand_lck() 1016 } else if (target->sync_flag & SDTR_TARGET) { nsp32_queuecommand_lck() 1018 * It was negotiating SDTR with target, sending from target, nsp32_queuecommand_lck() 1022 nsp32_set_async(data, target); nsp32_queuecommand_lck() 1023 target->sync_flag &= ~SDTR_TARGET; nsp32_queuecommand_lck() 1024 target->sync_flag |= SDTR_DONE; nsp32_queuecommand_lck() 1027 "Unknown SDTR from target is reached, fall back to async."); nsp32_queuecommand_lck() 1031 "target: %d sync_flag: 0x%x syncreg: 0x%x ackwidth: 0x%x", nsp32_queuecommand_lck() 1032 SCpnt->device->id, target->sync_flag, target->syncreg, nsp32_queuecommand_lck() 1033 target->ackwidth); nsp32_queuecommand_lck() 1481 for (id = 0; id < ARRAY_SIZE(data->target); id++) { nsp32_show_info() 1490 if (data->target[id].sync_flag == SDTR_DONE) { nsp32_show_info() 1491 if (data->target[id].period == 0 && nsp32_show_info() 1492 data->target[id].offset == ASYNC_OFFSET ) { nsp32_show_info() 1501 if (data->target[id].period != 0) { nsp32_show_info() 1503 speed = 1000000 / (data->target[id].period * 4); nsp32_show_info() 1508 data->target[id].offset nsp32_show_info() 1638 * target SDTR check nsp32_busfree_occur() 1650 * SDTR negotiation pulled by the target has been nsp32_busfree_occur() 2048 message_reject from target, SDTR negotiation is failed */ nsp32_msgin_occur() 2052 * Current target is negotiating SDTR, but it's nsp32_msgin_occur() 2130 * Exchange this message 
between initiator and target. nsp32_msgin_occur() 2233 nsp32_target *target = data->cur_target; nsp32_analyze_sdtr() local 2246 * If this inititor sent the SDTR message, then target responds SDTR, nsp32_analyze_sdtr() 2249 * If initiator did not send the SDTR, but target sends SDTR, nsp32_analyze_sdtr() 2252 if (target->sync_flag & SDTR_INITIATOR) { nsp32_analyze_sdtr() 2254 * Initiator sent SDTR, the target responds and nsp32_analyze_sdtr() 2257 nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR"); nsp32_analyze_sdtr() 2259 target->sync_flag &= ~SDTR_INITIATOR; nsp32_analyze_sdtr() 2260 target->sync_flag |= SDTR_DONE; nsp32_analyze_sdtr() 2267 * Negotiation is failed, the target send back nsp32_analyze_sdtr() 2275 * Negotiation is succeeded, the target want nsp32_analyze_sdtr() 2293 * Negotiation is failed, the target send back nsp32_analyze_sdtr() 2299 entry = nsp32_search_period_entry(data, target, get_period); nsp32_analyze_sdtr() 2312 nsp32_set_sync_entry(data, target, entry, get_offset); nsp32_analyze_sdtr() 2315 nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR"); nsp32_analyze_sdtr() 2317 target->sync_flag |= SDTR_INITIATOR; nsp32_analyze_sdtr() 2330 entry = nsp32_search_period_entry(data, target, get_period); nsp32_analyze_sdtr() 2333 nsp32_set_async(data, target); nsp32_analyze_sdtr() 2336 nsp32_set_sync_entry(data, target, entry, get_offset); nsp32_analyze_sdtr() 2341 target->period = get_period; nsp32_analyze_sdtr() 2347 * If the current message is unacceptable, send back to the target nsp32_analyze_sdtr() 2353 nsp32_set_async(data, target); /* set as ASYNC transfer mode */ nsp32_analyze_sdtr() 2355 target->period = 0; nsp32_analyze_sdtr() 2363 * target and speed period value. If failed to search, return negative value. 2366 nsp32_target *target, nsp32_search_period_entry() 2371 if (target->limit_entry >= data->syncnum) { nsp32_search_period_entry() 2373 target->limit_entry = 0; nsp32_search_period_entry() 2376 for (i = target->limit_entry; i < data->syncnum; i++) { nsp32_search_period_entry() 2396 * target <-> initiator use ASYNC transfer 2398 static void nsp32_set_async(nsp32_hw_data *data, nsp32_target *target) nsp32_set_async() argument 2400 unsigned char period = data->synct[target->limit_entry].period_num; nsp32_set_async() 2402 target->offset = ASYNC_OFFSET; nsp32_set_async() 2403 target->period = 0; nsp32_set_async() 2404 target->syncreg = TO_SYNCREG(period, ASYNC_OFFSET); nsp32_set_async() 2405 target->ackwidth = 0; nsp32_set_async() 2406 target->sample_reg = 0; nsp32_set_async() 2413 * target <-> initiator use maximum SYNC transfer 2416 nsp32_target *target, nsp32_set_max_sync() 2422 period_num = data->synct[target->limit_entry].period_num; nsp32_set_max_sync() 2423 *period = data->synct[target->limit_entry].start_period; nsp32_set_max_sync() 2424 ackwidth = data->synct[target->limit_entry].ackwidth; nsp32_set_max_sync() 2427 target->syncreg = TO_SYNCREG(period_num, *offset); nsp32_set_max_sync() 2428 target->ackwidth = ackwidth; nsp32_set_max_sync() 2429 target->offset = *offset; nsp32_set_max_sync() 2430 target->sample_reg = 0; /* disable SREQ sampling */ nsp32_set_max_sync() 2435 * target <-> initiator use entry number speed 2438 nsp32_target *target, nsp32_set_sync_entry() 2449 target->syncreg = TO_SYNCREG(period, offset); nsp32_set_sync_entry() 2450 target->ackwidth = ackwidth; nsp32_set_sync_entry() 2451 target->offset = offset; nsp32_set_sync_entry() 2452 target->sample_reg = sample_rate | SAMPLING_ENABLE; nsp32_set_sync_entry() 2462 * connected target 
responds SCSI REQ negation. We have to wait 2685 * setup target nsp32_detect() 2687 for (i = 0; i < ARRAY_SIZE(data->target); i++) { nsp32_detect() 2688 nsp32_target *target = &(data->target[i]); nsp32_detect() local 2690 target->limit_entry = 0; nsp32_detect() 2691 target->sync_flag = 0; nsp32_detect() 2692 nsp32_set_async(data, target); nsp32_detect() 2887 for (i = 0; i < ARRAY_SIZE(data->target); i++) { nsp32_do_bus_reset() 2888 nsp32_target *target = &data->target[i]; nsp32_do_bus_reset() local 2890 target->sync_flag = 0; nsp32_do_bus_reset() 2891 nsp32_set_async(data, target); nsp32_do_bus_reset() 3015 nsp32_target *target; nsp32_getprom_at24() local 3060 target = &data->target[i]; nsp32_getprom_at24() 3062 target->limit_entry = 0; /* set as ULTRA20M */ nsp32_getprom_at24() 3065 entry = nsp32_search_period_entry(data, target, ret); nsp32_getprom_at24() 3070 target->limit_entry = entry; nsp32_getprom_at24() 3101 nsp32_target *target; nsp32_getprom_c16() local 3115 target = &data->target[i]; nsp32_getprom_c16() 3134 entry = nsp32_search_period_entry(data, target, val); nsp32_getprom_c16() 3139 target->limit_entry = entry; nsp32_getprom_c16() 2365 nsp32_search_period_entry(nsp32_hw_data *data, nsp32_target *target, unsigned char period) nsp32_search_period_entry() argument 2415 nsp32_set_max_sync(nsp32_hw_data *data, nsp32_target *target, unsigned char *period, unsigned char *offset) nsp32_set_max_sync() argument 2437 nsp32_set_sync_entry(nsp32_hw_data *data, nsp32_target *target, int entry, unsigned char offset) nsp32_set_sync_entry() argument
|
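The SDTR handling above is a small per-target negotiation state machine kept in sync_flag; condensed from nsp32_queuecommand_lck() and nsp32_analyze_sdtr():

    /* sync_flag transitions (from the excerpt above):
     *   0               first command: propose SDTR, set SDTR_INITIATOR
     *   SDTR_INITIATOR  our offer was answered (or rejected): fall back
     *                   to async on failure, then set SDTR_DONE
     *   SDTR_TARGET     the target opened negotiation: answer it,
     *                   then set SDTR_DONE
     *   SDTR_DONE       reuse the agreed syncreg/ackwidth/offset as-is
     */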
H A D | qla1280.c | 90 that the target device actually supports it 118 - Kill all leftovers of target-mode support which never worked anyway 315 - Added new routine to set target parameters for ISP12160. 802 qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target) qla1280_wait_for_pending_commands() argument 812 * Wait for all commands with the designated bus/target qla1280_wait_for_pending_commands() 822 if (target >= 0 && SCSI_TCN_32(cmd) != target) qla1280_wait_for_pending_commands() 851 int bus, target, lun; qla1280_error_action() local 864 target = SCSI_TCN_32(cmd); qla1280_error_action() 900 target, lun); qla1280_error_action() 921 "command.\n", ha->host_no, bus, target, lun); qla1280_error_action() 922 if (qla1280_device_reset(ha, bus, target) == 0) { qla1280_error_action() 925 wait_for_target = target; qla1280_error_action() 973 ha->host_no, bus, target, lun); qla1280_error_action() 1143 qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target) qla1280_set_target_parameters() argument 1156 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); qla1280_set_target_parameters() 1157 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8; qla1280_set_target_parameters() 1158 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9; qla1280_set_target_parameters() 1159 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10; qla1280_set_target_parameters() 1160 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11; qla1280_set_target_parameters() 1161 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12; qla1280_set_target_parameters() 1162 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13; qla1280_set_target_parameters() 1163 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14; qla1280_set_target_parameters() 1164 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15; qla1280_set_target_parameters() 1167 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5; qla1280_set_target_parameters() 1168 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8); qla1280_set_target_parameters() 1169 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) | qla1280_set_target_parameters() 1170 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width; qla1280_set_target_parameters() 1173 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8); qla1280_set_target_parameters() 1175 mb[3] |= nv->bus[bus].target[target].sync_period; qla1280_set_target_parameters() 1182 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); qla1280_set_target_parameters() 1185 mb[3] = nv->bus[bus].target[target].execution_throttle; qla1280_set_target_parameters() 1192 ha->host_no, bus, target); qla1280_set_target_parameters() 1214 int target = device->id; qla1280_slave_configure() local 1226 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) { qla1280_slave_configure() 1232 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr; qla1280_slave_configure() 1233 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr; qla1280_slave_configure() 1234 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr; qla1280_slave_configure() 1238 (~driver_setup.sync_mask & (1 << target)))) qla1280_slave_configure() 1239 nv->bus[bus].target[target].parameter.enable_sync = 0; qla1280_slave_configure() 1242 (~driver_setup.wide_mask & (1 << target)))) qla1280_slave_configure() 1243 nv->bus[bus].target[target].parameter.enable_wide = 0; qla1280_slave_configure() 1247 (~driver_setup.ppr_mask & (1 << target)))) qla1280_slave_configure() 1248 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0; qla1280_slave_configure() 1252 if (nv->bus[bus].target[target].parameter.enable_sync) qla1280_slave_configure() 1253 status = qla1280_set_target_parameters(ha, bus, target); qla1280_slave_configure() 1272 int bus, target, lun; qla1280_done() local 1286 target = SCSI_TCN_32(cmd); qla1280_done() 1293 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); qla1280_done() 2015 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target) qla1280_set_target_defaults() argument 2019 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1; qla1280_set_target_defaults() 2020 nv->bus[bus].target[target].parameter.auto_request_sense = 1; qla1280_set_target_defaults() 2021 nv->bus[bus].target[target].parameter.tag_queuing = 1; qla1280_set_target_defaults() 2022 nv->bus[bus].target[target].parameter.enable_sync = 1; qla1280_set_target_defaults() 2024 nv->bus[bus].target[target].parameter.enable_wide = 1; qla1280_set_target_defaults() 2026 nv->bus[bus].target[target].execution_throttle = qla1280_set_target_defaults() 2028 nv->bus[bus].target[target].parameter.parity_checking = 1; qla1280_set_target_defaults() 2029 nv->bus[bus].target[target].parameter.disconnect_allowed = 1; qla1280_set_target_defaults() 2032 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1; qla1280_set_target_defaults() 2033 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e; qla1280_set_target_defaults() 2034 nv->bus[bus].target[target].sync_period = 9; qla1280_set_target_defaults() 2035 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; qla1280_set_target_defaults() 2036 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2; qla1280_set_target_defaults() 2037 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1; qla1280_set_target_defaults() 2039 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1; qla1280_set_target_defaults() 2040 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12; qla1280_set_target_defaults() 2041 nv->bus[bus].target[target].sync_period = 10; qla1280_set_target_defaults() 2049 int bus, target; qla1280_set_defaults() local 2094 for (target = 0; target < MAX_TARGETS; target++) qla1280_set_defaults() 2095 qla1280_set_target_defaults(ha, bus, target); qla1280_set_defaults() 2100 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target) qla1280_config_target() argument 2109 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); qla1280_config_target() 2113 * enable this later if we determine the target actually qla1280_config_target() 2120 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8; qla1280_config_target() 2122 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8; qla1280_config_target() 2123 mb[3] |= nv->bus[bus].target[target].sync_period; qla1280_config_target() 2127 flag = (BIT_0 << target); qla1280_config_target() 2128 if (nv->bus[bus].target[target].parameter.tag_queuing) qla1280_config_target() 2133 if (nv->bus[bus].target[target].flags.flags1x160.device_enable) qla1280_config_target() 2137 if (nv->bus[bus].target[target].flags.flags1x80.device_enable) qla1280_config_target() 2140 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable) qla1280_config_target() 2147 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); qla1280_config_target() 2150 mb[3] = nv->bus[bus].target[target].execution_throttle; qla1280_config_target() 2162 int target, status; qla1280_config_bus() local 2182 /* Set target parameters. */ qla1280_config_bus() 2183 for (target = 0; target < MAX_TARGETS; target++) qla1280_config_bus() 2184 status |= qla1280_config_target(ha, bus, target); qla1280_config_bus() 2194 int bus, target, status = 0; qla1280_nvram_config() local 2202 for (target = 0; target < MAX_TARGETS; target++) { qla1280_nvram_config() 2203 nv->bus[bus].target[target].parameter. qla1280_nvram_config() 2639 * Issue bus device reset message to the target. 2644 * target = SCSI ID. 2650 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target) qla1280_device_reset() argument 2658 mb[1] = (bus ? (target | BIT_7) : target) << 8; qla1280_device_reset() 2663 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); qla1280_device_reset() 2687 unsigned int bus, target, lun; qla1280_abort_command() local 2693 target = SCSI_TCN_32(sp->cmd); qla1280_abort_command() 2699 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; qla1280_abort_command() 2760 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id); qla1280_marker() 2854 dprintk(2, " bus %i, target %i, lun %i\n", qla1280_64bit_start_scsi() 2875 /* Set device target ID and LUN */ qla1280_64bit_start_scsi() 2877 pkt->target = SCSI_BUS_32(cmd) ? qla1280_64bit_start_scsi() 3144 /* Set device target ID and LUN */ qla1280_32bit_start_scsi() 3146 pkt->target = SCSI_BUS_32(cmd) ? qla1280_32bit_start_scsi() 3674 unsigned int bus, target, lun; qla1280_status_entry() local 3700 /* Generate LU queue on cntrl, target, LUN */ qla1280_status_entry() 3702 target = SCSI_TCN_32(cmd); qla1280_status_entry() 3743 "l %i\n", bus, target, lun); qla1280_status_entry() 3968 int bus, target, lun; qla1280_get_target_parameters() local 3971 target = device->id; qla1280_get_target_parameters() 3976 mb[1] = (uint16_t) (bus ? target | BIT_7 : target); qla1280_get_target_parameters() 3981 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun); qla1280_get_target_parameters() 4042 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n", __qla1280_print_scsi_cmd()
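A note on the recurring expression above: (bus ? target | BIT_7 : target) << 8 packs the SCSI target ID into the high byte of mailbox register 1, with BIT_7 marking the second bus of a dual-channel ISP and the low byte left free for a LUN where a routine needs one. A minimal sketch of that encoding, assuming only the BIT_7 value and field layout visible in the quoted driver (the helper name is hypothetical):

    #include <stdint.h>

    #define BIT_7 0x80

    /* Hypothetical helper mirroring the quoted mb[1] layout: target ID
     * in bits 8..14, BIT_7 set for bus 1, LUN in the low byte. */
    static uint16_t qla1280_encode_tgt(int bus, int target, int lun)
    {
        uint16_t id = (uint16_t)(bus ? (target | BIT_7) : target);

        return (uint16_t)((id << 8) | lun);
    }

    /* e.g. bus 1, target 3, lun 0 -> 0x8300 */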
|
H A D | gdth_ioctl.h | 94 u8 target; /* target ID */ member in struct:__anon9444::__anon9445::__anon9452 113 u8 target; /* target ID */ member in struct:__anon9444::__anon9445::__anon9453 210 u8 target; /* target ID */ member in struct:__anon9461::__anon9462::__anon9467 257 u8 target; /* target ID */ member in struct:__anon9469::__anon9470::__anon9477 326 u8 target; /* target ID */ member in struct:__anon9484::__anon9485
|
H A D | esp_scsi.c | 239 esp->target[i].esp_config3 = val; esp_set_all_config3() 300 esp->prev_cfg3 = esp->target[0].esp_config3; esp_reset_esp() 313 u8 cfg3 = esp->target[0].esp_config3; esp_reset_esp() 320 u32 cfg3 = esp->target[0].esp_config3; esp_reset_esp() 325 esp->prev_cfg3 = esp->target[0].esp_config3; esp_reset_esp() 341 (esp->target[0].esp_config3 | esp_reset_esp() 343 esp->prev_cfg3 = esp->target[0].esp_config3; esp_reset_esp() 495 u8 val = esp->target[tgt].esp_config3; esp_write_tgt_config3() 506 u8 off = esp->target[tgt].esp_offset; esp_write_tgt_sync() 507 u8 per = esp->target[tgt].esp_period; esp_write_tgt_sync() 551 struct scsi_target *target = tp->starget; esp_need_to_nego_wide() local 553 return spi_width(target) != tp->nego_goal_width; esp_need_to_nego_wide() 558 struct scsi_target *target = tp->starget; esp_need_to_nego_sync() local 561 if (!spi_offset(target) && !tp->nego_goal_offset) esp_need_to_nego_sync() 564 if (spi_offset(target) == tp->nego_goal_offset && esp_need_to_nego_sync() 565 spi_period(target) == tp->nego_goal_period) esp_need_to_nego_sync() 747 tp = &esp->target[tgt]; esp_maybe_execute_command() 763 /* Need to negotiate. If the target is broken esp_maybe_execute_command() 1144 int target, lun; esp_reconnect() local 1148 /* FASHME puts the target and lun numbers directly esp_reconnect() 1151 target = esp->fifo[0]; esp_reconnect() 1157 * the target is given as a sample of the arbitration esp_reconnect() 1159 * see the ID of the ESP and the one reconnecting target esp_reconnect() 1168 target = ffs(bits) - 1; esp_reconnect() 1185 esp_write_tgt_sync(esp, target); esp_reconnect() 1186 esp_write_tgt_config3(esp, target); esp_reconnect() 1191 esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT, esp_reconnect() 1194 tp = &esp->target[target]; esp_reconnect() 1199 target, lun); esp_reconnect() 1256 struct esp_target_data *tp = &esp->target[cmd->device->id]; esp_finish_select() 1292 * wide parameters if this target starts responding esp_finish_select() 1295 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO; esp_finish_select() 1395 * target as an extra data byte. Fun. esp_data_bytes_sent() 1402 * target in synchronous mode. esp_data_bytes_sent() 1471 tp = &esp->target[tgt]; esp_msgin_reject() 1611 tp = &esp->target[tgt]; esp_msgin_extended() 1629 /* Analyze msgin bytes received from target so far. Return non-zero 1830 /* XXX force sync mode for this target XXX */ esp_process_event() 2092 struct esp_target_data *tp = &esp->target[i]; esp_reset_cleanup() 2285 esp->target[i].flags = 0; esp_init_swstate() 2286 esp->target[i].nego_goal_period = 0; esp_init_swstate() 2287 esp->target[i].nego_goal_offset = 0; esp_init_swstate() 2288 esp->target[i].nego_goal_width = 0; esp_init_swstate() 2289 esp->target[i].nego_goal_tags = 0; esp_init_swstate() 2338 * to select a target forever if you let it. This value tells the esp_set_clock_params() 2444 struct esp_target_data *tp = &esp->target[starget->id]; esp_target_alloc() 2454 struct esp_target_data *tp = &esp->target[starget->id]; esp_target_destroy() 2462 struct esp_target_data *tp = &esp->target[dev->id]; esp_slave_alloc() 2484 struct esp_target_data *tp = &esp->target[dev->id]; esp_slave_configure() 2569 /* Send out an abort, encouraging the target to esp_eh_abort_handler() 2585 * to the target. Coming up with all the code to esp_eh_abort_handler() 2615 * XXX since we know which target/lun in particular is esp_eh_abort_handler() 2713 static void esp_set_offset(struct scsi_target *target, int offset) esp_set_offset() argument 2715 struct Scsi_Host *host = dev_to_shost(target->dev.parent); esp_set_offset() 2717 struct esp_target_data *tp = &esp->target[target->id]; esp_set_offset() 2726 static void esp_set_period(struct scsi_target *target, int period) esp_set_period() argument 2728 struct Scsi_Host *host = dev_to_shost(target->dev.parent); esp_set_period() 2730 struct esp_target_data *tp = &esp->target[target->id]; esp_set_period() 2736 static void esp_set_width(struct scsi_target *target, int width) esp_set_width() argument 2738 struct Scsi_Host *host = dev_to_shost(target->dev.parent); esp_set_width() 2740 struct esp_target_data *tp = &esp->target[target->id]; esp_set_width()
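The esp_need_to_nego_wide()/esp_need_to_nego_sync() lines above decide whether a (re)negotiation message is needed by comparing the SPI transport's current per-target settings against the driver's nego_goal_* values. A freestanding sketch of the same decision, with plain structs standing in for the transport attributes (the my_ names are hypothetical):

    #include <stdbool.h>

    struct my_spi_state { int width, offset, period; }; /* current, per target */
    struct my_nego_goal { int width, offset, period; }; /* what the driver wants */

    static bool need_to_nego_wide(const struct my_spi_state *cur,
                                  const struct my_nego_goal *goal)
    {
        return cur->width != goal->width;
    }

    static bool need_to_nego_sync(const struct my_spi_state *cur,
                                  const struct my_nego_goal *goal)
    {
        if (!cur->offset && !goal->offset)
            return false;   /* both sides already asynchronous */
        if (cur->offset == goal->offset && cur->period == goal->period)
            return false;   /* already negotiated to the goal */
        return true;
    }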
|
H A D | mesh.h | 51 #define SEQ_TARGET 0x40 /* put the controller into target mode */ 56 #define SEQ_SELECT 2 /* select a target */ 93 #define EXC_SELWATN 0x20 /* (as target) we were selected with ATN */ 94 #define EXC_SELECTED 0x10 /* (as target) we were selected w/o ATN */ 101 #define ERR_UNEXPDISC 0x40 /* target unexpectedly disconnected */
|
H A D | scsi_scan.c | 16 * Scan LUN 0; if the target responds to LUN 0 (even if there is no 22 * If target is SCSI-3 or up, issue a REPORT LUN, and scan 69 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this 72 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available 75 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a 198 * @starget: which target to allocate a &scsi_device for 354 * Search for an existing target for this sdev. __scsi_find_target() 370 * scsi_target_reap_ref_release - remove target from visibility 371 * @kref: the reap_ref in the target being released 374 * under this target is visible anymore, so render the target invisible in 375 * sysfs. Note: we have to be in user context here because the target reaps 384 * if we get here and the target is still in the CREATED state that scsi_target_reap_ref_release() 401 * scsi_alloc_target - allocate a new or find an existing target 402 * @parent: parent of the target (need not be a scsi host) 403 * @channel: target channel number (zero if no channels) 404 * @id: target id number 406 * Return an existing target if one exists, provided it hasn't already 407 * gone into STARGET_DEL state, otherwise allocate a new target. 409 * The target is returned with an incremented reference, so the caller 433 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); scsi_alloc_target() 459 dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); scsi_alloc_target() 473 * take the reference, the target must be alive. If we can't, it must scsi_alloc_target() 474 * be dying and we need to wait for a new target scsi_alloc_target() 484 * Unfortunately, we found a dying target; need to wait until it's scsi_alloc_target() 487 * reap_ref above. However, since the target being released, it's scsi_alloc_target() 490 * an already invisible target. scsi_alloc_target() 495 * for a tick to avoid busy waiting for the target to die. scsi_alloc_target() 502 * scsi_target_reap - check to see if target is in use and destroy if not 503 * @starget: target to be checked 505 * This is used after removing a LUN or doing a last put of the target 506 * it checks atomically that nothing is using the target and removes 1033 * @starget: pointer to target device structure 1034 * @lun: LUN of target device 1046 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is 1109 * is a target id responding. scsi_probe_and_add_lun() 1188 * scsi_sequential_lun_scan - sequentially scan a SCSI target 1189 * @starget: pointer to target structure to scan 1278 * @starget: which target 1286 * If BLINK_REPORTLUN2 is set, scan a target that supports more than 8 1316 * Don't attempt if the target doesn't support REPORT LUNS. scsi_report_lun_scan() 1508 uint target, u64 lun) scsi_add_device() 1511 __scsi_add_device(host, channel, target, lun, NULL); scsi_add_device() 1569 * The REPORT LUN did not scan the target, __scsi_scan_target() 1579 * paired with scsi_alloc_target(): determine if the target has __scsi_scan_target() 1588 * scsi_scan_target - scan a target id, possibly including all LUNs on the target. 1591 * @id: target id to scan 1596 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0, 1597 * and possibly all LUNs on the target id. 1599 * First try a REPORT LUN scan, if that does not scan the target, do a 1600 * sequential scan of LUNs on the target id. 1635 * the FC ID can be the same as a target id scsi_scan_channel() 1688 /* target removed before the device could be added */ shost_for_each_device() 1507 scsi_add_device(struct Scsi_Host *host, uint channel, uint target, u64 lun) scsi_add_device() argument
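The scan comments above spell out the overall strategy: always probe LUN 0, then prefer a single REPORT LUNS round trip, and fall back to a sequential LUN walk when the target does not support or fails REPORT LUNS. A condensed, compilable sketch of that control flow under those assumptions (the helpers are stubs, not the real scsi_scan.c routines):

    enum lun_result { LUN_ABSENT, LUN_PRESENT };

    /* Stubbed probes so the sketch compiles on its own; a real driver
     * issues INQUIRY and REPORT LUNS here. */
    static enum lun_result probe_lun(int target, unsigned int lun)
    {
        (void)target;
        return lun == 0 ? LUN_PRESENT : LUN_ABSENT;
    }

    static int report_lun_scan(int target)
    {
        (void)target;
        return -1;  /* pretend the device predates REPORT LUNS */
    }

    static void scan_target(int target, unsigned int max_luns)
    {
        if (probe_lun(target, 0) != LUN_PRESENT)
            return;                     /* nothing answered at LUN 0 */
        if (report_lun_scan(target) == 0)
            return;                     /* one round trip covered all LUNs */
        for (unsigned int lun = 1; lun < max_luns; lun++)
            probe_lun(target, lun);     /* sequential fallback */
    }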
|
/linux-4.4.14/arch/sparc/include/asm/ |
H A D | jump_label.h | 46 jump_label_t target; member in struct:jump_entry
|
/linux-4.4.14/arch/metag/kernel/ |
H A D | ptrace.c | 138 static int metag_gp_regs_get(struct task_struct *target, metag_gp_regs_get() argument 143 const struct pt_regs *regs = task_pt_regs(target); metag_gp_regs_get() 147 static int metag_gp_regs_set(struct task_struct *target, metag_gp_regs_set() argument 152 struct pt_regs *regs = task_pt_regs(target); metag_gp_regs_set() 184 static int metag_cb_regs_get(struct task_struct *target, metag_cb_regs_get() argument 189 const struct pt_regs *regs = task_pt_regs(target); metag_cb_regs_get() 193 static int metag_cb_regs_set(struct task_struct *target, metag_cb_regs_set() argument 198 struct pt_regs *regs = task_pt_regs(target); metag_cb_regs_set() 273 static int metag_rp_state_get(struct task_struct *target, metag_rp_state_get() argument 278 const struct pt_regs *regs = task_pt_regs(target); metag_rp_state_get() 282 static int metag_rp_state_set(struct task_struct *target, metag_rp_state_set() argument 287 struct pt_regs *regs = task_pt_regs(target); metag_rp_state_set() 291 static int metag_tls_get(struct task_struct *target, metag_tls_get() argument 296 void __user *tls = target->thread.tls_ptr; metag_tls_get() 300 static int metag_tls_set(struct task_struct *target, metag_tls_set() argument 312 target->thread.tls_ptr = tls; metag_tls_set()
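Each of these ptrace regset handlers follows the same shape: the get hook serializes a register block from task_pt_regs(target) into the tracer's buffer, and the set hook copies a new block in and commits it. A userspace-compilable sketch of that pattern, with simplified structs and memcpy stand-ins for the kernel's user_regset_copyout()/user_regset_copyin():

    #include <stddef.h>
    #include <string.h>

    struct my_pt_regs { unsigned long regs[16]; unsigned long pc; };
    struct my_task { struct my_pt_regs regs; };

    /* Stand-ins for user_regset_copyout()/user_regset_copyin(). */
    static int copyout(void *to, const void *from, size_t n) { memcpy(to, from, n); return 0; }
    static int copyin(void *to, const void *from, size_t n) { memcpy(to, from, n); return 0; }

    static int gpr_get(struct my_task *target, void *buf, size_t len)
    {
        const struct my_pt_regs *regs = &target->regs;  /* task_pt_regs() */

        return copyout(buf, regs, len < sizeof(*regs) ? len : sizeof(*regs));
    }

    static int gpr_set(struct my_task *target, const void *buf, size_t len)
    {
        struct my_pt_regs newregs = target->regs;       /* start from current */
        int ret = copyin(&newregs, buf, len < sizeof(newregs) ? len : sizeof(newregs));

        if (!ret)
            target->regs = newregs;                     /* commit only on success */
        return ret;
    }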
|
/linux-4.4.14/arch/arm/kernel/ |
H A D | jump_label.c | 16 insn = arm_gen_branch(entry->code, entry->target); __arch_jump_label_transform()
|
/linux-4.4.14/drivers/target/iscsi/ |
H A D | iscsi_target_device.c | 20 #include <target/target_core_base.h> 21 #include <target/target_core_fabric.h> 23 #include <target/iscsi/iscsi_target_core.h>
|
H A D | iscsi_target_transport.c | 3 #include <target/iscsi/iscsi_transport.h>
|
/linux-4.4.14/scripts/ |
H A D | markup_oops.pl | 40 my $target = "0"; 162 $target = $1; 165 $target = $1; 186 my $decodestart = Math::BigInt->from_hex("0x$target") - Math::BigInt->from_hex("0x$func_offset"); 187 my $decodestop = Math::BigInt->from_hex("0x$target") + 8192; 188 if ($target eq "0") { 209 $vmaoffset = Math::BigInt->from_hex("0x$target") - Math::BigInt->from_hex("0x$fu") - Math::BigInt->from_hex("0x$func_offset"); 222 my ($address, $target) = @_; 224 my $ta = "0x".$target; 245 if (InRange($1, $target)) { 253 if (!InRange($val, $target)) { 256 if ($val eq $target) {
|
/linux-4.4.14/tools/perf/ |
H A D | perf.h | 40 #include "util/target.h" 43 struct target target; member in struct:record_opts
|
H A D | builtin-probe.c | 59 char *target; member in struct:__anon15666 76 if (params.target) { parse_probe_event() 77 pev->target = strdup(params.target); parse_probe_event() 78 if (!pev->target) parse_probe_event() 124 if (!params.target && ptr && *ptr == '/') { set_target() 125 params.target = strdup(ptr); set_target() 126 if (!params.target) set_target() 202 free(params.target); opt_set_target() 203 params.target = tmp; opt_set_target() 297 free(params.target); cleanup_params() 506 "target executable name or path", opt_set_target), __cmd_probe() 508 "target module name (for online) or path (for offline)", __cmd_probe() 575 ret = show_available_funcs(params.target, params.filter, __cmd_probe() 582 ret = show_line_range(¶ms.line_range, params.target, __cmd_probe() 606 /* Ensure the last given target is used */ __cmd_probe() 607 if (params.target && !params.target_used) { __cmd_probe()
|
H A D | Makefile | 62 # Needed if no target specified: 72 # The clean target is not really parallel, don't print the jobs info: 78 # The build-test target is not really parallel, don't print the jobs info:
|
/linux-4.4.14/drivers/dma/ppc4xx/ |
H A D | xor.h | 28 #define XOR_CBCR_TGT_BIT (1<<30) /* target present */ 75 u32 cbtah; /* target address high */ 76 u32 cbtal; /* target address low */ 95 u32 cbtahr; /* operand target address high register */ 96 u32 cbtalr; /* operand target address low register */
|
/linux-4.4.14/arch/arm64/kernel/ |
H A D | alternative.c | 42 * Check if the target PC is within an alternative block. 70 unsigned long target; get_alt_insn() local 72 target = (unsigned long)altinsnptr + offset; get_alt_insn() 79 if (branch_insn_requires_update(alt, target)) { get_alt_insn() 80 offset = target - (unsigned long)insnptr; get_alt_insn()
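The get_alt_insn() lines above show the key arithmetic for relocating alternatives: a PC-relative branch copied from the alternative section to its live location must have its offset rebased so it still reaches the same absolute target. The same two steps in isolation (a sketch, not the arm64 instruction encoder itself):

    #include <stdint.h>

    /* Rebase a PC-relative offset when an instruction moves from 'src'
     * to 'dst' but must keep branching to the same absolute address,
     * mirroring the target/offset arithmetic quoted above. */
    static intptr_t rebase_branch_offset(uintptr_t src, uintptr_t dst,
                                         intptr_t offset)
    {
        uintptr_t target = src + offset;    /* absolute destination */

        return (intptr_t)(target - dst);    /* new PC-relative offset */
    }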
|
H A D | ptrace.c | 372 static int hw_break_get(struct task_struct *target, hw_break_get() argument 403 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr); hw_break_get() 412 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl); hw_break_get() 433 static int hw_break_set(struct task_struct *target, hw_break_set() argument 456 ret = ptrace_hbp_set_addr(note_type, target, idx, addr); hw_break_set() 465 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl); hw_break_set() 483 static int gpr_get(struct task_struct *target, gpr_get() argument 488 struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs; gpr_get() 492 static int gpr_set(struct task_struct *target, const struct user_regset *regset, gpr_set() argument 506 task_pt_regs(target)->user_regs = newregs; gpr_set() 513 static int fpr_get(struct task_struct *target, const struct user_regset *regset, fpr_get() argument 518 uregs = &target->thread.fpsimd_state.user_fpsimd; fpr_get() 522 static int fpr_set(struct task_struct *target, const struct user_regset *regset, fpr_set() argument 533 target->thread.fpsimd_state.user_fpsimd = newstate; fpr_set() 534 fpsimd_flush_task_state(target); fpr_set() 538 static int tls_get(struct task_struct *target, const struct user_regset *regset, tls_get() argument 542 unsigned long *tls = &target->thread.tp_value; tls_get() 546 static int tls_set(struct task_struct *target, const struct user_regset *regset, tls_set() argument 557 target->thread.tp_value = tls; tls_set() 561 static int system_call_get(struct task_struct *target, system_call_get() argument 566 int syscallno = task_pt_regs(target)->syscallno; system_call_get() 572 static int system_call_set(struct task_struct *target, system_call_set() argument 583 task_pt_regs(target)->syscallno = syscallno; system_call_set() 668 static int compat_gpr_get(struct task_struct *target, compat_gpr_get() argument 691 reg = task_pt_regs(target)->pc; compat_gpr_get() 694 reg = task_pt_regs(target)->pstate; compat_gpr_get() 697 reg = task_pt_regs(target)->orig_x0; compat_gpr_get() 700 reg = task_pt_regs(target)->regs[idx]; compat_gpr_get() 720 static int compat_gpr_set(struct task_struct *target, compat_gpr_set() argument 738 newregs = *task_pt_regs(target); compat_gpr_set() 774 *task_pt_regs(target) = newregs; compat_gpr_set() 781 static int compat_vfp_get(struct task_struct *target, compat_vfp_get() argument 790 uregs = &target->thread.fpsimd_state.user_fpsimd; compat_vfp_get() 808 static int compat_vfp_set(struct task_struct *target, compat_vfp_set() argument 820 uregs = &target->thread.fpsimd_state.user_fpsimd; compat_vfp_set() 831 fpsimd_flush_task_state(target); compat_vfp_set() 835 static int compat_tls_get(struct task_struct *target, compat_tls_get() argument 839 compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value; compat_tls_get() 843 static int compat_tls_set(struct task_struct *target, compat_tls_set() argument 855 target->thread.tp_value = tls; compat_tls_set()
|
/linux-4.4.14/include/trace/events/ |
H A D | thermal.h | 38 TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target), 40 TP_ARGS(cdev, target), 44 __field(unsigned long, target) 49 __entry->target = target; 52 TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
|
/linux-4.4.14/arch/mn10300/kernel/ |
H A D | ptrace.c | 81 static int genregs_get(struct task_struct *target, genregs_get() argument 86 const struct pt_regs *regs = task_pt_regs(target); genregs_get() 108 static int genregs_set(struct task_struct *target, genregs_set() argument 113 struct pt_regs *regs = task_pt_regs(target); genregs_set() 157 static int fpuregs_get(struct task_struct *target, fpuregs_get() argument 162 const struct fpu_state_struct *fpregs = &target->thread.fpu_state; fpuregs_get() 165 unlazy_fpu(target); fpuregs_get() 179 static int fpuregs_set(struct task_struct *target, fpuregs_set() argument 184 struct fpu_state_struct fpu_state = target->thread.fpu_state; fpuregs_set() 192 fpu_kill_state(target); fpuregs_set() 193 target->thread.fpu_state = fpu_state; fpuregs_set() 194 set_using_fpu(target); fpuregs_set() 203 static int fpuregs_active(struct task_struct *target, fpuregs_active() argument 206 return is_using_fpu(target) ? regset->n : 0; fpuregs_active()
|
/linux-4.4.14/net/ipv6/netfilter/ |
H A D | ip6_tables.c | 255 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { get_chainname_rulenum() 256 /* Head of user chain: ERROR target with chainname */ get_chainname_rulenum() 257 *chainname = t->target.data; get_chainname_rulenum() 263 strcmp(t->target.u.kernel.target->name, get_chainname_rulenum() 266 /* Tail of chains: STANDARD target (return/policy) */ get_chainname_rulenum() 400 IP_NF_ASSERT(t->u.kernel.target); 408 /* Standard target? */ 409 if (!t->u.kernel.target->target) { 435 acpar.target = t->u.kernel.target; 438 verdict = t->u.kernel.target->target(skb, &acpar); 459 const struct ip6t_entry *target) find_jump_target() 464 if (iter == target) find_jump_target() 504 (strcmp(t->target.u.user.name, mark_source_chains() 509 if ((strcmp(t->target.u.user.name, mark_source_chains() 554 if (strcmp(t->target.u.user.name, mark_source_chains() 651 .target = t->u.kernel.target, check_target() 663 t->u.kernel.target->name); check_target() 674 struct xt_target *target; find_check_entry() local 698 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, 700 if (IS_ERR(target)) { 702 ret = PTR_ERR(target); 705 t->u.kernel.target = target; 712 module_put(t->u.kernel.target->me); 783 "use the STANDARD target with " check_entry_size_and_hooks() 809 par.target = t->u.kernel.target; cleanup_entry() 812 if (par.target->destroy != NULL) cleanup_entry() 813 par.target->destroy(&par); cleanup_entry() 814 module_put(par.target->me); cleanup_entry() 1010 t->u.kernel.target->name, copy_entries_to_user() 1011 strlen(t->u.kernel.target->name)+1) != 0) { copy_entries_to_user() 1055 off += xt_compat_target_offset(t->u.kernel.target); compat_calc_entry() 1450 module_put(t->u.kernel.target->me); compat_release_entry() 1462 struct xt_target *target; check_compat_entry_size_and_hooks() local 1501 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, 1503 if (IS_ERR(target)) { 1506 ret = PTR_ERR(target); 1509 t->u.kernel.target = target; 1511 off += xt_compat_target_offset(target); 1520 module_put(t->u.kernel.target->me); 1889 int target; do_ip6t_get_ctl() local 1902 target = 1; do_ip6t_get_ctl() 1904 target = 0; do_ip6t_get_ctl() 1908 target, &ret), do_ip6t_get_ctl() 2036 .target = ip6t_error, 458 find_jump_target(const struct xt_table_info *t, const struct ip6t_entry *target) find_jump_target() argument
|
H A D | ip6t_MASQUERADE.c | 8 * Based on Rusty Russell's IPv6 MASQUERADE target. Development of IPv6 43 .target = masquerade_tg6,
|
/linux-4.4.14/arch/powerpc/lib/ |
H A D | code-patching.c | 30 int patch_branch(unsigned int *addr, unsigned long target, int flags) patch_branch() argument 32 return patch_instruction(addr, create_branch(addr, target, flags)); patch_branch() 36 unsigned long target, int flags) create_branch() 41 offset = target; create_branch() 45 /* Check we can represent the target in the instruction format */ create_branch() 49 /* Mask out the flags and target, so they don't step on each other. */ create_branch() 56 unsigned long target, int flags) create_cond_branch() 61 offset = target; create_cond_branch() 65 /* Check we can represent the target in the instruction format */ create_cond_branch() 69 /* Mask out the flags and target, so they don't step on each other. */ create_cond_branch() 150 unsigned long target; translate_branch() local 152 target = branch_target(src); translate_branch() 155 return create_branch(dest, target, *src); translate_branch() 157 return create_cond_branch(dest, target, *src); translate_branch() 197 /* All bits of target set, and flags */ test_branch_iform() 253 /* Unaligned target */ test_branch_iform() 285 /* All bits of target set, and flags */ test_branch_bform() 335 /* Unaligned target */ test_branch_bform() 35 create_branch(const unsigned int *addr, unsigned long target, int flags) create_branch() argument 55 create_cond_branch(const unsigned int *addr, unsigned long target, int flags) create_cond_branch() argument
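create_branch() builds a PowerPC I-form branch: opcode 18 in the top six bits, a signed 26-bit word-aligned displacement, and the AA/LK flags in the low two bits; the range check and the masking that keeps flags and target bits apart are visible in the quoted lines. A self-contained sketch of that encoding under those assumptions:

    #include <stdint.h>

    #define BRANCH_ABSOLUTE 0x2     /* AA bit: 'target' is an absolute address */

    /* Returns the encoded instruction, or 0 if the displacement is
     * outside the +/-32MB I-form range or not word aligned. */
    static uint32_t ppc_create_branch(uintptr_t addr, uintptr_t target, int flags)
    {
        long offset = (long)target;

        if (!(flags & BRANCH_ABSOLUTE))
            offset -= (long)addr;           /* PC-relative displacement */

        if (offset < -0x2000000 || offset > 0x1FFFFFC || (offset & 0x3))
            return 0;                       /* unrepresentable */

        /* 0x48000000 is opcode 18 (b); mask flags and target apart. */
        return 0x48000000 | (flags & 0x3) | ((uint32_t)offset & 0x03FFFFFC);
    }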
|
/linux-4.4.14/scripts/dtc/ |
H A D | dtc-parser.y | 146 struct node *target = get_node_by_ref($1, $3); 148 add_label(&target->labels, $2); 149 if (target) 150 merge_nodes(target, $4); 157 struct node *target = get_node_by_ref($1, $2); 159 if (target) 160 merge_nodes(target, $3); 167 struct node *target = get_node_by_ref($1, $3); 169 if (target) 170 delete_node(target);
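Worth noting in the first rule quoted above: add_label() dereferences target before the if (target) test, so a dangling label reference would crash before the error path ever runs; later dtc releases reorder this. A defensive sketch of the intended ordering, with the node and helper types reduced to a bare minimum (stubs, not the real dtc API):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { const char *label; };

    /* Simplified stand-ins for dtc's get_node_by_ref()/add_label(). */
    static struct node *get_node_by_ref(const char *ref) { (void)ref; return NULL; }
    static void add_label(struct node *n, const char *label) { n->label = label; }

    static void merge_by_ref(const char *ref, const char *label)
    {
        struct node *target = get_node_by_ref(ref);

        if (!target) {                      /* check before any dereference */
            fprintf(stderr, "label or path %s not found\n", ref);
            exit(1);
        }
        add_label(target, label);           /* now safe */
        /* ... merge_nodes(target, new_node) ... */
    }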
|
/linux-4.4.14/arch/frv/kernel/ |
H A D | ptrace.c | 40 static int genregs_get(struct task_struct *target, genregs_get() argument 45 const struct user_int_regs *iregs = &target->thread.user->i; genregs_get() 60 static int genregs_set(struct task_struct *target, genregs_set() argument 65 struct user_int_regs *iregs = &target->thread.user->i; genregs_set() 105 static int fpmregs_get(struct task_struct *target, fpmregs_get() argument 110 const struct user_fpmedia_regs *fpregs = &target->thread.user->f; fpmregs_get() 125 static int fpmregs_set(struct task_struct *target, fpmregs_set() argument 130 struct user_fpmedia_regs *fpregs = &target->thread.user->f; fpmregs_set() 145 static int fpmregs_active(struct task_struct *target, fpmregs_active() argument 148 return tsk_used_math(target) ? regset->n : 0; fpmregs_active()
|
/linux-4.4.14/drivers/usb/misc/ |
H A D | ftdi-elan.c | 160 struct u132_target target[4]; member in struct:usb_ftdi 299 struct u132_target *target, u8 *buffer, int length); 351 struct u132_target *target = &ftdi->target[ed_number]; ftdi_elan_abandon_targets() local 352 if (target->active == 1) { ftdi_elan_abandon_targets() 353 target->condition_code = TD_DEVNOTRESP; ftdi_elan_abandon_targets() 355 ftdi_elan_do_callback(ftdi, target, NULL, 0); ftdi_elan_abandon_targets() 370 struct u132_target *target = &ftdi->target[ed_number]; ftdi_elan_flush_targets() local 371 target->abandoning = 1; ftdi_elan_flush_targets() 372 wait_1:if (target->active == 1) { ftdi_elan_flush_targets() 394 wait_2:if (target->active == 1) { ftdi_elan_flush_targets() 428 struct u132_target *target = &ftdi->target[ed_number]; ftdi_elan_cancel_targets() local 429 target->abandoning = 1; ftdi_elan_cancel_targets() 430 wait:if (target->active == 1) { ftdi_elan_cancel_targets() 837 struct u132_target *target, u8 *buffer, int length) ftdi_elan_do_callback() 839 struct urb *urb = target->urb; ftdi_elan_do_callback() 840 int halted = target->halted; ftdi_elan_do_callback() 841 int skipped = target->skipped; ftdi_elan_do_callback() 842 int actual = target->actual; ftdi_elan_do_callback() 843 int non_null = target->non_null; ftdi_elan_do_callback() 844 int toggle_bits = target->toggle_bits; ftdi_elan_do_callback() 845 int error_count = target->error_count; ftdi_elan_do_callback() 846 int condition_code = target->condition_code; ftdi_elan_do_callback() 847 int repeat_number = target->repeat_number; ftdi_elan_do_callback() 849 int, int, int, int) = target->callback; ftdi_elan_do_callback() 850 target->active -= 1; ftdi_elan_do_callback() 851 target->callback = NULL; ftdi_elan_do_callback() 852 (*callback) (target->endp, urb, buffer, length, toggle_bits, ftdi_elan_do_callback() 858 struct u132_target *target, u16 ed_length, int ed_number, int ed_type, have_ed_set_response() 863 target->actual = 0; have_ed_set_response() 864 target->non_null = (ed_length >> 15) & 0x0001; have_ed_set_response() 865 target->repeat_number = (ed_length >> 11) & 0x000F; have_ed_set_response() 867 if (payload == 0 || target->abandoning > 0) { have_ed_set_response() 868 target->abandoning = 0; have_ed_set_response() 870 ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, have_ed_set_response() 883 if (payload == 0 || target->abandoning > 0) { have_ed_set_response() 884 target->abandoning = 0; have_ed_set_response() 886 ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, have_ed_set_response() 899 target->abandoning = 0; have_ed_set_response() 901 ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, have_ed_set_response() 908 target->abandoning = 0; have_ed_set_response() 910 ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, have_ed_set_response() 920 struct u132_target *target, u16 ed_length, int ed_number, int ed_type, have_ed_get_response() 924 target->condition_code = TD_DEVNOTRESP; have_ed_get_response() 925 target->actual = (ed_length >> 0) & 0x01FF; have_ed_get_response() 926 target->non_null = (ed_length >> 15) & 0x0001; have_ed_get_response() 927 target->repeat_number = (ed_length >> 11) & 0x000F; have_ed_get_response() 929 if (target->active) have_ed_get_response() 930 ftdi_elan_do_callback(ftdi, target, NULL, 0); have_ed_get_response() 931 target->abandoning = 0; have_ed_get_response() 1047 struct u132_target *target = &ftdi->target[ed_number]; ftdi_elan_respond_engine() local 1061 ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, ftdi_elan_respond_engine() 1105 struct u132_target *target = &ftdi->target[ ftdi_elan_respond_engine() local 1107 target->halted = (ftdi->response[0] >> 3) & ftdi_elan_respond_engine() 1109 target->skipped = (ftdi->response[0] >> 2) & ftdi_elan_respond_engine() 1111 target->toggle_bits = (ftdi->response[3] >> 6) ftdi_elan_respond_engine() 1113 target->error_count = (ftdi->response[3] >> 4) ftdi_elan_respond_engine() 1115 target->condition_code = (ftdi->response[ ftdi_elan_respond_engine() 1118 b = have_ed_set_response(ftdi, target, ftdi_elan_respond_engine() 1123 b = have_ed_get_response(ftdi, target, ftdi_elan_respond_engine() 1503 struct u132_target *target = &ftdi->target[ed]; ftdi_elan_edset_setup() local 1515 target->callback = callback; ftdi_elan_edset_setup() 1516 target->endp = endp; ftdi_elan_edset_setup() 1517 target->urb = urb; ftdi_elan_edset_setup() 1518 target->active = 1; ftdi_elan_edset_setup() 1560 struct u132_target *target = &ftdi->target[ed]; ftdi_elan_edset_input() local 1580 target->callback = callback; ftdi_elan_edset_input() 1581 target->endp = endp; ftdi_elan_edset_input() 1582 target->urb = urb; ftdi_elan_edset_input() 1583 target->active = 1; ftdi_elan_edset_input() 1625 struct u132_target *target = &ftdi->target[ed]; ftdi_elan_edset_empty() local 1637 target->callback = callback; ftdi_elan_edset_empty() 1638 target->endp = endp; ftdi_elan_edset_empty() 1639 target->urb = urb; ftdi_elan_edset_empty() 1640 target->active = 1; ftdi_elan_edset_empty() 1689 struct u132_target *target = &ftdi->target[ed]; ftdi_elan_edset_output() local 1716 target->callback = callback; ftdi_elan_edset_output() 1717 target->endp = endp; ftdi_elan_edset_output() 1718 target->urb = urb; ftdi_elan_edset_output() 1719 target->active = 1; ftdi_elan_edset_output() 1763 struct u132_target *target = &ftdi->target[ed]; ftdi_elan_edset_single() local 1781 target->callback = callback; ftdi_elan_edset_single() 1782 target->endp = endp; ftdi_elan_edset_single() 1783 target->urb = urb; ftdi_elan_edset_single() 1784 target->active = 1; ftdi_elan_edset_single() 1819 struct u132_target *target = &ftdi->target[ed]; ftdi_elan_edset_flush() local 1821 if (target->abandoning > 0) { ftdi_elan_edset_flush() 1825 target->abandoning = 1; ftdi_elan_edset_flush() 1826 wait_1:if (target->active == 1) { ftdi_elan_edset_flush() 836 ftdi_elan_do_callback(struct usb_ftdi *ftdi, struct u132_target *target, u8 *buffer, int length) ftdi_elan_do_callback() argument 857 have_ed_set_response(struct usb_ftdi *ftdi, struct u132_target *target, u16 ed_length, int ed_number, int ed_type, char *b) have_ed_set_response() argument 919 have_ed_get_response(struct usb_ftdi *ftdi, struct u132_target *target, u16 ed_length, int ed_number, int ed_type, char *b) have_ed_get_response() argument
|
/linux-4.4.14/scripts/basic/ |
H A D | fixdep.c | 65 * fixdep <depfile> <target> <cmdline> 73 * cmd_<target> = <cmdline> 75 * and then basically copies the .<target>.d file to stdout, in the 123 char *target; variable 129 fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n"); usage() 134 * Print out the commandline prefixed with cmd_<target filename> := 138 printf("cmd_%s := %s\n\n", target, cmdline); print_cmdline() 319 /* Is the token we found a target name? */ parse_dep_file() 321 /* Don't write any target names into the dependency file */ parse_dep_file() 346 * process the first target name, which parse_dep_file() 348 * and ignore any other target names, parse_dep_file() 355 target, s); parse_dep_file() 357 target); parse_dep_file() 377 printf("\n%s: $(deps_%s)\n\n", target, target); parse_dep_file() 378 printf("$(deps_%s):\n", target); parse_dep_file() 437 target = argv[2]; main()
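Per the header comment quoted above, fixdep's job is mechanical: emit cmd_<target> := <cmdline>, then the filtered dependency list, then a <target>: $(deps_<target>) stanza plus empty rules so make re-runs the command when a dependency changes or disappears. The output shape in a few printf calls, a sketch that reuses the format strings visible in the quote (the deps_ variable line is an assumption about the elided part of the file):

    #include <stdio.h>

    static void print_dep_stanza(const char *target, const char *cmdline,
                                 const char *const *deps, int ndeps)
    {
        printf("cmd_%s := %s\n\n", target, cmdline);

        printf("deps_%s := \\\n", target);      /* assumed variable shape */
        for (int i = 0; i < ndeps; i++)
            printf("  %s \\\n", deps[i]);

        printf("\n%s: $(deps_%s)\n\n", target, target);
        printf("$(deps_%s):\n", target);        /* empty rules for removed files */
    }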
|
/linux-4.4.14/include/linux/netfilter/ |
H A D | x_tables.h | 13 * @target: the target extension 15 * @targetinfo: per-target data 32 const struct xt_target *target; member in union:xt_action_param::__anon13018 82 * struct xt_tgchk_param - parameters for target extensions' 94 const struct xt_target *target; member in struct:xt_tgchk_param 104 const struct xt_target *target; member in struct:xt_tgdtor_param 157 unsigned int (*target)(struct sk_buff *skb, member in struct:xt_target 232 int xt_register_target(struct xt_target *target); 233 void xt_unregister_target(struct xt_target *target); 234 int xt_register_targets(struct xt_target *target, unsigned int n); 235 void xt_unregister_targets(struct xt_target *target, unsigned int n); 237 int xt_register_match(struct xt_match *target); 238 void xt_unregister_match(struct xt_match *target); 269 int xt_find_revision(u8 af, const char *name, u8 revision, int target, 449 compat_uptr_t target; member in struct:compat_xt_entry_target::__anon13023::__anon13025 493 int xt_compat_target_offset(const struct xt_target *target);
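struct xt_target is essentially a named function-pointer record: after the matches pass, the core calls t->u.kernel.target->target(skb, &acpar) and treats the return value as a verdict, and a NULL callback marks the built-in standard target whose verdict is stored in the rule itself (the ip6_tables.c entry above shows exactly that dispatch). A self-contained model of the mechanism; every name here is illustrative, not the real netfilter API:

    #include <stdio.h>

    enum verdict { MY_DROP = 0, MY_ACCEPT = 1 };

    struct my_pkt { int len; };

    struct my_target {
        const char *name;
        /* NULL means "standard target": verdict stored in the rule. */
        enum verdict (*target)(struct my_pkt *pkt);
    };

    static enum verdict log_and_accept(struct my_pkt *pkt)
    {
        printf("len=%d\n", pkt->len);
        return MY_ACCEPT;
    }

    static const struct my_target my_log_tg = {
        .name   = "LOG",
        .target = log_and_accept,
    };

    static enum verdict run_rule(const struct my_target *t, struct my_pkt *pkt,
                                 enum verdict standard_verdict)
    {
        if (!t->target)                 /* standard target, no callback */
            return standard_verdict;
        return t->target(pkt);          /* extension decides the verdict */
    }

    int main(void)
    {
        struct my_pkt p = { .len = 64 };

        return run_rule(&my_log_tg, &p, MY_DROP) == MY_ACCEPT ? 0 : 1;
    }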
|
/linux-4.4.14/drivers/scsi/qla2xxx/ |
H A D | qla_target.h | 7 * Forward port and refactoring to modern qla2xxx and target/configfs 11 * Additional file for the target driver support. 25 * target portion. 35 * data in the target add-on 40 * Must be changed on any change in any target visible interfaces or 58 * vs. regular (non-target) info. This is checked for in 74 * ISP target entries - Flags bit definitions. 83 /* (data from target to initiator) */ 85 /* (data from initiator to target) */ 118 ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ 119 : (uint16_t)(iocb)->u.isp2x.target.id.standard) 127 * initiator, that needs to be handled by the target 138 target_id_t target; member in struct:imm_ntfy_from_isp::__anon10083::__anon10084 203 * This is sent to the ISP from the target driver. 213 target_id_t target; member in struct:nack_to_isp::__anon10090::__anon10091 267 #define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */ 274 * This structure is sent to the ISP 2xxx from target driver. 282 target_id_t target; member in struct:ctio_to_2xxx 325 #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ 393 * This is sent from the ISP to the target driver. 402 target_id_t target; member in struct:atio_from_isp::__anon10093::__anon10094 437 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ 441 * This structure is sent to the ISP 24xx from the target driver. 489 * ISP queue - CTIO type 7 from ISP 24xx to target driver 607 * target driver by the ISP 24xx. 653 * target driver to the ISP 24xx. 682 * (sent by the target driver to the ISP 24xx) is sent by the 683 * ISP24xx firmware to the target driver. 710 * Type Definitions used by initiator & target halves 718 * target driver (from within qla_target.c) can issue to the 719 * target module (tcm_qla2xxx). 747 #include <target/target_core_base.h> 786 #define QLA_TGT_STATE_NEW 0 /* New command + target processing */ 787 #define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */ 788 #define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */ 789 #define QLA_TGT_STATE_PROCESSED 3 /* target done processing */ 849 int tgt_stop; /* the target mode driver is being stopped */ 850 int tgt_stopped; /* the target mode driver has been stopped */
|
/linux-4.4.14/drivers/usb/gadget/legacy/ |
H A D | tcm_usb_gadget.h | 9 #include <target/target_core_base.h> 10 #include <target/target_core_fabric.h> 33 /* SAS port target portal group tag for TCM */
|
/linux-4.4.14/drivers/nfc/ |
H A D | nfcsim.c | 128 struct nfc_target *target, nfcsim_dep_link_up() 137 DEV_DBG(dev, "target_idx: %d, comm_mode: %d\n", target->idx, comm_mode); nfcsim_dep_link_up() 163 rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_ACTIVE, nfcsim_dep_link_up() 239 struct nfc_target *target, u32 protocol) nfcsim_activate_target() 249 struct nfc_target *target, u8 mode) nfcsim_deactivate_target() 287 static int nfcsim_tx(struct nfc_dev *nfc_dev, struct nfc_target *target, nfcsim_tx() argument 338 struct nfc_target *target, struct sk_buff *skb, nfcsim_im_transceive() 341 return nfcsim_tx(nfc_dev, target, skb, cb, cb_context); nfcsim_im_transceive() 406 "initiator\n" : "target\n"); nfcsim_wq_poll() 424 * is polling in target mode. nfcsim_wq_poll() 426 * initiator and target at every round. nfcsim_wq_poll() 429 * target at some point, even if both are started in dual mode. nfcsim_wq_poll() 127 nfcsim_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 comm_mode, u8 *gb, size_t gb_len) nfcsim_dep_link_up() argument 238 nfcsim_activate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, u32 protocol) nfcsim_activate_target() argument 248 nfcsim_deactivate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 mode) nfcsim_deactivate_target() argument 337 nfcsim_im_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) nfcsim_im_transceive() argument
|
/linux-4.4.14/security/apparmor/include/ |
H A D | audit.h | 113 void *target; member in union:apparmor_audit_data::__anon15149 116 void *target; member in struct:apparmor_audit_data::__anon15149::__anon15150 123 const char *target; member in struct:apparmor_audit_data::__anon15149::__anon15152
|
/linux-4.4.14/arch/powerpc/platforms/86xx/ |
H A D | mpc86xx_smp.c | 63 unsigned long target, flags; smp_86xx_kick_cpu() local 78 target = (unsigned long) __secondary_start_mpc86xx; smp_86xx_kick_cpu() 79 patch_branch(vector, target, BRANCH_SET_LINK); smp_86xx_kick_cpu()
|
/linux-4.4.14/arch/parisc/math-emu/ |
H A D | decode_exc.c | 107 int target, exception_index = 1; decode_fpu() local 223 target = current_ir & fivebits; decode_fpu() 236 Sgl_decrement(Fpu_sgl(target)); decode_fpu() 239 sgl_denormalize(&Fpu_sgl(target),&inexact,Rounding_mode()); decode_fpu() 248 Dbl_decrement(Fpu_dblp1(target),Fpu_dblp2(target)); decode_fpu() 251 dbl_denormalize(&Fpu_dblp1(target),&Fpu_dblp2(target), decode_fpu() 301 target = current_ir & fivebits; decode_fpu() 304 Sgl_setoverflow(Fpu_sgl(target)); decode_fpu() 307 Dbl_setoverflow(Fpu_dblp1(target),Fpu_dblp2(target)); decode_fpu()
|
/linux-4.4.14/tools/perf/tests/ |
H A D | task-exit.c | 40 struct target target = { test__task_exit() local 76 err = perf_evlist__prepare_workload(evlist, &target, argv, false, test__task_exit()
|
H A D | bpf.c | 46 .target = { do_test() 77 opts.target.tid = opts.target.pid = pid; do_test() 86 err = perf_evlist__create_maps(evlist, &opts.target); do_test()
|
H A D | openat-syscall-tp-fields.c | 12 .target = { test__syscall_openat_tp_fields() 41 err = perf_evlist__create_maps(evlist, &opts.target); test__syscall_openat_tp_fields()
|
/linux-4.4.14/drivers/sh/clk/ |
H A D | core.c | 558 long clk_round_parent(struct clk *clk, unsigned long target, clk_round_parent() argument 568 *best_freq = clk_round_rate(clk, target); clk_round_parent() 569 return abs(target - *best_freq); clk_round_parent() 573 if (unlikely(freq->frequency / target <= div_min - 1)) { clk_round_parent() 577 if (error > target - freq_max) { clk_round_parent() 578 error = target - freq_max; clk_round_parent() 585 target - freq_max); clk_round_parent() 593 if (unlikely(freq->frequency / target >= div_max)) { clk_round_parent() 597 if (error > freq_min - target) { clk_round_parent() 598 error = freq_min - target; clk_round_parent() 605 freq_min - target); clk_round_parent() 613 div = freq->frequency / target; clk_round_parent() 617 if (freq_high - target < error) { clk_round_parent() 618 error = freq_high - target; clk_round_parent() 624 if (target - freq_low < error) { clk_round_parent() 625 error = target - freq_low; clk_round_parent()
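The core of clk_round_parent() above is an integer-divider search: for each candidate parent rate, the two achievable child rates nearest the target are rate / div and rate / (div + 1), and the loop keeps whichever candidate minimizes the error. That inner step in isolation, as a standalone sketch:

    /* For one candidate parent rate, return the achievable child rate
     * closest to 'target' using an integer divider (divider >= 1). */
    static unsigned long closest_child_rate(unsigned long parent,
                                            unsigned long target)
    {
        unsigned long div, high, low;

        if (target == 0 || parent <= target)
            return parent;      /* a divider of 1 is the best we can do */

        div = parent / target;  /* rounds down, so 'high' >= target */
        high = parent / div;
        low = parent / (div + 1);   /* first achievable rate below target */

        return (high - target) <= (target - low) ? high : low;
    }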
|
/linux-4.4.14/drivers/media/common/b2c2/ |
H A D | flexcop-sram.c | 31 flexcop_sram_dest_target_t target) flexcop_sram_set_dest() 36 if (fc->rev != FLEXCOP_III && target == FC_SRAM_DEST_TARGET_FC3_CA) { flexcop_sram_set_dest() 37 err("SRAM destination target to available on FlexCopII(b)\n"); flexcop_sram_set_dest() 40 deb_sram("sram dest: %x target: %x\n", dest, target); flexcop_sram_set_dest() 43 v.sram_dest_reg_714.NET_Dest = target; flexcop_sram_set_dest() 45 v.sram_dest_reg_714.CAI_Dest = target; flexcop_sram_set_dest() 47 v.sram_dest_reg_714.CAO_Dest = target; flexcop_sram_set_dest() 49 v.sram_dest_reg_714.MEDIA_Dest = target; flexcop_sram_set_dest() 30 flexcop_sram_set_dest(struct flexcop_device *fc, flexcop_sram_dest_t dest, flexcop_sram_dest_target_t target) flexcop_sram_set_dest() argument
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
H A D | nv50.c | 64 vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target) vm_addr() argument 68 phys |= target << 4; vm_addr() 82 u32 block, target; nv50_vm_map() local 85 /* IGPs don't have real VRAM, re-target to stolen system memory */ nv50_vm_map() 86 target = 0; nv50_vm_map() 89 target = 3; nv50_vm_map() 92 phys = vm_addr(vma, phys, mem->memtype, target); nv50_vm_map() 130 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2; nv50_vm_map_sg() local 134 u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target); nv50_vm_map_sg()
|
/linux-4.4.14/drivers/nfc/microread/ |
H A D | microread.c | 313 struct nfc_target *target, u8 comm_mode, microread_dep_link_up() 319 r = nfc_hci_get_param(hdev, target->hci_reader_gate, microread_dep_link_up() 332 r = nfc_dep_link_is_up(hdev->ndev, target->idx, comm_mode, microread_dep_link_up() 347 struct nfc_target *target) microread_target_from_gate() 351 target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; microread_target_from_gate() 362 struct nfc_target *target) microread_complete_target_discovered() 411 struct nfc_target *target, microread_im_transceive() 419 pr_info("data exchange to gate 0x%x\n", target->hci_reader_gate); microread_im_transceive() 421 if (target->hci_reader_gate == MICROREAD_GATE_ID_P2P_INITIATOR) { microread_im_transceive() 424 return nfc_hci_send_event(hdev, target->hci_reader_gate, microread_im_transceive() 429 switch (target->hci_reader_gate) { microread_im_transceive() 452 target->hci_reader_gate); microread_im_transceive() 462 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, microread_im_transceive() 487 pr_info("target discovered to gate 0x%x\n", gate); microread_target_discovered() 544 pr_info("discard target discovered to gate 0x%x\n", gate); microread_target_discovered() 557 pr_err("Failed to handle discovered target err=%d\n", r); microread_target_discovered() 312 microread_dep_link_up(struct nfc_hci_dev *hdev, struct nfc_target *target, u8 comm_mode, u8 *gb, size_t gb_len) microread_dep_link_up() argument 346 microread_target_from_gate(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) microread_target_from_gate() argument 360 microread_complete_target_discovered(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) microread_complete_target_discovered() argument 410 microread_im_transceive(struct nfc_hci_dev *hdev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) microread_im_transceive() argument
|
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/engine/ |
H A D | dma.h | 11 u32 target; member in struct:nvkm_dmaobj
|
/linux-4.4.14/arch/x86/include/asm/ |
H A D | jump_label.h | 58 jump_label_t target; member in struct:jump_entry
|
/linux-4.4.14/scripts/package/ |
H A D | Makefile | 4 # RPM target 6 # The rpm target generates two rpm files: 21 # Note that the rpm-pkg target cannot be used with KBUILD_OUTPUT, 22 # but the binrpm-pkg target can; for some reason O= gets ignored. 38 echo "binrpm-pkg or bindeb-pkg target instead."; \ 55 rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz 66 rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \ 70 # Deb target 133 $(error unknown target $@)))) \
|
/linux-4.4.14/include/net/iucv/ |
H A D | af_iucv.h | 85 /* target is unreachable */ 96 /* target unreachable (detected delayed) */ 134 u32 class; /* target class of message */ 147 #define SCM_IUCV_TRGCLS 0x0001 /* target class control message */
|
/linux-4.4.14/include/uapi/linux/netfilter_ipv4/ |
H A D | ipt_ECN.h | 1 /* Header file for iptables ipt_ECN target
|
/linux-4.4.14/include/uapi/linux/netfilter_ipv6/ |
H A D | ip6_tables.h | 107 stuff 3) the target to perform if the rule matches */ 116 /* Size of ipt_entry + matches + target */ 125 /* The matches (if any), then the target. */ 132 struct xt_standard_target target; member in struct:ip6t_standard 137 struct xt_error_target target; member in struct:ip6t_error 149 .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ 151 .target.verdict = -(__verdict) - 1, \ 157 .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ 159 .target.errorname = "ERROR", \
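In the standard-target macros above, a verdict such as NF_ACCEPT is stored as -(verdict) - 1: nonnegative values in that field are jump offsets into the table, so real verdicts are folded into the negative range where they cannot collide with an offset. A tiny round-trip sketch of that encoding:

    #include <assert.h>

    #define NF_DROP   0
    #define NF_ACCEPT 1

    /* Standard-target verdict field: offsets are >= 0, verdicts are < 0. */
    static int encode_verdict(int verdict) { return -verdict - 1; }
    static int decode_verdict(int stored)  { return -stored - 1; }
    static int is_jump(int stored)         { return stored >= 0; }

    int main(void)
    {
        int v = encode_verdict(NF_ACCEPT);  /* stored as -2 */

        assert(!is_jump(v));
        assert(decode_verdict(v) == NF_ACCEPT);
        assert(is_jump(112));               /* offset of another rule */
        return 0;
    }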
|
/linux-4.4.14/arch/mips/include/asm/mach-bcm63xx/ |
H A D | bcm63xx_nvram.h | 10 * Initialized the local nvram copy from the target address and checks
|
/linux-4.4.14/arch/powerpc/boot/ |
H A D | ps3-hvcall.S | 72 .macro LOAD_64_REG target,high,low 74 or \target, r11, \low 77 .macro LOAD_64_STACK target,offset 78 ld \target, \offset(r1)
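LOAD_64_REG assembles a 64-bit value by OR-ing r11, which the elided instruction presumably loads with the \high half shifted into the upper word, with the \low half. Assuming that reading of the macro, the C equivalent of the composition is one shift and one OR:

    #include <stdint.h>

    /* C equivalent of the LOAD_64_REG composition: high 32 bits shifted
     * into place, then OR-ed with the low 32 bits. */
    static inline uint64_t load_64(uint32_t high, uint32_t low)
    {
        return ((uint64_t)high << 32) | low;
    }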
|
/linux-4.4.14/arch/arm/include/asm/ |
H A D | jump_label.h | 43 jump_label_t target; member in struct:jump_entry
|
/linux-4.4.14/fs/jffs2/ |
H A D | dir.c | 77 static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, jffs2_lookup() argument 87 if (target->d_name.len > JFFS2_MAX_NAME_LEN) jffs2_lookup() 95 for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) { jffs2_lookup() 96 if (fd_list->nhash == target->d_name.hash && jffs2_lookup() 98 strlen(fd_list->name) == target->d_name.len && jffs2_lookup() 99 !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) { jffs2_lookup() 112 return d_splice_alias(inode, target); jffs2_lookup() 278 static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char *target) jffs2_symlink() argument 289 int ret, targetlen = strlen(target); jffs2_symlink() 291 /* FIXME: If you care. We'd need to use frags for the target jffs2_symlink() 333 ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); jffs2_symlink() 336 fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL); jffs2_symlink() 348 /* We use f->target field to store the target path. */ jffs2_symlink() 349 f->target = kmemdup(target, targetlen + 1, GFP_KERNEL); jffs2_symlink() 350 if (!f->target) { jffs2_symlink() 357 inode->i_link = f->target; jffs2_symlink() 359 jffs2_dbg(1, "%s(): symlink's target '%s' cached\n", jffs2_symlink() 360 __func__, (char *)f->target); jffs2_symlink() 847 * We can't keep the target in dcache after that. jffs2_rename()
|
H A D | jffs2_fs_i.h | 45 /* The target path if this is the inode of a symlink */ 46 unsigned char *target; member in struct:jffs2_inode_info
|
/linux-4.4.14/drivers/gpu/drm/nouveau/ |
H A D | nouveau_chan.c | 96 u32 target; nouveau_channel_prep() local 107 target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; nouveau_channel_prep() 109 target = TTM_PL_FLAG_VRAM; nouveau_channel_prep() 111 ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL, nouveau_channel_prep() 114 ret = nouveau_bo_pin(chan->push.buffer, target, false); nouveau_channel_prep() 138 args.target = NV_DMA_V0_TARGET_VM; nouveau_channel_prep() 149 args.target = NV_DMA_V0_TARGET_PCI; nouveau_channel_prep() 155 args.target = NV_DMA_V0_TARGET_VRAM; nouveau_channel_prep() 162 args.target = NV_DMA_V0_TARGET_AGP; nouveau_channel_prep() 168 args.target = NV_DMA_V0_TARGET_VM; nouveau_channel_prep() 306 args.target = NV_DMA_V0_TARGET_VM; nouveau_channel_init() 311 args.target = NV_DMA_V0_TARGET_VRAM; nouveau_channel_init() 323 args.target = NV_DMA_V0_TARGET_VM; nouveau_channel_init() 329 args.target = NV_DMA_V0_TARGET_AGP; nouveau_channel_init() 335 args.target = NV_DMA_V0_TARGET_VM; nouveau_channel_init()
|
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/ |
H A D | sec_config.c | 62 CERROR("unknown target %p(%s)\n", obd, type); sptlrpc_target_sec_part() 425 sc_local:1; /* local copy from target */ 427 struct list_head sc_tgts; /* target-specific rules */ 547 const char *target, sptlrpc_conf_merge_rule() 553 /* fsname == target means general rules for the whole fs */ sptlrpc_conf_merge_rule() 554 if (strcmp(conf->sc_fsname, target) == 0) { sptlrpc_conf_merge_rule() 557 conf_tgt = sptlrpc_conf_get_tgt(conf, target, 1); sptlrpc_conf_merge_rule() 571 * find one through the target name in the record inside conf_lock; 577 char *target, *param; __sptlrpc_process_config() local 582 target = lustre_cfg_string(lcfg, 1); __sptlrpc_process_config() 583 if (target == NULL) { __sptlrpc_process_config() 584 CERROR("missing target name\n"); __sptlrpc_process_config() 594 CDEBUG(D_SEC, "processing rule: %s.%s\n", target, param); __sptlrpc_process_config() 608 target2fsname(target, fsname, sizeof(fsname)); __sptlrpc_process_config() 616 rc = sptlrpc_conf_merge_rule(conf, target, &rule); __sptlrpc_process_config() 621 rc = sptlrpc_conf_merge_rule(conf, target, &rule); __sptlrpc_process_config() 762 struct obd_uuid *target, sptlrpc_conf_choose_flavor() 771 target2fsname(target->uuid, name, sizeof(name)); sptlrpc_conf_choose_flavor() 779 /* convert uuid name (supposed end with _UUID) to target name */ sptlrpc_conf_choose_flavor() 780 len = strlen(target->uuid); sptlrpc_conf_choose_flavor() 782 memcpy(name, target->uuid, len - 5); sptlrpc_conf_choose_flavor() 546 sptlrpc_conf_merge_rule(struct sptlrpc_conf *conf, const char *target, struct sptlrpc_rule *rule) sptlrpc_conf_merge_rule() argument 760 sptlrpc_conf_choose_flavor(enum lustre_sec_part from, enum lustre_sec_part to, struct obd_uuid *target, lnet_nid_t nid, struct sptlrpc_flavor *sf) sptlrpc_conf_choose_flavor() argument
|
/linux-4.4.14/drivers/thermal/int340x_thermal/ |
H A D | acpi_thermal_rel.h | 18 acpi_handle target; member in struct:art 34 acpi_handle target; member in struct:trt
|
/linux-4.4.14/drivers/md/ |
H A D | dm-cache-policy.h | 24 * When the core target has to remap a bio it calls the 'map' method of the 25 * policy. This returns an instruction telling the core target what to do. 85 * This is the instruction passed back to the core target. 155 * Called when a cache target is first created. Used to load a 165 * Override functions used on the error paths of the core target. 181 * Provide a dirty block to be written back by the core target. If 203 * these, the core target sends regular tick() calls to the policy. 236 * what gets passed on the target line to select your policy.
|
H A D | dm-table.c | 162 * Allocate both the target array and offset array at once. alloc_targets() 309 DMWARN("%s: %s too small for target: " device_area_is_invalid() 446 DMWARN("%s: adding target device %s caused an alignment inconsistency: " dm_set_device_limits() 487 * Checks to see if the target joins onto the end of the table. 601 * be compatible with the logical_block_size of the target processing it. 620 * target, how many sectors must the next target handle? validate_hardware_logical_block_alignment() 636 /* combine all target devices' limits */ validate_hardware_logical_block_alignment() 678 DMERR("%s: target type %s must appear alone in table", dm_table_add_target() 689 DMERR("%s: zero-length target", dm_device_name(t->md)); dm_table_add_target() 695 DMERR("%s: %s: unknown target type", dm_device_name(t->md), dm_table_add_target() 702 DMERR("%s: target type %s must appear alone in table", dm_table_add_target() 710 DMERR("%s: target type %s may not be included in read-only tables", dm_table_add_target() 717 DMERR("%s: immutable target type %s cannot be mixed with other target types", dm_table_add_target() 723 DMERR("%s: immutable target type %s cannot be mixed with other target types", dm_table_add_target() 736 * Does this target adjoin the previous one ? dm_table_add_target() 853 DMWARN("Inconsistent table: different target types" dm_table_set_type() 880 * Request-based dm supports only tables that have a single target now. dm_table_set_type() 1185 * Search the btree for the correct target. 1219 * target's iterate_devices method. 1220 * Returns false if the result is unknown because a target doesn't 1263 * Combine queue limits of all the devices this target uses. dm_calculate_queue_limits() 1273 * Check each device area is consistent with the target's dm_calculate_queue_limits() 1282 * Merge this target's queue limits into the overall limits dm_calculate_queue_limits() 1286 DMWARN("%s: adding target device " dm_calculate_queue_limits() 1459 * Unless any target used by the table set discards_supported, dm_table_supports_discards()
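The alignment validation described above enforces one invariant: every boundary between adjacent targets must fall on a hardware logical-block boundary, otherwise a single logical block would straddle two targets and each would see a partial block. A simplified restatement of that check (SECTOR_SHIFT and the predicate name are assumptions, not the dm-table internals):

    #include <stdbool.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9      /* 512-byte sectors */

    /* True if a target boundary (in sectors) lands on a hardware
     * logical-block boundary of the given size in bytes. */
    static bool boundary_aligned(uint64_t boundary_sector,
                                 uint32_t logical_block_size)
    {
        return ((boundary_sector << SECTOR_SHIFT) % logical_block_size) == 0;
    }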
|
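Several dm-table.c hits concern validate_hardware_logical_block_alignment(): a boundary between targets must not fall mid logical block of the device that processes the I/O. A toy form of that check, assuming 512-byte sectors and a power-of-two logical block size:

    #include <stdbool.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /* Does a boundary at 'sectors' (512-byte units) split a logical block? */
    static bool boundary_is_aligned(unsigned long long sectors,
                                    unsigned int logical_block_size)
    {
        return ((sectors << SECTOR_SHIFT) % logical_block_size) == 0;
    }

    int main(void)
    {
        /* With 4096-byte logical blocks, a target ending at sector 7
         * leaves a partial block for the next target; sector 8 is clean. */
        printf("%d %d\n", boundary_is_aligned(7, 4096),
                          boundary_is_aligned(8, 4096));
        return 0;
    }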
/linux-4.4.14/drivers/misc/ibmasm/ |
H A D | i2o.h | 29 u8 target; member in struct:i2o_header 43 .target = 0x00, \
|
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/core/ |
H A D | memory.h | 20 enum nvkm_memory_target (*target)(struct nvkm_memory *); member in struct:nvkm_memory_func 35 #define nvkm_memory_target(p) (p)->func->target(p)
|
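memory.h resolves where an nvkm memory object lives through a per-object function table, and nvkm_memory_target(p) is just (p)->func->target(p). The same indirection, reduced to a compilable sketch with invented enum values:

    #include <stdio.h>

    enum memory_target { TARGET_VRAM, TARGET_HOST };    /* invented values */

    struct memory;
    struct memory_func {
        enum memory_target (*target)(struct memory *);
    };
    struct memory { const struct memory_func *func; };

    #define memory_target(p) ((p)->func->target(p))

    static enum memory_target vram_target(struct memory *m)
    {
        (void)m;
        return TARGET_VRAM;
    }

    int main(void)
    {
        static const struct memory_func vram = { .target = vram_target };
        struct memory m = { .func = &vram };

        printf("placement: %d\n", memory_target(&m));
        return 0;
    }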
/linux-4.4.14/arch/arc/kernel/ |
H A D | ptrace.c | 21 static int genregs_get(struct task_struct *target, genregs_get() argument 26 const struct pt_regs *ptregs = task_pt_regs(target); genregs_get() 27 const struct callee_regs *cregs = task_callee_regs(target); genregs_get() 90 REG_O_ONE(efa, &target->thread.fault_address); genregs_get() 94 stop_pc_val = target->thread.fault_address; genregs_get() 107 static int genregs_set(struct task_struct *target, genregs_set() argument 112 const struct pt_regs *ptregs = task_pt_regs(target); genregs_set() 113 const struct callee_regs *cregs = task_callee_regs(target); genregs_set()
|
/linux-4.4.14/samples/seccomp/ |
H A D | Makefile | 20 # Try to match the kernel target. 43 # for the host toolchain. So disable tests if target architecture
|
/linux-4.4.14/security/selinux/ss/ |
H A D | constraint.h | 33 #define CEXPR_TARGET 8 /* target if set, source otherwise */ 34 #define CEXPR_XTARGET 16 /* special 3rd target for validatetrans rule */
|
/linux-4.4.14/fs/configfs/ |
H A D | symlink.c | 113 struct config_item **target, struct super_block *sb) get_target() 120 *target = configfs_get_config_item(path->dentry); get_target() 121 if (!*target) { get_target() 212 * drop_link(this, target) and drop_item(target) is preserved. configfs_unlink() 235 static int configfs_get_target_path(struct config_item * item, struct config_item * target, configfs_get_target_path() argument 242 size = item_path_length(target) + depth * 3 - 1; configfs_get_target_path() 251 fill_item_path(target, path, size); configfs_get_target_path() 112 get_target(const char *symname, struct path *path, struct config_item **target, struct super_block *sb) get_target() argument
|
/linux-4.4.14/kernel/ |
H A D | test_kprobes.c | 27 static u32 (*target)(u32 value); variable 67 ret = target(rand1); test_kprobe() 126 ret = target(rand1); test_kprobes() 184 ret = target(rand1); test_jprobe() 214 ret = target(rand1); test_jprobes() 272 ret = target(rand1); test_kretprobe() 320 ret = target(rand1); test_kretprobes() 341 target = kprobe_target; init_test_probes()
|
/linux-4.4.14/include/uapi/linux/ |
H A D | eventpoll.h | 41 /* Set the One Shot behaviour for the target file descriptor */ 44 /* Set the Edge Triggered behaviour for the target file descriptor */
|
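The eventpoll.h comments describe the two per-descriptor behaviour flags: EPOLLONESHOT disarms the descriptor after one reported event, and EPOLLET switches it to edge-triggered reporting. A typical userspace registration combining both (error handling omitted):

    #include <sys/epoll.h>

    /* Register fd for one edge-triggered readable event.  EPOLLONESHOT
     * disarms the fd after the first wakeup, so the caller must re-arm
     * it with EPOLL_CTL_MOD before the next event can be reported. */
    static int arm_once(int epfd, int fd)
    {
        struct epoll_event ev = {
            .events = EPOLLIN | EPOLLET | EPOLLONESHOT,
            .data.fd = fd,
        };

        return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
    }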
H A D | fsl_hypervisor.h | 113 * @target: the partition ID of the target partition, or -1 for this 127 * the 'source' partition to the 'target' partition. 134 * 'target' (but not both) must be -1. In other words, either 136 * source == local and target == remote 138 * source == remote and target == local 143 __u32 target; member in struct:fsl_hv_ioctl_memcpy
|
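fsl_hypervisor.h requires that exactly one of 'source' and 'target' be -1, i.e. one endpoint of the copy is always the local partition. That invariant checked in isolation — a sketch, not the driver's actual validation code:

    #include <stdbool.h>

    /* Exactly one endpoint of the copy must be the local partition (-1). */
    static bool memcpy_endpoints_valid(int source, int target)
    {
        return (source == -1) != (target == -1);
    }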
/linux-4.4.14/arch/powerpc/sysdev/ |
H A D | cpm2.c | 143 int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode) cpm2_clk_setup() argument 213 switch (target) { cpm2_clk_setup() 243 printk(KERN_ERR "cpm2_clock_setup: invalid clock target\n"); cpm2_clk_setup() 248 if (clk_map[i][0] == target && clk_map[i][1] == clock) { cpm2_clk_setup() 273 int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock) cpm2_smc_clk_setup() argument 295 switch (target) { cpm2_smc_clk_setup() 307 printk(KERN_ERR "cpm2_smc_clock_setup: invalid clock target\n"); cpm2_smc_clk_setup() 312 if (clk_map[i][0] == target && clk_map[i][1] == clock) { cpm2_smc_clk_setup()
|
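cpm2_clk_setup() and cpm2_smc_clk_setup() both resolve a (target, clock) pair by scanning a small table of triples and failing if no row matches. The lookup idiom extracted, with invented table contents:

    #include <stdio.h>

    /* Each row: { target, clock, register-field value } -- invented data. */
    static const int clk_map[][3] = {
        { 1, 3, 0x2 },
        { 2, 5, 0x7 },
    };

    static int clk_lookup(int target, int clock)
    {
        unsigned int i;

        for (i = 0; i < sizeof(clk_map) / sizeof(clk_map[0]); i++)
            if (clk_map[i][0] == target && clk_map[i][1] == clock)
                return clk_map[i][2];
        return -1;                  /* invalid clock/target combination */
    }

    int main(void)
    {
        printf("%d\n", clk_lookup(2, 5));
        return 0;
    }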
/linux-4.4.14/arch/arm/plat-orion/include/plat/ |
H A D | addr-map.h | 39 const u8 target; member in struct:orion_addr_map_info 49 const u32 size, const u8 target,
|
/linux-4.4.14/net/ipv6/ |
H A D | exthdrs_core.c | 161 * if target < 0. "last header" is transport protocol header, ESP, or 168 * If target header is found, its offset is set in *offset and return protocol 181 * IP6_FH_F_AUTH flag is set and target < 0, then this function will 186 int target, unsigned short *fragoff, int *flags) ipv6_find_hdr() 212 found = (nexthdr == target); ipv6_find_hdr() 215 if (target < 0 || found) ipv6_find_hdr() 253 if (target < 0 && ipv6_find_hdr() 268 if (flags && (*flags & IP6_FH_F_AUTH) && (target < 0)) ipv6_find_hdr() 185 ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, unsigned short *fragoff, int *flags) ipv6_find_hdr() argument
|
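The exthdrs_core.c comment block documents ipv6_find_hdr(): pass the protocol to search for (or a negative target for the last header), and the header's offset comes back through *offset. A representative caller, sketched against the signature quoted above; passing NULL for fragoff and flags is assumed safe here, as the excerpt shows both being NULL-checked:

    #include <linux/in.h>
    #include <net/ipv6.h>

    /* Locate the TCP header in an IPv6 skb, skipping extension headers. */
    static int find_tcp_offset(const struct sk_buff *skb, unsigned int *thoff)
    {
        int proto;

        *thoff = 0;     /* in-out: start searching at the IPv6 header */
        proto = ipv6_find_hdr(skb, thoff, IPPROTO_TCP, NULL, NULL);
        return proto == IPPROTO_TCP ? 0 : -1;
    }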
/linux-4.4.14/arch/um/drivers/ |
H A D | mconsole_user.c | 181 struct sockaddr_un target; mconsole_notify() local 199 target.sun_family = AF_UNIX; mconsole_notify() 200 strcpy(target.sun_path, sock_name); mconsole_notify() 211 n = sendto(notify_sock, &packet, len, 0, (struct sockaddr *) &target, mconsole_notify() 212 sizeof(target)); mconsole_notify()
|
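mconsole_notify() is the stock unconnected AF_UNIX datagram send: fill in a sockaddr_un, then sendto(). The same shape as a userspace sketch — note the original's strcpy() into sun_path is unbounded and trusts the caller, so the sketch bounds the copy:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    static ssize_t notify(int sock, const void *packet, size_t len,
                          const char *sock_name)
    {
        struct sockaddr_un target;

        memset(&target, 0, sizeof(target));
        target.sun_family = AF_UNIX;
        /* sun_path is ~108 bytes, so bound the copy. */
        strncpy(target.sun_path, sock_name, sizeof(target.sun_path) - 1);

        return sendto(sock, packet, len, 0,
                      (struct sockaddr *)&target, sizeof(target));
    }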
/linux-4.4.14/arch/powerpc/platforms/cell/ |
H A D | spu_priv1_mmio.c | 77 u64 target; cpu_affinity_set() local 88 target = iic_get_target_id(cpu); cpu_affinity_set() 89 route = target << 48 | target << 32 | target << 16; cpu_affinity_set()
|
/linux-4.4.14/arch/h8300/kernel/ |
H A D | ptrace.c | 88 static int regs_get(struct task_struct *target, regs_get() argument 99 *reg++ = h8300_get_reg(target, r); regs_get() 105 static int regs_set(struct task_struct *target, regs_set() argument 117 *reg++ = h8300_get_reg(target, r); regs_set() 126 h8300_put_reg(target, r, *reg++); regs_set()
|
/linux-4.4.14/drivers/scsi/sym53c8xx_2/ |
H A D | sym_hipd.c | 81 static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg) sym_print_nego_msg() argument 83 struct sym_tcb *tp = &np->target[target]; sym_print_nego_msg() 915 struct sym_tcb *tp = &np->target[i]; sym_prepare_setting() 1421 struct sym_tcb *tp = &np->target[cp->target]; sym_prepare_nego() 1467 sym_print_nego_msg(np, cp->target, sym_prepare_nego() 1658 struct sym_tcb *tp = &np->target[cp->target]; sym_flush_comp_queue() 1884 * Fill in target structure. sym_start_up() 1890 struct sym_tcb *tp = &np->target[i]; sym_start_up() 1942 * Switch trans mode for current job and its target. 1944 static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs, sym_settrans() argument 1949 struct sym_tcb *tp = &np->target[target]; sym_settrans() 1951 assert(target == (INB(np, nc_sdid) & 0x0f)); sym_settrans() 2033 * patch ALL busy ccbs of this target. sym_settrans() 2038 if (cp->target != target) sym_settrans() 2075 static void sym_setwide(struct sym_hcb *np, int target, u_char wide) sym_setwide() argument 2077 struct sym_tcb *tp = &np->target[target]; sym_setwide() 2080 sym_settrans(np, target, 0, 0, 0, wide, 0, 0); sym_setwide() 2104 sym_setsync(struct sym_hcb *np, int target, sym_setsync() argument 2107 struct sym_tcb *tp = &np->target[target]; sym_setsync() 2111 sym_settrans(np, target, 0, ofs, per, wide, div, fak); sym_setsync() 2137 sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs, sym_setpprot() argument 2140 struct sym_tcb *tp = &np->target[target]; sym_setpprot() 2143 sym_settrans(np, target, opts, ofs, per, wide, div, fak); sym_setpprot() 2461 * the target -> add the amount to the rest sym_int_ma() 2675 * COMMAND --> MSG IN SCSI parity error detected by target. sym_int_ma() 2676 * COMMAND --> STATUS Bad command or refused by target. sym_int_ma() 2677 * MSG OUT --> MSG IN Message rejected by target. sym_int_ma() 2678 * MSG OUT --> COMMAND Bogus target that discards extended sym_int_ma() 2682 * trusts the target. Why to annoy it ? sym_int_ma() 2685 * If a target does not get all the messages after selection, sym_int_ma() 2686 * the code assumes blindly that the target discards extended sym_int_ma() 2688 * If the target does not want all our response to negotiation, sym_int_ma() 2971 * a given target/lun/task condition (-1 means all), 2978 sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task) sym_dequeue_from_squeue() argument 2990 * that matches the target/lun/task condition. sym_dequeue_from_squeue() 3000 if ((target == -1 || cp->target == target) && sym_dequeue_from_squeue() 3092 sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); sym_sir_bad_scsi_status() 3113 * async. 8 bit data transfers with that target, sym_sir_bad_scsi_status() 3193 * queue all disconnected CCBs for a given target that 3198 int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task) sym_clear_tasks() argument 3221 cp->target != target || sym_clear_tasks() 3253 * If we have to reset a target, clear tasks of a unit, 3255 * restart the SCRIPTS for selecting the target. Once 3264 * target to get rid of the useless SCSI bus ownership. 3266 * - If the target is to be reset, we send it a M_RESET 3276 * by the target, the SCRIPTS interrupts again 3279 * target according to our message. 3287 int target=-1, lun=-1, task; sym_sir_task_recovery() local 3298 * Do we have any target to reset or unit to clear ? 
sym_sir_task_recovery() 3301 tp = &np->target[i]; sym_sir_task_recovery() 3304 target = i; sym_sir_task_recovery() 3311 target = i; sym_sir_task_recovery() 3315 if (target != -1) sym_sir_task_recovery() 3323 if (target == -1) { sym_sir_task_recovery() 3329 target = cp->target; sym_sir_task_recovery() 3336 * If some target is to be selected, sym_sir_task_recovery() 3339 if (target != -1) { sym_sir_task_recovery() 3340 tp = &np->target[target]; sym_sir_task_recovery() 3341 np->abrt_sel.sel_id = target; sym_sir_task_recovery() 3393 i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); sym_sir_task_recovery() 3418 * The SCRIPTS processor has selected a target sym_sir_task_recovery() 3422 target = INB(np, nc_sdid) & 0xf; sym_sir_task_recovery() 3423 tp = &np->target[target]; sym_sir_task_recovery() 3428 * If the target is to be reset, prepare a sym_sir_task_recovery() 3468 * abort for this target. sym_sir_task_recovery() 3476 if (cp->target != target) sym_sir_task_recovery() 3529 * The target has accepted our message and switched sym_sir_task_recovery() 3533 target = INB(np, nc_sdid) & 0xf; sym_sir_task_recovery() 3534 tp = &np->target[target]; sym_sir_task_recovery() 3545 * been performed by the target. sym_sir_task_recovery() 3549 * this target from our task list (lun=task=-1) sym_sir_task_recovery() 3584 sym_dequeue_from_squeue(np, i, target, lun, -1); sym_sir_task_recovery() 3585 sym_clear_tasks(np, DID_ABORT, target, lun, task); sym_sir_task_recovery() 3928 * If the target doesn't answer this message immediately 3941 * state, it's a target initiated negotiation. We prepare a 3943 * this answer to the target. 3945 * If the target doesn't fetch the answer (no message out phase), 3950 * to this target, in the controller's register, and in the "phys" 3960 int target = cp->target; sym_sync_nego_check() local 3964 sym_print_nego_msg(np, target, "sync msgin", np->msgin); sym_sync_nego_check() 4010 sym_setsync (np, target, ofs, per, div, fak); sym_sync_nego_check() 4024 sym_print_nego_msg(np, target, "sync msgout", np->msgout); sym_sync_nego_check() 4032 sym_setsync (np, target, 0, 0, 0, 0); sym_sync_nego_check() 4073 sym_ppr_nego_check(struct sym_hcb *np, int req, int target) sym_ppr_nego_check() argument 4075 struct sym_tcb *tp = &np->target[target]; sym_ppr_nego_check() 4085 sym_print_nego_msg(np, target, "ppr msgin", np->msgin); sym_ppr_nego_check() 4136 sym_setpprot(np, target, opts, ofs, per, wide, div, fak); sym_ppr_nego_check() 4150 sym_print_nego_msg(np, target, "ppr msgout", np->msgout); sym_ppr_nego_check() 4158 sym_setpprot (np, target, 0, 0, 0, 0, 0, 0); sym_ppr_nego_check() 4191 result = sym_ppr_nego_check(np, req, cp->target); sym_ppr_nego() 4212 int target = cp->target; sym_wide_nego_check() local 4216 sym_print_nego_msg(np, target, "wide msgin", np->msgin); sym_wide_nego_check() 4248 sym_setwide (np, target, wide); sym_wide_nego_check() 4264 sym_print_nego_msg(np, target, "wide msgout", np->msgout); sym_wide_nego_check() 4308 sym_print_nego_msg(np, cp->target, sym_wide_nego() 4332 * A target that understands a PPR message should never 4342 sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0); sym_nego_default() 4353 sym_setsync (np, cp->target, 0, 0, 0, 0); sym_nego_default() 4356 sym_setwide (np, cp->target, 0); sym_nego_default() 4382 u_char target = INB(np, nc_sdid) & 0x0f; sym_int_sir() local 4383 struct sym_tcb *tp = &np->target[target]; sym_int_sir() 4654 struct sym_tcb *tp = &np->target[tn]; sym_get_ccb() 4760 cp->target = tn; sym_get_ccb() 4779 struct sym_tcb *tp = 
&np->target[cp->target]; sym_free_ccb() 4827 * We donnot queue more than 1 ccb per target sym_free_ccb() 4975 struct sym_tcb *tp = &np->target[tn]; sym_alloc_lcb() 4979 * Initialize the target control block if not yet. sym_alloc_lcb() 5059 struct sym_tcb *tp = &np->target[tn]; sym_alloc_lcb_tags() 5102 * for the target. 5106 struct sym_tcb *tp = &np->target[tn]; sym_free_lcb() 5155 * Retrieve the target descriptor. sym_queue_scsiio() 5157 tp = &np->target[cp->target]; sym_queue_scsiio() 5243 cp->phys.select.sel_id = cp->target; sym_queue_scsiio() 5279 * Reset a SCSI target (all LUNs of this target). 5281 int sym_reset_scsi_target(struct sym_hcb *np, int target) sym_reset_scsi_target() argument 5285 if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET) sym_reset_scsi_target() 5288 tp = &np->target[target]; sym_reset_scsi_target() 5383 * Get target and lun pointers. sym_complete_error() 5385 tp = &np->target[cp->target]; sym_complete_error() 5417 i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1); sym_complete_error() 5511 * Get target and lun pointers. sym_complete_ok() 5513 tp = &np->target[cp->target]; sym_complete_ok() 5641 * Allocate the target bus address array. sym_hcb_attach() 5758 * for a target prior the probing of devices (bad lun table). sym_hcb_attach() 5759 * A private table will be allocated for the target on the sym_hcb_attach() 5767 for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ sym_hcb_attach() 5772 * address of each target control block. sym_hcb_attach() 5776 np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); sym_hcb_attach() 5777 np->target[i].head.luntbl_sa = sym_hcb_attach() 5779 np->target[i].head.lun0_sa = sym_hcb_attach() 5808 int target; sym_hcb_free() local 5832 for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { sym_hcb_free() 5833 tp = &np->target[target]; sym_hcb_free()
|
/linux-4.4.14/drivers/scsi/megaraid/ |
H A D | mega_common.h | 58 * @dev_target : actual target on the device 66 * target on the controller. 119 * @max_target : max target supported - inclusive 206 #define SCP2TARGET(scp) (scp)->device->id // to target 225 * @target : target id of the device or logical drive number 229 * the corresponding physical channel and target or logical drive number 231 #define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \ 242 target = \ 248 target = ((adp)->device_ids[SCP2CHANNEL(scp)] \
|
/linux-4.4.14/drivers/pci/pcie/aer/ |
H A D | aer_inject.c | 145 u32 *target = NULL; find_pci_config_dword() local 152 target = &err->uncor_status; find_pci_config_dword() 156 target = &err->cor_status; find_pci_config_dword() 160 target = &err->header_log0; find_pci_config_dword() 163 target = &err->header_log1; find_pci_config_dword() 166 target = &err->header_log2; find_pci_config_dword() 169 target = &err->header_log3; find_pci_config_dword() 172 target = &err->root_status; find_pci_config_dword() 176 target = &err->source_id; find_pci_config_dword() 181 return target; find_pci_config_dword()
|
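find_pci_config_dword() maps a config-space offset to the address of the matching field in the injected-error state, returning NULL for offsets it does not emulate. The pointer-returning switch in miniature, with placeholder offsets rather than the real AER register layout:

    #include <stddef.h>

    struct err_state {                  /* placeholder, not the real layout */
        unsigned int uncor_status;
        unsigned int cor_status;
    };

    static unsigned int *find_config_dword(struct err_state *err, int where)
    {
        switch (where) {
        case 0x04: return &err->uncor_status;
        case 0x10: return &err->cor_status;
        default:   return NULL;         /* offset not emulated here */
        }
    }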
/linux-4.4.14/arch/alpha/kernel/ |
H A D | sys_marvel.c | 181 val &= ~(0x1ffUL << 24); /* clear the target pid */ io7_redirect_irq() 182 val |= ((unsigned long)where << 24); /* set the new target pid */ io7_redirect_irq() 195 * LSI_CTL has target PID @ 14 io7_redirect_one_lsi() 198 val &= ~(0x1ffUL << 14); /* clear the target pid */ io7_redirect_one_lsi() 199 val |= ((unsigned long)where << 14); /* set the new target pid */ io7_redirect_one_lsi() 212 * MSI_CTL has target PID @ 14 io7_redirect_one_msi() 215 val &= ~(0x1ffUL << 14); /* clear the target pid */ io7_redirect_one_msi() 216 val |= ((unsigned long)where << 14); /* set the new target pid */ io7_redirect_one_msi() 227 * LSI_CTL has target PID @ 14 init_one_io7_lsi() 238 * MSI_CTL has target PID @ 14 init_one_io7_msi()
|
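The three Marvel redirect helpers repeat one read-modify-write idiom: clear the 9-bit target-PID field, then OR in the new PID at the same shift. Isolated, with the width and shift taken from the fragment:

    /* Replace the 9-bit target-PID field that lives at bit 14. */
    static unsigned long set_target_pid(unsigned long val, unsigned long where)
    {
        val &= ~(0x1ffUL << 14);            /* clear the target pid */
        val |= (where & 0x1ffUL) << 14;     /* set the new target pid */
        return val;
    }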
/linux-4.4.14/arch/x86/kernel/cpu/ |
H A D | perf_event_intel_cstate.c | 474 int i, id, target; cstate_cpu_exit() local 479 target = -1; cstate_cpu_exit() 485 target = i; for_each_online_cpu() 489 if (cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask) && target >= 0) 490 cpumask_set_cpu(target, &cstate_core_cpu_mask); 492 if (target >= 0) 493 perf_pmu_migrate_context(&cstate_core_pmu, cpu, target); 499 target = -1; 505 target = i; for_each_online_cpu() 509 if (cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask) && target >= 0) 510 cpumask_set_cpu(target, &cstate_pkg_cpu_mask); 512 if (target >= 0) 513 perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
|
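cstate_cpu_exit() follows the usual uncore-PMU hotplug pattern: if the departing CPU owned the package's events, find another online CPU in the same package and call perf_pmu_migrate_context() toward it. The selection loop, schematically — plain arrays stand in for the kernel's cpumask walk:

    /* Pick a surviving CPU in the same package to inherit the PMU events;
     * -1 means the departing CPU was the package's last one.  Sketch only. */
    static int pick_migration_target(int dying, int ncpus,
                                     const int *pkg_of, const int *online)
    {
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
            if (cpu == dying || !online[cpu])
                continue;
            if (pkg_of[cpu] == pkg_of[dying])
                return cpu;
        }
        return -1;
    }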
/linux-4.4.14/arch/avr32/kernel/ |
H A D | setup.c | 179 unsigned long target; find_free_region() local 181 target = ALIGN(mem->start, align); find_free_region() 183 if ((target + size) <= res->start) find_free_region() 185 if (target <= res->end) find_free_region() 186 target = ALIGN(res->end + 1, align); find_free_region() 189 if ((target + size) > (mem->end + 1)) find_free_region() 192 return target; find_free_region() 200 resource_size_t target; alloc_reserved_region() local 204 target = find_free_region(mem, size, align); alloc_reserved_region() 205 if (target <= mem->end) { alloc_reserved_region() 206 ret = add_reserved_region(target, target + size - 1, alloc_reserved_region() 209 *start = target; alloc_reserved_region()
|
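find_free_region() advances an aligned candidate address past every reservation that overlaps it, then checks that the result still fits in the memory bank. The same scan over a sorted reservation array; ALIGN assumes a power-of-two alignment, as in the kernel:

    /* align must be a power of two, as with the kernel's ALIGN(). */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    struct res { unsigned long start, end; };   /* end is inclusive */

    /* Reservations must be sorted by start address; returns 0 on failure. */
    static unsigned long find_free(unsigned long mem_start,
                                   unsigned long mem_end,
                                   const struct res *r, int nr,
                                   unsigned long size, unsigned long align)
    {
        unsigned long target = ALIGN(mem_start, align);
        int i;

        for (i = 0; i < nr; i++) {
            if (target + size <= r[i].start)
                break;                  /* gap before this reservation fits */
            if (target <= r[i].end)
                target = ALIGN(r[i].end + 1, align);
        }
        return (target + size > mem_end + 1) ? 0 : target;
    }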
/linux-4.4.14/drivers/scsi/aic7xxx/ |
H A D | aic7xxx_core.c | 268 static int ahc_abort_scbs(struct ahc_softc *ahc, int target, 280 * transaction for this target or target lun. 290 * Allow the next untagged transaction for this target or target lun 455 * of the target role. Since the parameters for a connection ahc_fetch_transinfo() 456 * in the initiator role to a given target are the same as ahc_fetch_transinfo() 457 * when the roles are reversed, we pretend we are the target. ahc_fetch_transinfo() 1077 devinfo.target, ahc_handle_seqint() 1111 * We can't allow the target to disconnect. ahc_handle_seqint() 1113 * having the target disconnect will make this ahc_handle_seqint() 1166 "target - issuing BUS DEVICE RESET\n", ahc_handle_seqint() 1167 ahc_name(ahc), devinfo.channel, devinfo.target); ahc_handle_seqint() 1201 "target (0x%x). Rejecting\n", ahc_handle_seqint() 1202 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); ahc_handle_seqint() 1223 ahc_name(ahc), devinfo.channel, devinfo.target, ahc_handle_seqint() 1234 ahc_name(ahc), devinfo.channel, devinfo.target, ahc_handle_seqint() 1317 * throw it away in the hope that the target will ahc_handle_seqint() 1383 * and allows the target to complete its transfer. ahc_handle_seqint() 1420 * target does a command complete. ahc_handle_seqint() 1457 ahc_name(ahc), devinfo.channel, devinfo.target, ahc_handle_seqint() 1711 * Force a renegotiation with this target just in ahc_handle_scsiint() 1713 * unknown (or unreported) by the target. ahc_handle_scsiint() 1785 u_int target; ahc_handle_scsiint() local 1792 * We may have an entry in the waiting Q for this target, ahc_handle_scsiint() 1794 * go about selecting the target while we handle the event. ahc_handle_scsiint() 1817 target = SCSIID_TARGET(ahc, saved_scsiid); ahc_handle_scsiint() 1821 target, saved_lun, channel, ROLE_INITIATOR); ahc_handle_scsiint() 1837 ahc_abort_scbs(ahc, target, channel, ahc_handle_scsiint() 1852 && ahc_match_scb(ahc, scb, target, channel, ahc_handle_scsiint() 1861 target, ahc_handle_scsiint() 1882 devinfo.target, ahc_handle_scsiint() 1928 ahc_abort_scbs(ahc, target, channel, ahc_handle_scsiint() 1980 devinfo->target, ahc_force_renegotiation() 2136 * Allocate per target mode instance (ID we respond to as a target) 2183 * Free per target mode instance (ID we respond to as a target) 2210 * Called when we have an active connection to a target on the bus, 2213 * the target. 2239 * period otherwise we may allow a target initiated ahc_devlimited_syncrate() 2244 * incoming negotiations even if target initiated ahc_devlimited_syncrate() 2267 * Return the period and offset that should be sent to the target 2307 * When responding to a target that requests ahc_find_syncrate() 2311 * we want to respond to the target with ahc_find_syncrate() 2488 * active update, the specified target is currently talking to us on 2514 devinfo->target, &tstate); ahc_set_syncrate() 2555 * this target. 
ahc_set_syncrate() 2587 ahc_send_async(ahc, devinfo->channel, devinfo->target, ahc_set_syncrate() 2591 printk("%s: target %d synchronous at %sMHz%s, " ahc_set_syncrate() 2593 devinfo->target, syncrate->rate, ahc_set_syncrate() 2597 printk("%s: target %d using " ahc_set_syncrate() 2599 ahc_name(ahc), devinfo->target); ahc_set_syncrate() 2615 * active update, the specified target is currently talking to us on 2632 devinfo->target, &tstate); ahc_set_width() 2657 ahc_send_async(ahc, devinfo->channel, devinfo->target, ahc_set_width() 2660 printk("%s: target %d using %dbit transfers\n", ahc_set_width() 2661 ahc_name(ahc), devinfo->target, ahc_set_width() 2673 * Update the current state of tagged queuing for a given target. 2682 ahc_send_async(ahc, devinfo->channel, devinfo->target, ahc_set_tags() 2714 devinfo.target, &tstate); ahc_update_pending_scbs() 2820 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, ahc_compile_devinfo() argument 2824 devinfo->target = target; ahc_compile_devinfo() 2826 devinfo->target_offset = target; ahc_compile_devinfo() 2838 devinfo->target, devinfo->lun); ahc_print_devinfo() 2913 * Q for this target, and we don't want to go about ahc_setup_initiator_msgout() 2930 * Q for this target, and we don't want to go about ahc_setup_initiator_msgout() 2959 * currently active target. 2980 devinfo->target, &tstate); ahc_build_transfer_msg() 3031 * goal syncrate to be limited to what the target device ahc_build_transfer_msg() 3070 ahc_name(ahc), devinfo->channel, devinfo->target, ahc_construct_sdtr() 3088 ahc_name(ahc), devinfo->channel, devinfo->target, ahc_construct_wdtr() 3111 devinfo->channel, devinfo->target, devinfo->lun, ahc_construct_ppr() 3127 * The target didn't care to respond to our ahc_clear_msg_state() 3157 * The reconnecting target either did not send an ahc_handle_proto_violation() 3180 * The target never bothered to provide status to ahc_handle_proto_violation() 3302 * The target has requested a retry. ahc_handle_message_phase() 3380 * assert ATN so the target takes us to the ahc_handle_message_phase() 3531 * See if we sent a particular extended message to the target. 3532 * If "full" is true, return true only if the target saw the full 3533 * message. If "full" is false, return true if the target saw at 3597 devinfo->target, &tstate); ahc_parse_msg() 3672 devinfo->target, devinfo->lun, ahc_parse_msg() 3702 devinfo->target, devinfo->lun); ahc_parse_msg() 3744 devinfo->target, devinfo->lun, ahc_parse_msg() 3751 * target, since we asked first. ahc_parse_msg() 3760 devinfo->target, devinfo->lun, ahc_parse_msg() 3773 devinfo->target, devinfo->lun); ahc_parse_msg() 3891 devinfo->target, devinfo->lun); ahc_parse_msg() 3896 devinfo->target, devinfo->lun); ahc_parse_msg() 3910 devinfo->target, devinfo->lun, ahc_parse_msg() 3956 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, ahc_parse_msg() 4011 * target. If we did, this is a signal that ahc_handle_msg_reject() 4012 * the target is refusing negotiation. ahc_handle_msg_reject() 4025 devinfo->target, &tstate); ahc_handle_msg_reject() 4038 devinfo->target, devinfo->lun); ahc_handle_msg_reject() 4053 devinfo->channel, devinfo->target, devinfo->lun); ahc_handle_msg_reject() 4058 * No need to clear the sync rate. If the target ahc_handle_msg_reject() 4060 * unaffected. 
If the target started the negotiation, ahc_handle_msg_reject() 4082 devinfo->target, devinfo->lun); ahc_handle_msg_reject() 4092 devinfo->channel, devinfo->target, devinfo->lun); ahc_handle_msg_reject() 4098 ahc_name(ahc), devinfo->channel, devinfo->target, ahc_handle_msg_reject() 4106 * Resend the identify for this CCB as the target ahc_handle_msg_reject() 4119 * the untagged queue for this target. ahc_handle_msg_reject() 4133 * Requeue all tagged commands for this target ahc_handle_msg_reject() 4147 ahc_name(ahc), devinfo->channel, devinfo->target, ahc_handle_msg_reject() 4331 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, ahc_handle_devreset() 4337 * Send an immediate notify ccb to all target mord peripheral ahc_handle_devreset() 4366 ahc_send_async(ahc, devinfo->channel, devinfo->target, ahc_handle_devreset() 4372 message, devinfo->channel, devinfo->target, found); ahc_handle_devreset() 4392 panic("ahc_intr: AWAITING target message with no message"); ahc_setup_target_msgin() 5137 * target and lun pair. ahc_chip_init() 5219 * If we are a target, we'll enable select in operations once ahc_chip_init() 5323 * Only allow target mode features if this unit has them enabled. ahc_init() 5353 * When providing for the target mode role, we must additionally ahc_init() 5354 * provide space for the incoming target command fifo and an extra ahc_init() 5394 /* All target command blocks start out invalid. */ ahc_init() 5412 * data for any target mode initiator. ahc_init() 5509 * connection type we have with the target. ahc_init() 5670 * are acting in a target role. ahc_suspend() 5693 * Return the untagged transaction id for a given target/channel lun. 5755 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, ahc_match_scb() argument 5765 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); ahc_match_scb() 5793 int target; ahc_freeze_devq() local 5797 target = SCB_GET_TARGET(ahc, scb); ahc_freeze_devq() 5801 ahc_search_qinfifo(ahc, target, channel, lun, ahc_freeze_devq() 5862 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, ahc_search_qinfifo() argument 5911 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { ahc_search_qinfifo() 6022 if (ahc_match_scb(ahc, scb, target, channel, ahc_search_qinfifo() 6062 found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, ahc_search_qinfifo() 6072 int target, char channel, int lun, uint32_t status, ahc_search_untagged_queues() 6093 if (target != CAM_TARGET_WILDCARD) { ahc_search_untagged_queues() 6095 i = target; ahc_search_untagged_queues() 6127 if (ahc_match_scb(ahc, scb, target, channel, lun, ahc_search_untagged_queues() 6169 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, ahc_search_disc_list() argument 6209 if (ahc_match_scb(ahc, scbp, target, channel, lun, ahc_search_disc_list() 6326 * Abort all SCBs that match the given description (target/channel/lun/tag), 6332 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, ahc_abort_scbs() argument 6354 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, ahc_abort_scbs() 6358 * Clean out the busy target table for any untagged commands. ahc_abort_scbs() 6362 if (target != CAM_TARGET_WILDCARD) { ahc_abort_scbs() 6363 i = target; ahc_abort_scbs() 6375 * a target. 
ahc_abort_scbs() 6396 || ahc_match_scb(ahc, scbp, target, channel, ahc_abort_scbs() 6409 ahc_search_disc_list(ahc, target, channel, lun, tag, ahc_abort_scbs() 6429 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) ahc_abort_scbs() 6435 * commands for this target that are still active. ahc_abort_scbs() 6443 if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) { ahc_abort_scbs() 6458 ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status); ahc_abort_scbs() 6486 u_int initiator, target, max_scsiid; ahc_reset_channel() local 6548 * if we are in target mode. ahc_reset_channel() 6567 * if we are in target mode. ahc_reset_channel() 6592 * Send an immediate notify ccb to all target more peripheral ahc_reset_channel() 6595 for (target = 0; target <= max_scsiid; target++) { ahc_reset_channel() 6599 tstate = ahc->enabled_targets[target]; ahc_reset_channel() 6622 for (target = 0; target <= max_scsiid; target++) { ahc_reset_channel() 6624 if (ahc->enabled_targets[target] == NULL) ahc_reset_channel() 6629 ahc_compile_devinfo(&devinfo, target, initiator, ahc_reset_channel() 6669 * 4) No residual but target did not ahc_calc_residual() 6739 * Add a target mode event to this lun's queue 6789 * Send any target mode events queued up waiting 7174 int target; ahc_dump_card_state() local 7329 for (target = 0; target <= maxtarget; target++) { ahc_dump_card_state() 7330 untagged_q = &ahc->untagged_queues[target]; ahc_dump_card_state() 7333 printk("Untagged Q(%d): ", target); ahc_dump_card_state() 7401 u_int target; ahc_handle_en_lun() local 7424 * the ID of the first target to have an ahc_handle_en_lun() 7425 * enabled lun in target mode. There are ahc_handle_en_lun() 7427 * target id other than our_id. ahc_handle_en_lun() 7435 * reselect-out operations, the only target ahc_handle_en_lun() 7439 * a previous target mode ID has been enabled. ahc_handle_en_lun() 7450 * on a different target id. ahc_handle_en_lun() 7456 * Only allow our target id to change ahc_handle_en_lun() 7478 * If we aren't in target mode, switch modes. ahc_handle_en_lun() 7501 * the caller that we cannot support target mode. ahc_handle_en_lun() 7518 target = ccb->ccb_h.target_id; ahc_handle_en_lun() 7521 target_mask = 0x01 << target; ahc_handle_en_lun() 7551 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { ahc_handle_en_lun() 7552 tstate = ahc_alloc_tstate(ahc, target, channel); ahc_handle_en_lun() 7582 if (target != CAM_TARGET_WILDCARD) { ahc_handle_en_lun() 7608 if (target != our_id) { ahc_handle_en_lun() 7620 ahc->our_id = target; ahc_handle_en_lun() 7622 ahc->our_id_b = target; ahc_handle_en_lun() 7628 ahc_outb(ahc, SCSIID, target); ahc_handle_en_lun() 7649 printk("Lun now enabled for target mode\n"); ahc_handle_en_lun() 7696 /* Can we clean up the target too? 
*/ ahc_handle_en_lun() 7697 if (target != CAM_TARGET_WILDCARD) { ahc_handle_en_lun() 7707 ahc_free_tstate(ahc, target, channel, ahc_handle_en_lun() 7835 * Lazily update our position in the target mode incoming ahc_run_tqinfifo() 7866 int target; ahc_handle_target_cmd() local 7870 target = SCSIID_OUR_ID(cmd->scsiid); ahc_handle_target_cmd() 7874 tstate = ahc->enabled_targets[target]; ahc_handle_target_cmd() 7898 initiator, target, lun, ahc_handle_target_cmd() 7905 atio->ccb_h.target_id = target; ahc_handle_target_cmd() 7956 * continue target I/O comes in response ahc_handle_target_cmd() 7961 initiator, target, lun, ahc->pending_device); ahc_handle_target_cmd() 6071 ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx, int target, char channel, int lun, uint32_t status, ahc_search_action action) ahc_search_untagged_queues() argument
|
/linux-4.4.14/drivers/net/bonding/ |
H A D | bond_options.c | 33 static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target); 34 static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target); 643 * @bond: target bond device 683 * @bond: target bond device 942 __be32 target, _bond_options_arp_ip_target_set() 952 targets[slot] = target; _bond_options_arp_ip_target_set() 956 static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target) _bond_option_arp_ip_target_add() argument 961 if (!bond_is_ip_target_ok(target)) { _bond_option_arp_ip_target_add() 962 netdev_err(bond->dev, "invalid ARP target %pI4 specified for addition\n", _bond_option_arp_ip_target_add() 963 &target); _bond_option_arp_ip_target_add() 967 if (bond_get_targets_ip(targets, target) != -1) { /* dup */ _bond_option_arp_ip_target_add() 968 netdev_err(bond->dev, "ARP target %pI4 is already present\n", _bond_option_arp_ip_target_add() 969 &target); _bond_option_arp_ip_target_add() 975 netdev_err(bond->dev, "ARP target table is full!\n"); _bond_option_arp_ip_target_add() 979 netdev_info(bond->dev, "Adding ARP target %pI4\n", &target); _bond_option_arp_ip_target_add() 981 _bond_options_arp_ip_target_set(bond, ind, target, jiffies); _bond_option_arp_ip_target_add() 986 static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target) bond_option_arp_ip_target_add() argument 988 return _bond_option_arp_ip_target_add(bond, target); bond_option_arp_ip_target_add() 991 static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) bond_option_arp_ip_target_rem() argument 999 if (!bond_is_ip_target_ok(target)) { bond_option_arp_ip_target_rem() 1000 netdev_err(bond->dev, "invalid ARP target %pI4 specified for removal\n", bond_option_arp_ip_target_rem() 1001 &target); bond_option_arp_ip_target_rem() 1005 ind = bond_get_targets_ip(targets, target); bond_option_arp_ip_target_rem() 1007 netdev_err(bond->dev, "unable to remove nonexistent ARP target %pI4\n", bond_option_arp_ip_target_rem() 1008 &target); bond_option_arp_ip_target_rem() 1013 netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n"); bond_option_arp_ip_target_rem() 1015 netdev_info(bond->dev, "Removing ARP target %pI4\n", &target); bond_option_arp_ip_target_rem() 1042 __be32 target; bond_option_arp_ip_targets_set() local 1045 if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) { bond_option_arp_ip_targets_set() 1046 netdev_err(bond->dev, "invalid ARP target %pI4 specified\n", bond_option_arp_ip_targets_set() 1047 &target); bond_option_arp_ip_targets_set() 1051 ret = bond_option_arp_ip_target_add(bond, target); bond_option_arp_ip_targets_set() 1053 ret = bond_option_arp_ip_target_rem(bond, target); bond_option_arp_ip_targets_set() 1057 target = newval->value; bond_option_arp_ip_targets_set() 1058 ret = bond_option_arp_ip_target_add(bond, target); bond_option_arp_ip_targets_set() 941 _bond_options_arp_ip_target_set(struct bonding *bond, int slot, __be32 target, unsigned long last_rx) _bond_options_arp_ip_target_set() argument
|
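_bond_option_arp_ip_target_add() is validate-then-commit: reject a bad address, reject duplicates, fail when the table is full, and only then write the free slot. The same shape on a plain array (a sketch; the kernel's table holds BOND_MAX_ARP_TARGETS = 16 entries and an all-zero entry marks a free slot):

    #define MAX_TARGETS 16          /* BOND_MAX_ARP_TARGETS in the kernel */

    /* 0 on success, -1 on invalid, duplicate, or full table. */
    static int arp_target_add(unsigned int *targets, unsigned int target)
    {
        int i, free_slot = -1;

        if (target == 0)
            return -1;                  /* invalid ARP target */
        for (i = 0; i < MAX_TARGETS; i++) {
            if (targets[i] == target)
                return -1;              /* already present */
            if (targets[i] == 0 && free_slot < 0)
                free_slot = i;
        }
        if (free_slot < 0)
            return -1;                  /* target table is full */
        targets[free_slot] = target;
        return 0;
    }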
/linux-4.4.14/drivers/scsi/fnic/ |
H A D | fcpio.h | 122 * of the tag field will be the target command and target task management 310 * target command 313 u16 rx_id; /* FC rx_id of target command */ 330 * target command 333 u16 rx_id; /* FC rx_id of target command */ 344 * used for requesting the firmware to send out a response for a target 348 u16 rx_id; /* FC rx_id of target command */ 368 * the target tmf request 371 u16 rx_id; /* FC rx_id of target command */ 379 * used by the host to request the firmware to abort a target request that was 383 u16 rx_id; /* rx_id of the target request */ 528 * used by the firmware to notify the host of an incoming target SCSI 16-Byte 561 * used by the firmware to notify the host of an incoming target SCSI 32-Byte 579 * used by the firmware to notify the host of a response to a host target 583 u16 rx_id; /* rx_id of the target request */ 617 u16 rx_id; /* rx_id of the target request */ 768 u8 target; member in struct:fcpio_lunmap_entry
|
/linux-4.4.14/include/target/ |
H A D | target_core_fabric.h | 10 * Setting this value tells target-core to enforce this limit, and 13 * target-core will currently reset se_cmd->data_length to this 37 * target-core should signal the PROTECT=1 feature bit for 39 * HW offload or target-core emulation performs the associated 182 * The LIO target core uses DMA_TO_DEVICE to mean that data is going 183 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean 184 * that data is coming from the target (eg handling a READ). However,
|
/linux-4.4.14/arch/mips/net/ |
H A D | bpf_jit.c | 96 * @target: Memory location for the compiled filter 104 u32 *target; member in struct:jit_ctx 124 if ((ctx)->target != NULL) { \ 125 u32 *p = &(ctx)->target[ctx->idx]; \ 137 if ((ctx)->target != NULL) { \ 138 u32 *p = &(ctx)->target[ctx->idx]; \ 164 if (ctx->target != NULL) { emit_load_imm() 167 u32 *p = &ctx->target[ctx->idx]; emit_load_imm() 169 p = &ctx->target[ctx->idx + 1]; emit_load_imm() 172 u32 *p = &ctx->target[ctx->idx]; emit_load_imm() 378 if (ctx->target != NULL) { emit_div() 379 u32 *p = &ctx->target[ctx->idx]; emit_div() 381 p = &ctx->target[ctx->idx + 1]; emit_div() 390 if (ctx->target != NULL) { emit_mod() 391 u32 *p = &ctx->target[ctx->idx]; emit_mod() 393 p = &ctx->target[ctx->idx + 1]; emit_mod() 456 if (ctx->target == NULL) b_imm() 479 if (ctx->target != NULL) { emit_bcond() 480 u32 *p = &ctx->target[ctx->idx]; emit_bcond() 669 if (ctx->target == NULL) build_body() 1184 if (ctx->target == NULL) build_body() 1218 ctx.target = module_alloc(alloc_size); bpf_jit_compile() 1219 if (ctx.target == NULL) bpf_jit_compile() 1223 memset(ctx.target, 0, alloc_size); bpf_jit_compile() 1233 flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx)); bpf_jit_compile() 1237 bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); bpf_jit_compile() 1239 fp->bpf_func = (void *)ctx.target; bpf_jit_compile()
|
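The bpf_jit.c hits outline the classic two-pass JIT: every emitter writes through ctx->target only when it is non-NULL, so a first pass with target == NULL just advances ctx->idx to size the image; the buffer is then allocated and the body is rebuilt to emit for real. The control-flow skeleton — a sketch, not the MIPS backend:

    #include <stdlib.h>

    struct jit_ctx {
        unsigned int idx;       /* instruction index, advanced on both passes */
        unsigned int *target;   /* NULL during the sizing pass */
    };

    static void emit(struct jit_ctx *ctx, unsigned int insn)
    {
        if (ctx->target != NULL)
            ctx->target[ctx->idx] = insn;   /* pass 2: really store it */
        ctx->idx++;                         /* both passes: count space */
    }

    static unsigned int *jit_compile(void (*build_body)(struct jit_ctx *))
    {
        struct jit_ctx ctx = { 0, NULL };

        build_body(&ctx);                   /* pass 1: measure the image */
        ctx.target = calloc(ctx.idx, sizeof(*ctx.target));
        if (ctx.target == NULL)
            return NULL;
        ctx.idx = 0;
        build_body(&ctx);                   /* pass 2: emit instructions */
        return ctx.target;
    }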