pkt                57 arch/powerpc/platforms/ps3/gelic_udbg.c 	u8 pkt[1520];
pkt               132 arch/powerpc/platforms/ps3/gelic_udbg.c 	dbg.descr.buf_addr = bus_addr + offsetof(struct debug_block, pkt);
pkt               144 arch/powerpc/platforms/ps3/gelic_udbg.c 	h_eth = (struct ethhdr *)dbg.pkt;
pkt               380 arch/sparc/include/asm/vio.h 	int	(*handle_attr)(struct vio_driver_state *vio, void *pkt);
pkt               499 arch/sparc/include/asm/vio.h int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt);
pkt               290 arch/sparc/kernel/ds.c 	} pkt;
pkt               298 arch/sparc/kernel/ds.c 	memset(&pkt, 0, sizeof(pkt));
pkt               299 arch/sparc/kernel/ds.c 	pkt.data.tag.type = DS_DATA;
pkt               300 arch/sparc/kernel/ds.c 	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
pkt               301 arch/sparc/kernel/ds.c 	pkt.data.handle = cp->handle;
pkt               302 arch/sparc/kernel/ds.c 	pkt.res.req_num = rp->req_num;
pkt               303 arch/sparc/kernel/ds.c 	pkt.res.result = DS_OK;
pkt               305 arch/sparc/kernel/ds.c 	ds_send(lp, &pkt, sizeof(pkt));
pkt               329 arch/sparc/kernel/ds.c 	} pkt;
pkt               336 arch/sparc/kernel/ds.c 	memset(&pkt, 0, sizeof(pkt));
pkt               337 arch/sparc/kernel/ds.c 	pkt.data.tag.type = DS_DATA;
pkt               338 arch/sparc/kernel/ds.c 	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
pkt               339 arch/sparc/kernel/ds.c 	pkt.data.handle = cp->handle;
pkt               340 arch/sparc/kernel/ds.c 	pkt.res.req_num = rp->req_num;
pkt               341 arch/sparc/kernel/ds.c 	pkt.res.result = DS_OK;
pkt               342 arch/sparc/kernel/ds.c 	pkt.res.reason[0] = 0;
pkt               344 arch/sparc/kernel/ds.c 	ds_send(lp, &pkt, sizeof(pkt));
pkt               369 arch/sparc/kernel/ds.c 	} pkt;
pkt               376 arch/sparc/kernel/ds.c 	memset(&pkt, 0, sizeof(pkt));
pkt               377 arch/sparc/kernel/ds.c 	pkt.data.tag.type = DS_DATA;
pkt               378 arch/sparc/kernel/ds.c 	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
pkt               379 arch/sparc/kernel/ds.c 	pkt.data.handle = cp->handle;
pkt               380 arch/sparc/kernel/ds.c 	pkt.res.req_num = rp->req_num;
pkt               381 arch/sparc/kernel/ds.c 	pkt.res.result = DS_OK;
pkt               382 arch/sparc/kernel/ds.c 	pkt.res.reason[0] = 0;
pkt               384 arch/sparc/kernel/ds.c 	ds_send(lp, &pkt, sizeof(pkt));
pkt               430 arch/sparc/kernel/ds.c 	} pkt;
pkt               433 arch/sparc/kernel/ds.c 	memset(&pkt, 0, sizeof(pkt));
pkt               434 arch/sparc/kernel/ds.c 	pkt.data.tag.type = DS_DATA;
pkt               435 arch/sparc/kernel/ds.c 	pkt.data.handle = cp->handle;
pkt               436 arch/sparc/kernel/ds.c 	pkt.tag.req_num = tag->req_num;
pkt               437 arch/sparc/kernel/ds.c 	pkt.tag.type = DR_CPU_ERROR;
pkt               438 arch/sparc/kernel/ds.c 	pkt.tag.num_records = 0;
pkt               443 arch/sparc/kernel/ds.c 	pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
pkt               445 arch/sparc/kernel/ds.c 	__ds_send(dp->lp, &pkt, msg_len);
pkt               781 arch/sparc/kernel/ds.c 		} pkt;
pkt               786 arch/sparc/kernel/ds.c 		    sizeof(pkt) - sizeof(pkt.header)) {
pkt               791 arch/sparc/kernel/ds.c 				sizeof(pkt) - sizeof(pkt.header), var, value);
pkt               795 arch/sparc/kernel/ds.c 		memset(&pkt, 0, sizeof(pkt));
pkt               796 arch/sparc/kernel/ds.c 		pkt.header.data.tag.type = DS_DATA;
pkt               797 arch/sparc/kernel/ds.c 		pkt.header.data.handle = cp->handle;
pkt               798 arch/sparc/kernel/ds.c 		pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
pkt               799 arch/sparc/kernel/ds.c 		base = p = &pkt.header.msg.name_and_value[0];
pkt               809 arch/sparc/kernel/ds.c 		pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
pkt               817 arch/sparc/kernel/ds.c 		__ds_send(dp->lp, &pkt, msg_len);
pkt               922 arch/sparc/kernel/ds.c static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
pkt               926 arch/sparc/kernel/ds.c 		if (pkt->type != DS_INIT_ACK)
pkt               937 arch/sparc/kernel/ds.c 	if (pkt->type == DS_REG_ACK) {
pkt               938 arch/sparc/kernel/ds.c 		struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
pkt               949 arch/sparc/kernel/ds.c 	} else if (pkt->type == DS_REG_NACK) {
pkt               950 arch/sparc/kernel/ds.c 		struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
pkt              1047 arch/sparc/kernel/ds.c static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
pkt              1049 arch/sparc/kernel/ds.c 	struct ds_data *dpkt = (struct ds_data *) pkt;
pkt              1057 arch/sparc/kernel/ds.c 		memcpy(&qp->req, pkt, len);
pkt                50 arch/sparc/kernel/viohs.c 	struct vio_ver_info pkt;
pkt                54 arch/sparc/kernel/viohs.c 	memset(&pkt, 0, sizeof(pkt));
pkt                55 arch/sparc/kernel/viohs.c 	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
pkt                56 arch/sparc/kernel/viohs.c 	pkt.major = major;
pkt                57 arch/sparc/kernel/viohs.c 	pkt.minor = minor;
pkt                58 arch/sparc/kernel/viohs.c 	pkt.dev_class = vio->dev_class;
pkt                63 arch/sparc/kernel/viohs.c 	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
pkt               163 arch/sparc/kernel/viohs.c 	struct vio_msg_tag *pkt = arg;
pkt               166 arch/sparc/kernel/viohs.c 	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
pkt               180 arch/sparc/kernel/viohs.c 		struct vio_dring_register pkt;
pkt               194 arch/sparc/kernel/viohs.c 	init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
pkt               195 arch/sparc/kernel/viohs.c 	u.pkt.dring_ident = 0;
pkt               196 arch/sparc/kernel/viohs.c 	u.pkt.num_descr = dr->num_entries;
pkt               197 arch/sparc/kernel/viohs.c 	u.pkt.descr_size = dr->entry_size;
pkt               198 arch/sparc/kernel/viohs.c 	u.pkt.options = VIO_TX_DRING;
pkt               199 arch/sparc/kernel/viohs.c 	u.pkt.num_cookies = dr->ncookies;
pkt               203 arch/sparc/kernel/viohs.c 	       u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
pkt               204 arch/sparc/kernel/viohs.c 	       u.pkt.num_cookies);
pkt               207 arch/sparc/kernel/viohs.c 		u.pkt.cookies[i] = dr->cookies[i];
pkt               211 arch/sparc/kernel/viohs.c 		       (unsigned long long) u.pkt.cookies[i].cookie_addr,
pkt               212 arch/sparc/kernel/viohs.c 		       (unsigned long long) u.pkt.cookies[i].cookie_size);
pkt               215 arch/sparc/kernel/viohs.c 	return send_ctrl(vio, &u.pkt.tag, bytes);
pkt               220 arch/sparc/kernel/viohs.c 	struct vio_rdx pkt;
pkt               222 arch/sparc/kernel/viohs.c 	memset(&pkt, 0, sizeof(pkt));
pkt               224 arch/sparc/kernel/viohs.c 	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);
pkt               228 arch/sparc/kernel/viohs.c 	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
pkt               256 arch/sparc/kernel/viohs.c 			    struct vio_ver_info *pkt)
pkt               262 arch/sparc/kernel/viohs.c 	       pkt->major, pkt->minor, pkt->dev_class);
pkt               270 arch/sparc/kernel/viohs.c 	vap = find_by_major(vio, pkt->major);
pkt               272 arch/sparc/kernel/viohs.c 	vio->_peer_sid = pkt->tag.sid;
pkt               275 arch/sparc/kernel/viohs.c 		pkt->tag.stype = VIO_SUBTYPE_NACK;
pkt               276 arch/sparc/kernel/viohs.c 		pkt->major = 0;
pkt               277 arch/sparc/kernel/viohs.c 		pkt->minor = 0;
pkt               279 arch/sparc/kernel/viohs.c 		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
pkt               280 arch/sparc/kernel/viohs.c 	} else if (vap->major != pkt->major) {
pkt               281 arch/sparc/kernel/viohs.c 		pkt->tag.stype = VIO_SUBTYPE_NACK;
pkt               282 arch/sparc/kernel/viohs.c 		pkt->major = vap->major;
pkt               283 arch/sparc/kernel/viohs.c 		pkt->minor = vap->minor;
pkt               285 arch/sparc/kernel/viohs.c 		       pkt->major, pkt->minor);
pkt               286 arch/sparc/kernel/viohs.c 		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
pkt               289 arch/sparc/kernel/viohs.c 			.major = pkt->major,
pkt               290 arch/sparc/kernel/viohs.c 			.minor = pkt->minor,
pkt               294 arch/sparc/kernel/viohs.c 		pkt->minor = ver.minor;
pkt               295 arch/sparc/kernel/viohs.c 		pkt->tag.stype = VIO_SUBTYPE_ACK;
pkt               296 arch/sparc/kernel/viohs.c 		pkt->dev_class = vio->dev_class;
pkt               298 arch/sparc/kernel/viohs.c 		       pkt->major, pkt->minor);
pkt               299 arch/sparc/kernel/viohs.c 		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
pkt               312 arch/sparc/kernel/viohs.c 			   struct vio_ver_info *pkt)
pkt               315 arch/sparc/kernel/viohs.c 	       pkt->major, pkt->minor, pkt->dev_class);
pkt               318 arch/sparc/kernel/viohs.c 		if (vio->ver.major != pkt->major ||
pkt               319 arch/sparc/kernel/viohs.c 		    vio->ver.minor != pkt->minor) {
pkt               320 arch/sparc/kernel/viohs.c 			pkt->tag.stype = VIO_SUBTYPE_NACK;
pkt               321 arch/sparc/kernel/viohs.c 			(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
pkt               325 arch/sparc/kernel/viohs.c 		vio->ver.major = pkt->major;
pkt               326 arch/sparc/kernel/viohs.c 		vio->ver.minor = pkt->minor;
pkt               345 arch/sparc/kernel/viohs.c 			    struct vio_ver_info *pkt)
pkt               350 arch/sparc/kernel/viohs.c 	       pkt->major, pkt->minor, pkt->dev_class);
pkt               352 arch/sparc/kernel/viohs.c 	if (pkt->major == 0 && pkt->minor == 0)
pkt               354 arch/sparc/kernel/viohs.c 	nver = find_by_major(vio, pkt->major);
pkt               364 arch/sparc/kernel/viohs.c static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
pkt               366 arch/sparc/kernel/viohs.c 	switch (pkt->tag.stype) {
pkt               368 arch/sparc/kernel/viohs.c 		return process_ver_info(vio, pkt);
pkt               371 arch/sparc/kernel/viohs.c 		return process_ver_ack(vio, pkt);
pkt               374 arch/sparc/kernel/viohs.c 		return process_ver_nack(vio, pkt);
pkt               381 arch/sparc/kernel/viohs.c static int process_attr(struct vio_driver_state *vio, void *pkt)
pkt               391 arch/sparc/kernel/viohs.c 	err = vio->ops->handle_attr(vio, pkt);
pkt               428 arch/sparc/kernel/viohs.c 			     struct vio_dring_register *pkt)
pkt               435 arch/sparc/kernel/viohs.c 	       (unsigned long long) pkt->dring_ident,
pkt               436 arch/sparc/kernel/viohs.c 	       pkt->num_descr, pkt->descr_size, pkt->options,
pkt               437 arch/sparc/kernel/viohs.c 	       pkt->num_cookies);
pkt               447 arch/sparc/kernel/viohs.c 		if (!(pkt->options & VIO_TX_DRING))
pkt               449 arch/sparc/kernel/viohs.c 		pkt->options = VIO_TX_DRING;
pkt               454 arch/sparc/kernel/viohs.c 	vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
pkt               458 arch/sparc/kernel/viohs.c 	vio->desc_buf_len = pkt->descr_size;
pkt               462 arch/sparc/kernel/viohs.c 	dr->num_entries = pkt->num_descr;
pkt               463 arch/sparc/kernel/viohs.c 	dr->entry_size = pkt->descr_size;
pkt               464 arch/sparc/kernel/viohs.c 	dr->ncookies = pkt->num_cookies;
pkt               466 arch/sparc/kernel/viohs.c 		dr->cookies[i] = pkt->cookies[i];
pkt               471 arch/sparc/kernel/viohs.c 		       pkt->cookies[i].cookie_addr,
pkt               473 arch/sparc/kernel/viohs.c 		       pkt->cookies[i].cookie_size);
pkt               476 arch/sparc/kernel/viohs.c 	pkt->tag.stype = VIO_SUBTYPE_ACK;
pkt               477 arch/sparc/kernel/viohs.c 	pkt->dring_ident = ++dr->ident;
pkt               481 arch/sparc/kernel/viohs.c 	       (unsigned long long) pkt->dring_ident,
pkt               482 arch/sparc/kernel/viohs.c 	       pkt->num_descr, pkt->descr_size, pkt->options,
pkt               483 arch/sparc/kernel/viohs.c 	       pkt->num_cookies);
pkt               485 arch/sparc/kernel/viohs.c 	len = (sizeof(*pkt) +
pkt               487 arch/sparc/kernel/viohs.c 	if (send_ctrl(vio, &pkt->tag, len) < 0)
pkt               495 arch/sparc/kernel/viohs.c 	pkt->tag.stype = VIO_SUBTYPE_NACK;
pkt               497 arch/sparc/kernel/viohs.c 	(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
pkt               503 arch/sparc/kernel/viohs.c 			    struct vio_dring_register *pkt)
pkt               509 arch/sparc/kernel/viohs.c 	       (unsigned long long) pkt->dring_ident,
pkt               510 arch/sparc/kernel/viohs.c 	       pkt->num_descr, pkt->descr_size, pkt->options,
pkt               511 arch/sparc/kernel/viohs.c 	       pkt->num_cookies);
pkt               518 arch/sparc/kernel/viohs.c 	dr->ident = pkt->dring_ident;
pkt               530 arch/sparc/kernel/viohs.c 			     struct vio_dring_register *pkt)
pkt               534 arch/sparc/kernel/viohs.c 	       (unsigned long long) pkt->dring_ident,
pkt               535 arch/sparc/kernel/viohs.c 	       pkt->num_descr, pkt->descr_size, pkt->options,
pkt               536 arch/sparc/kernel/viohs.c 	       pkt->num_cookies);
pkt               542 arch/sparc/kernel/viohs.c 			struct vio_dring_register *pkt)
pkt               547 arch/sparc/kernel/viohs.c 	switch (pkt->tag.stype) {
pkt               549 arch/sparc/kernel/viohs.c 		return process_dreg_info(vio, pkt);
pkt               552 arch/sparc/kernel/viohs.c 		return process_dreg_ack(vio, pkt);
pkt               555 arch/sparc/kernel/viohs.c 		return process_dreg_nack(vio, pkt);
pkt               563 arch/sparc/kernel/viohs.c 			  struct vio_dring_unregister *pkt)
pkt               569 arch/sparc/kernel/viohs.c 	if (pkt->dring_ident != dr->ident)
pkt               583 arch/sparc/kernel/viohs.c static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
pkt               587 arch/sparc/kernel/viohs.c 	pkt->tag.stype = VIO_SUBTYPE_ACK;
pkt               589 arch/sparc/kernel/viohs.c 	if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
pkt               596 arch/sparc/kernel/viohs.c static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
pkt               607 arch/sparc/kernel/viohs.c static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
pkt               614 arch/sparc/kernel/viohs.c static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
pkt               619 arch/sparc/kernel/viohs.c 	switch (pkt->tag.stype) {
pkt               621 arch/sparc/kernel/viohs.c 		return process_rdx_info(vio, pkt);
pkt               624 arch/sparc/kernel/viohs.c 		return process_rdx_ack(vio, pkt);
pkt               627 arch/sparc/kernel/viohs.c 		return process_rdx_nack(vio, pkt);
pkt               634 arch/sparc/kernel/viohs.c int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
pkt               636 arch/sparc/kernel/viohs.c 	struct vio_msg_tag *tag = pkt;
pkt               642 arch/sparc/kernel/viohs.c 		err = process_ver(vio, pkt);
pkt               646 arch/sparc/kernel/viohs.c 		err = process_attr(vio, pkt);
pkt               650 arch/sparc/kernel/viohs.c 		err = process_dreg(vio, pkt);
pkt               654 arch/sparc/kernel/viohs.c 		err = process_dunreg(vio, pkt);
pkt               658 arch/sparc/kernel/viohs.c 		err = process_rdx(vio, pkt);
pkt               662 arch/sparc/kernel/viohs.c 		err = process_unknown(vio, pkt);
pkt               243 block/opal_proto.h 	struct opal_packet pkt;
pkt               710 block/sed-opal.c 	hdr->pkt.tsn = cpu_to_be32(tsn);
pkt               711 block/sed-opal.c 	hdr->pkt.hsn = cpu_to_be32(hsn);
pkt               721 block/sed-opal.c 	hdr->pkt.length = cpu_to_be32(cmd->pos - sizeof(hdr->cp) -
pkt               722 block/sed-opal.c 				      sizeof(hdr->pkt));
pkt               868 block/sed-opal.c 	plen = be32_to_cpu(hdr->pkt.length);
pkt               110 drivers/ata/pdc_adma.c 	u8			*pkt;
pkt               261 drivers/ata/pdc_adma.c 	u8  *buf = pp->pkt, *last_buf = NULL;
pkt               301 drivers/ata/pdc_adma.c 	u8  *buf = pp->pkt;
pkt               434 drivers/ata/pdc_adma.c 			if (pp->pkt[0] & cATERR)
pkt               436 drivers/ata/pdc_adma.c 			else if (pp->pkt[0] != cDONE)
pkt               447 drivers/ata/pdc_adma.c 					"pkt[0] 0x%02X", pp->pkt[0]);
pkt               543 drivers/ata/pdc_adma.c 	pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
pkt               545 drivers/ata/pdc_adma.c 	if (!pp->pkt)
pkt               239 drivers/ata/sata_inic162x.c 	struct inic_pkt	*pkt;
pkt               330 drivers/ata/sata_inic162x.c 	struct inic_cpb *cpb = &pp->pkt->cpb;
pkt               484 drivers/ata/sata_inic162x.c 	struct inic_pkt *pkt = pp->pkt;
pkt               485 drivers/ata/sata_inic162x.c 	struct inic_cpb *cpb = &pkt->cpb;
pkt               486 drivers/ata/sata_inic162x.c 	struct inic_prd *prd = pkt->prd;
pkt               497 drivers/ata/sata_inic162x.c 	memset(pkt, 0, sizeof(struct inic_pkt));
pkt               526 drivers/ata/sata_inic162x.c 		memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
pkt               685 drivers/ata/sata_inic162x.c 	memset(pp->pkt, 0, sizeof(struct inic_pkt));
pkt               710 drivers/ata/sata_inic162x.c 	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
pkt               712 drivers/ata/sata_inic162x.c 	if (!pp->pkt)
pkt               129 drivers/ata/sata_promise.c 	u8			*pkt;
pkt               330 drivers/ata/sata_promise.c 	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
pkt               331 drivers/ata/sata_promise.c 	if (!pp->pkt)
pkt               492 drivers/ata/sata_promise.c 	u8 *buf = pp->pkt;
pkt               649 drivers/ata/sata_promise.c 				   qc->dev->devno, pp->pkt);
pkt               651 drivers/ata/sata_promise.c 			i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
pkt               653 drivers/ata/sata_promise.c 			i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
pkt               654 drivers/ata/sata_promise.c 		pdc_pkt_footer(&qc->tf, pp->pkt, i);
pkt              1017 drivers/ata/sata_promise.c 	pp->pkt[2] = seq;
pkt                93 drivers/ata/sata_qstor.c 	u8			*pkt;
pkt               241 drivers/ata/sata_qstor.c 	u8 *prd = pp->pkt + QS_CPB_BYTES;
pkt               266 drivers/ata/sata_qstor.c 	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
pkt               476 drivers/ata/sata_qstor.c 	pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
pkt               478 drivers/ata/sata_qstor.c 	if (!pp->pkt)
pkt               184 drivers/ata/sata_sx4.c 	u8			*pkt;
pkt               292 drivers/ata/sata_sx4.c 	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
pkt               293 drivers/ata/sata_sx4.c 	if (!pp->pkt)
pkt               525 drivers/block/pktcdvd.c 	struct packet_data *pkt;
pkt               527 drivers/block/pktcdvd.c 	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
pkt               528 drivers/block/pktcdvd.c 	if (!pkt)
pkt               531 drivers/block/pktcdvd.c 	pkt->frames = frames;
pkt               532 drivers/block/pktcdvd.c 	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
pkt               533 drivers/block/pktcdvd.c 	if (!pkt->w_bio)
pkt               537 drivers/block/pktcdvd.c 		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
pkt               538 drivers/block/pktcdvd.c 		if (!pkt->pages[i])
pkt               542 drivers/block/pktcdvd.c 	spin_lock_init(&pkt->lock);
pkt               543 drivers/block/pktcdvd.c 	bio_list_init(&pkt->orig_bios);
pkt               550 drivers/block/pktcdvd.c 		pkt->r_bios[i] = bio;
pkt               553 drivers/block/pktcdvd.c 	return pkt;
pkt               557 drivers/block/pktcdvd.c 		struct bio *bio = pkt->r_bios[i];
pkt               564 drivers/block/pktcdvd.c 		if (pkt->pages[i])
pkt               565 drivers/block/pktcdvd.c 			__free_page(pkt->pages[i]);
pkt               566 drivers/block/pktcdvd.c 	bio_put(pkt->w_bio);
pkt               568 drivers/block/pktcdvd.c 	kfree(pkt);
pkt               576 drivers/block/pktcdvd.c static void pkt_free_packet_data(struct packet_data *pkt)
pkt               580 drivers/block/pktcdvd.c 	for (i = 0; i < pkt->frames; i++) {
pkt               581 drivers/block/pktcdvd.c 		struct bio *bio = pkt->r_bios[i];
pkt               585 drivers/block/pktcdvd.c 	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
pkt               586 drivers/block/pktcdvd.c 		__free_page(pkt->pages[i]);
pkt               587 drivers/block/pktcdvd.c 	bio_put(pkt->w_bio);
pkt               588 drivers/block/pktcdvd.c 	kfree(pkt);
pkt               593 drivers/block/pktcdvd.c 	struct packet_data *pkt, *next;
pkt               597 drivers/block/pktcdvd.c 	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
pkt               598 drivers/block/pktcdvd.c 		pkt_free_packet_data(pkt);
pkt               605 drivers/block/pktcdvd.c 	struct packet_data *pkt;
pkt               610 drivers/block/pktcdvd.c 		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
pkt               611 drivers/block/pktcdvd.c 		if (!pkt) {
pkt               615 drivers/block/pktcdvd.c 		pkt->id = nr_packets;
pkt               616 drivers/block/pktcdvd.c 		pkt->pd = pd;
pkt               617 drivers/block/pktcdvd.c 		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
pkt               949 drivers/block/pktcdvd.c 	struct packet_data *pkt = bio->bi_private;
pkt               950 drivers/block/pktcdvd.c 	struct pktcdvd_device *pd = pkt->pd;
pkt               954 drivers/block/pktcdvd.c 		bio, (unsigned long long)pkt->sector,
pkt               958 drivers/block/pktcdvd.c 		atomic_inc(&pkt->io_errors);
pkt               959 drivers/block/pktcdvd.c 	if (atomic_dec_and_test(&pkt->io_wait)) {
pkt               960 drivers/block/pktcdvd.c 		atomic_inc(&pkt->run_sm);
pkt               968 drivers/block/pktcdvd.c 	struct packet_data *pkt = bio->bi_private;
pkt               969 drivers/block/pktcdvd.c 	struct pktcdvd_device *pd = pkt->pd;
pkt               972 drivers/block/pktcdvd.c 	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
pkt               977 drivers/block/pktcdvd.c 	atomic_dec(&pkt->io_wait);
pkt               978 drivers/block/pktcdvd.c 	atomic_inc(&pkt->run_sm);
pkt               985 drivers/block/pktcdvd.c static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt               992 drivers/block/pktcdvd.c 	BUG_ON(bio_list_empty(&pkt->orig_bios));
pkt               994 drivers/block/pktcdvd.c 	atomic_set(&pkt->io_wait, 0);
pkt               995 drivers/block/pktcdvd.c 	atomic_set(&pkt->io_errors, 0);
pkt              1001 drivers/block/pktcdvd.c 	spin_lock(&pkt->lock);
pkt              1002 drivers/block/pktcdvd.c 	bio_list_for_each(bio, &pkt->orig_bios) {
pkt              1003 drivers/block/pktcdvd.c 		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
pkt              1008 drivers/block/pktcdvd.c 		BUG_ON(first_frame + num_frames > pkt->frames);
pkt              1012 drivers/block/pktcdvd.c 	spin_unlock(&pkt->lock);
pkt              1014 drivers/block/pktcdvd.c 	if (pkt->cache_valid) {
pkt              1016 drivers/block/pktcdvd.c 			(unsigned long long)pkt->sector);
pkt              1023 drivers/block/pktcdvd.c 	for (f = 0; f < pkt->frames; f++) {
pkt              1029 drivers/block/pktcdvd.c 		bio = pkt->r_bios[f];
pkt              1031 drivers/block/pktcdvd.c 		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
pkt              1034 drivers/block/pktcdvd.c 		bio->bi_private = pkt;
pkt              1039 drivers/block/pktcdvd.c 			f, pkt->pages[p], offset);
pkt              1040 drivers/block/pktcdvd.c 		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
pkt              1043 drivers/block/pktcdvd.c 		atomic_inc(&pkt->io_wait);
pkt              1051 drivers/block/pktcdvd.c 		frames_read, (unsigned long long)pkt->sector);
pkt              1062 drivers/block/pktcdvd.c 	struct packet_data *pkt;
pkt              1064 drivers/block/pktcdvd.c 	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
pkt              1065 drivers/block/pktcdvd.c 		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
pkt              1066 drivers/block/pktcdvd.c 			list_del_init(&pkt->list);
pkt              1067 drivers/block/pktcdvd.c 			if (pkt->sector != zone)
pkt              1068 drivers/block/pktcdvd.c 				pkt->cache_valid = 0;
pkt              1069 drivers/block/pktcdvd.c 			return pkt;
pkt              1076 drivers/block/pktcdvd.c static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt              1078 drivers/block/pktcdvd.c 	if (pkt->cache_valid) {
pkt              1079 drivers/block/pktcdvd.c 		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
pkt              1081 drivers/block/pktcdvd.c 		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
pkt              1091 drivers/block/pktcdvd.c static int pkt_start_recovery(struct packet_data *pkt)
pkt              1099 drivers/block/pktcdvd.c 	struct request *rq = pkt->rq;
pkt              1118 drivers/block/pktcdvd.c 	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
pkt              1123 drivers/block/pktcdvd.c 	pkt->sector = new_sector;
pkt              1125 drivers/block/pktcdvd.c 	bio_reset(pkt->bio);
pkt              1126 drivers/block/pktcdvd.c 	bio_set_dev(pkt->bio, pd->bdev);
pkt              1127 drivers/block/pktcdvd.c 	bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt              1128 drivers/block/pktcdvd.c 	pkt->bio->bi_iter.bi_sector = new_sector;
pkt              1129 drivers/block/pktcdvd.c 	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
pkt              1130 drivers/block/pktcdvd.c 	pkt->bio->bi_vcnt = pkt->frames;
pkt              1132 drivers/block/pktcdvd.c 	pkt->bio->bi_end_io = pkt_end_io_packet_write;
pkt              1133 drivers/block/pktcdvd.c 	pkt->bio->bi_private = pkt;
pkt              1144 drivers/block/pktcdvd.c static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
pkt              1150 drivers/block/pktcdvd.c 	enum packet_data_state old_state = pkt->state;
pkt              1152 drivers/block/pktcdvd.c 		pkt->id, (unsigned long long)pkt->sector,
pkt              1155 drivers/block/pktcdvd.c 	pkt->state = state;
pkt              1164 drivers/block/pktcdvd.c 	struct packet_data *pkt, *p;
pkt              1215 drivers/block/pktcdvd.c 	pkt = pkt_get_packet_data(pd, zone);
pkt              1218 drivers/block/pktcdvd.c 	pkt->sector = zone;
pkt              1219 drivers/block/pktcdvd.c 	BUG_ON(pkt->frames != pd->settings.size >> 2);
pkt              1220 drivers/block/pktcdvd.c 	pkt->write_size = 0;
pkt              1235 drivers/block/pktcdvd.c 		spin_lock(&pkt->lock);
pkt              1236 drivers/block/pktcdvd.c 		bio_list_add(&pkt->orig_bios, bio);
pkt              1237 drivers/block/pktcdvd.c 		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
pkt              1238 drivers/block/pktcdvd.c 		spin_unlock(&pkt->lock);
pkt              1250 drivers/block/pktcdvd.c 	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
pkt              1251 drivers/block/pktcdvd.c 	pkt_set_state(pkt, PACKET_WAITING_STATE);
pkt              1252 drivers/block/pktcdvd.c 	atomic_set(&pkt->run_sm, 1);
pkt              1255 drivers/block/pktcdvd.c 	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
pkt              1265 drivers/block/pktcdvd.c static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt              1269 drivers/block/pktcdvd.c 	bio_reset(pkt->w_bio);
pkt              1270 drivers/block/pktcdvd.c 	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt              1271 drivers/block/pktcdvd.c 	bio_set_dev(pkt->w_bio, pd->bdev);
pkt              1272 drivers/block/pktcdvd.c 	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt              1273 drivers/block/pktcdvd.c 	pkt->w_bio->bi_private = pkt;
pkt              1276 drivers/block/pktcdvd.c 	for (f = 0; f < pkt->frames; f++) {
pkt              1277 drivers/block/pktcdvd.c 		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
pkt              1280 drivers/block/pktcdvd.c 		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
pkt              1283 drivers/block/pktcdvd.c 	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
pkt              1288 drivers/block/pktcdvd.c 	spin_lock(&pkt->lock);
pkt              1289 drivers/block/pktcdvd.c 	bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
pkt              1291 drivers/block/pktcdvd.c 	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
pkt              1292 drivers/block/pktcdvd.c 	spin_unlock(&pkt->lock);
pkt              1295 drivers/block/pktcdvd.c 		pkt->write_size, (unsigned long long)pkt->sector);
pkt              1297 drivers/block/pktcdvd.c 	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
pkt              1298 drivers/block/pktcdvd.c 		pkt->cache_valid = 1;
pkt              1300 drivers/block/pktcdvd.c 		pkt->cache_valid = 0;
pkt              1303 drivers/block/pktcdvd.c 	atomic_set(&pkt->io_wait, 1);
pkt              1304 drivers/block/pktcdvd.c 	bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
pkt              1305 drivers/block/pktcdvd.c 	pkt_queue_bio(pd, pkt->w_bio);
pkt              1308 drivers/block/pktcdvd.c static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
pkt              1313 drivers/block/pktcdvd.c 		pkt->cache_valid = 0;
pkt              1316 drivers/block/pktcdvd.c 	while ((bio = bio_list_pop(&pkt->orig_bios))) {
pkt              1322 drivers/block/pktcdvd.c static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt              1324 drivers/block/pktcdvd.c 	pkt_dbg(2, pd, "pkt %d\n", pkt->id);
pkt              1327 drivers/block/pktcdvd.c 		switch (pkt->state) {
pkt              1329 drivers/block/pktcdvd.c 			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
pkt              1332 drivers/block/pktcdvd.c 			pkt->sleep_time = 0;
pkt              1333 drivers/block/pktcdvd.c 			pkt_gather_data(pd, pkt);
pkt              1334 drivers/block/pktcdvd.c 			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
pkt              1338 drivers/block/pktcdvd.c 			if (atomic_read(&pkt->io_wait) > 0)
pkt              1341 drivers/block/pktcdvd.c 			if (atomic_read(&pkt->io_errors) > 0) {
pkt              1342 drivers/block/pktcdvd.c 				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
pkt              1344 drivers/block/pktcdvd.c 				pkt_start_write(pd, pkt);
pkt              1349 drivers/block/pktcdvd.c 			if (atomic_read(&pkt->io_wait) > 0)
pkt              1352 drivers/block/pktcdvd.c 			if (!pkt->w_bio->bi_status) {
pkt              1353 drivers/block/pktcdvd.c 				pkt_set_state(pkt, PACKET_FINISHED_STATE);
pkt              1355 drivers/block/pktcdvd.c 				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
pkt              1360 drivers/block/pktcdvd.c 			if (pkt_start_recovery(pkt)) {
pkt              1361 drivers/block/pktcdvd.c 				pkt_start_write(pd, pkt);
pkt              1364 drivers/block/pktcdvd.c 				pkt_set_state(pkt, PACKET_FINISHED_STATE);
pkt              1369 drivers/block/pktcdvd.c 			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
pkt              1381 drivers/block/pktcdvd.c 	struct packet_data *pkt, *next;
pkt              1386 drivers/block/pktcdvd.c 	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
pkt              1387 drivers/block/pktcdvd.c 		if (atomic_read(&pkt->run_sm) > 0) {
pkt              1388 drivers/block/pktcdvd.c 			atomic_set(&pkt->run_sm, 0);
pkt              1389 drivers/block/pktcdvd.c 			pkt_run_state_machine(pd, pkt);
pkt              1397 drivers/block/pktcdvd.c 	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
pkt              1398 drivers/block/pktcdvd.c 		if (pkt->state == PACKET_FINISHED_STATE) {
pkt              1399 drivers/block/pktcdvd.c 			list_del(&pkt->list);
pkt              1400 drivers/block/pktcdvd.c 			pkt_put_packet_data(pd, pkt);
pkt              1401 drivers/block/pktcdvd.c 			pkt_set_state(pkt, PACKET_IDLE_STATE);
pkt              1410 drivers/block/pktcdvd.c 	struct packet_data *pkt;
pkt              1417 drivers/block/pktcdvd.c 	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
pkt              1418 drivers/block/pktcdvd.c 		states[pkt->state]++;
pkt              1430 drivers/block/pktcdvd.c 	struct packet_data *pkt;
pkt              1451 drivers/block/pktcdvd.c 			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
pkt              1452 drivers/block/pktcdvd.c 				if (atomic_read(&pkt->run_sm) > 0)
pkt              1470 drivers/block/pktcdvd.c 			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
pkt              1471 drivers/block/pktcdvd.c 				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
pkt              1472 drivers/block/pktcdvd.c 					min_sleep_time = pkt->sleep_time;
pkt              1482 drivers/block/pktcdvd.c 			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
pkt              1483 drivers/block/pktcdvd.c 				if (!pkt->sleep_time)
pkt              1485 drivers/block/pktcdvd.c 				pkt->sleep_time -= min_sleep_time - residue;
pkt              1486 drivers/block/pktcdvd.c 				if (pkt->sleep_time <= 0) {
pkt              1487 drivers/block/pktcdvd.c 					pkt->sleep_time = 0;
pkt              1488 drivers/block/pktcdvd.c 					atomic_inc(&pkt->run_sm);
pkt              2351 drivers/block/pktcdvd.c 	struct packet_data *pkt;
pkt              2363 drivers/block/pktcdvd.c 	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
pkt              2364 drivers/block/pktcdvd.c 		if (pkt->sector == zone) {
pkt              2365 drivers/block/pktcdvd.c 			spin_lock(&pkt->lock);
pkt              2366 drivers/block/pktcdvd.c 			if ((pkt->state == PACKET_WAITING_STATE) ||
pkt              2367 drivers/block/pktcdvd.c 			    (pkt->state == PACKET_READ_WAIT_STATE)) {
pkt              2368 drivers/block/pktcdvd.c 				bio_list_add(&pkt->orig_bios, bio);
pkt              2369 drivers/block/pktcdvd.c 				pkt->write_size +=
pkt              2371 drivers/block/pktcdvd.c 				if ((pkt->write_size >= pkt->frames) &&
pkt              2372 drivers/block/pktcdvd.c 				    (pkt->state == PACKET_WAITING_STATE)) {
pkt              2373 drivers/block/pktcdvd.c 					atomic_inc(&pkt->run_sm);
pkt              2376 drivers/block/pktcdvd.c 				spin_unlock(&pkt->lock);
pkt              2382 drivers/block/pktcdvd.c 			spin_unlock(&pkt->lock);
pkt               210 drivers/block/sunvdc.c 	struct vio_msg_tag *pkt = arg;
pkt               213 drivers/block/sunvdc.c 	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
pkt               224 drivers/block/sunvdc.c 	struct vio_disk_attr_info pkt;
pkt               226 drivers/block/sunvdc.c 	memset(&pkt, 0, sizeof(pkt));
pkt               228 drivers/block/sunvdc.c 	pkt.tag.type = VIO_TYPE_CTRL;
pkt               229 drivers/block/sunvdc.c 	pkt.tag.stype = VIO_SUBTYPE_INFO;
pkt               230 drivers/block/sunvdc.c 	pkt.tag.stype_env = VIO_ATTR_INFO;
pkt               231 drivers/block/sunvdc.c 	pkt.tag.sid = vio_send_sid(vio);
pkt               233 drivers/block/sunvdc.c 	pkt.xfer_mode = VIO_DRING_MODE;
pkt               234 drivers/block/sunvdc.c 	pkt.vdisk_block_size = port->vdisk_block_size;
pkt               235 drivers/block/sunvdc.c 	pkt.max_xfer_size = port->max_xfer_size;
pkt               238 drivers/block/sunvdc.c 	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);
pkt               240 drivers/block/sunvdc.c 	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
pkt               246 drivers/block/sunvdc.c 	struct vio_disk_attr_info *pkt = arg;
pkt               250 drivers/block/sunvdc.c 	       pkt->tag.stype, pkt->operations,
pkt               251 drivers/block/sunvdc.c 	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
pkt               252 drivers/block/sunvdc.c 	       pkt->xfer_mode, pkt->vdisk_block_size,
pkt               253 drivers/block/sunvdc.c 	       pkt->max_xfer_size);
pkt               255 drivers/block/sunvdc.c 	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
pkt               256 drivers/block/sunvdc.c 		switch (pkt->vdisk_type) {
pkt               263 drivers/block/sunvdc.c 			       vio->name, pkt->vdisk_type);
pkt               267 drivers/block/sunvdc.c 		if (pkt->vdisk_block_size > port->vdisk_block_size) {
pkt               271 drivers/block/sunvdc.c 			       port->vdisk_block_size, pkt->vdisk_block_size);
pkt               275 drivers/block/sunvdc.c 		port->operations = pkt->operations;
pkt               276 drivers/block/sunvdc.c 		port->vdisk_type = pkt->vdisk_type;
pkt               278 drivers/block/sunvdc.c 			port->vdisk_size = pkt->vdisk_size;
pkt               279 drivers/block/sunvdc.c 			port->vdisk_mtype = pkt->vdisk_mtype;
pkt               281 drivers/block/sunvdc.c 		if (pkt->max_xfer_size < port->max_xfer_size)
pkt               282 drivers/block/sunvdc.c 			port->max_xfer_size = pkt->max_xfer_size;
pkt               283 drivers/block/sunvdc.c 		port->vdisk_block_size = pkt->vdisk_block_size;
pkt               287 drivers/block/sunvdc.c 			port->vdisk_phys_blksz = pkt->phys_block_size;
pkt               334 drivers/block/sunvdc.c 	struct vio_dring_data *pkt = msgbuf;
pkt               336 drivers/block/sunvdc.c 	if (unlikely(pkt->dring_ident != dr->ident ||
pkt               337 drivers/block/sunvdc.c 		     pkt->start_idx != pkt->end_idx ||
pkt               338 drivers/block/sunvdc.c 		     pkt->start_idx >= VDC_TX_RING_SIZE))
pkt               341 drivers/block/sunvdc.c 	vdc_end_one(port, dr, pkt->start_idx);
pkt                90 drivers/bluetooth/btrsi.c static int rsi_hci_recv_pkt(void *priv, const u8 *pkt)
pkt                95 drivers/bluetooth/btrsi.c 	int pkt_len = get_unaligned_le16(pkt) & 0x0fff;
pkt               101 drivers/bluetooth/btrsi.c 	memcpy(skb->data, pkt + RSI_FRAME_DESC_SIZE, pkt_len);
pkt               105 drivers/bluetooth/btrsi.c 	hci_skb_pkt_type(skb) = pkt[14];
pkt               163 drivers/bluetooth/hci_mrvl.c 	struct hci_mrvl_pkt *pkt = (void *)skb->data;
pkt               168 drivers/bluetooth/hci_mrvl.c 	if ((pkt->lhs ^ pkt->rhs) != 0xffff) {
pkt               182 drivers/bluetooth/hci_mrvl.c 	mrvl->tx_len = le16_to_cpu(pkt->lhs);
pkt               195 drivers/bluetooth/hci_mrvl.c 	struct hci_mrvl_pkt *pkt = (void *)skb->data;
pkt               198 drivers/bluetooth/hci_mrvl.c 	u16 version = le16_to_cpu(pkt->lhs);
pkt               201 drivers/bluetooth/hci_mrvl.c 	if ((pkt->lhs ^ pkt->rhs) != 0xffff) {
pkt               226 drivers/bluetooth/hci_nokia.c 	struct hci_nokia_alive_pkt *pkt;
pkt               232 drivers/bluetooth/hci_nokia.c 	len = H4_TYPE_SIZE + sizeof(*hdr) + sizeof(*pkt);
pkt               241 drivers/bluetooth/hci_nokia.c 	hdr->dlen = sizeof(*pkt);
pkt               242 drivers/bluetooth/hci_nokia.c 	pkt = skb_put(skb, sizeof(*pkt));
pkt               243 drivers/bluetooth/hci_nokia.c 	pkt->mid = NOKIA_ALIVE_REQ;
pkt               575 drivers/bluetooth/hci_nokia.c 	struct hci_nokia_alive_pkt *pkt;
pkt               579 drivers/bluetooth/hci_nokia.c 	if (hdr->dlen != sizeof(*pkt)) {
pkt               586 drivers/bluetooth/hci_nokia.c 	pkt = skb_pull(skb, sizeof(*hdr));
pkt               588 drivers/bluetooth/hci_nokia.c 	if (pkt->mid != NOKIA_ALIVE_RESP) {
pkt               590 drivers/bluetooth/hci_nokia.c 			pkt->mid);
pkt               961 drivers/gpu/drm/exynos/exynos_drm_dsi.c 	struct mipi_dsi_packet *pkt = &xfer->packet;
pkt               962 drivers/gpu/drm/exynos/exynos_drm_dsi.c 	const u8 *payload = pkt->payload + xfer->tx_done;
pkt               963 drivers/gpu/drm/exynos/exynos_drm_dsi.c 	u16 length = pkt->payload_length - xfer->tx_done;
pkt              1001 drivers/gpu/drm/exynos/exynos_drm_dsi.c 	reg = get_unaligned_le32(pkt->header);
pkt              1240 drivers/gpu/drm/exynos/exynos_drm_dsi.c 		struct mipi_dsi_packet *pkt = &xfer->packet;
pkt              1242 drivers/gpu/drm/exynos/exynos_drm_dsi.c 		dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header,
pkt              1243 drivers/gpu/drm/exynos/exynos_drm_dsi.c 			(int)pkt->payload_length, pkt->payload);
pkt               145 drivers/gpu/drm/i915/display/icl_dsi.c 			    struct mipi_dsi_packet pkt, bool enable_lpdt)
pkt               162 drivers/gpu/drm/i915/display/icl_dsi.c 	if (pkt.payload)
pkt               173 drivers/gpu/drm/i915/display/icl_dsi.c 	tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
pkt               174 drivers/gpu/drm/i915/display/icl_dsi.c 	tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
pkt               175 drivers/gpu/drm/i915/display/icl_dsi.c 	tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
pkt               176 drivers/gpu/drm/i915/display/icl_dsi.c 	tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
pkt               183 drivers/gpu/drm/i915/display/icl_dsi.c 			      struct mipi_dsi_packet pkt)
pkt               186 drivers/gpu/drm/i915/display/icl_dsi.c 	if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
pkt               192 drivers/gpu/drm/i915/display/icl_dsi.c 	if (!add_payld_to_queue(host, pkt.payload,
pkt               193 drivers/gpu/drm/i915/display/icl_dsi.c 				pkt.payload_length)) {
pkt               600 drivers/gpu/drm/mcde/mcde_display.c 	u32 pkt;
pkt               609 drivers/gpu/drm/mcde/mcde_display.c 		pkt = MCDE_DSIVID0PKT;
pkt               618 drivers/gpu/drm/mcde/mcde_display.c 		pkt = MCDE_DSIVID1PKT;
pkt               627 drivers/gpu/drm/mcde/mcde_display.c 		pkt = MCDE_DSIVID2PKT;
pkt               666 drivers/gpu/drm/mcde/mcde_display.c 	writel(pkt_size, mcde->regs + pkt);
pkt               423 drivers/gpu/drm/r128/r128_drv.h #define CCE_PACKET3(pkt, n)		(R128_CCE_PACKET3 |		\
pkt               424 drivers/gpu/drm/r128/r128_drv.h 					 (pkt) | ((n) << 16))
pkt              1049 drivers/gpu/drm/radeon/evergreen_cs.c 				   struct radeon_cs_packet *pkt,
pkt              1071 drivers/gpu/drm/radeon/evergreen_cs.c 				      struct radeon_cs_packet *pkt)
pkt              1077 drivers/gpu/drm/radeon/evergreen_cs.c 	idx = pkt->idx + 1;
pkt              1078 drivers/gpu/drm/radeon/evergreen_cs.c 	reg = pkt->reg;
pkt              1079 drivers/gpu/drm/radeon/evergreen_cs.c 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
pkt              1080 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_packet0_check(p, pkt, idx, reg);
pkt              1773 drivers/gpu/drm/radeon/evergreen_cs.c 				   struct radeon_cs_packet *pkt)
pkt              1786 drivers/gpu/drm/radeon/evergreen_cs.c 	idx = pkt->idx + 1;
pkt              1789 drivers/gpu/drm/radeon/evergreen_cs.c 	switch (pkt->opcode) {
pkt              1796 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 1) {
pkt              1828 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 1) {
pkt              1836 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count) {
pkt              1846 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count) {
pkt              1855 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 1) {
pkt              1881 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 0) {
pkt              1890 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 3) {
pkt              1918 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 4) {
pkt              1943 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 1) {
pkt              1954 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 2) {
pkt              1965 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count < 2) {
pkt              1976 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 2) {
pkt              1987 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 3) {
pkt              2006 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 2) {
pkt              2033 drivers/gpu/drm/radeon/evergreen_cs.c 		u64 size = pkt->opcode == PACKET3_DRAW_INDIRECT ? 16 : 20;
pkt              2040 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 1) {
pkt              2059 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 3) {
pkt              2070 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 1) {
pkt              2087 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 5) {
pkt              2116 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 4) {
pkt              2214 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count) {
pkt              2220 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 3) {
pkt              2236 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 2 && pkt->count != 0) {
pkt              2240 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count) {
pkt              2260 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 4) {
pkt              2282 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 3) {
pkt              2302 drivers/gpu/drm/radeon/evergreen_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2319 drivers/gpu/drm/radeon/evergreen_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2335 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count % 8) {
pkt              2340 drivers/gpu/drm/radeon/evergreen_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2347 drivers/gpu/drm/radeon/evergreen_cs.c 		for (i = 0; i < (pkt->count / 8); i++) {
pkt              2443 drivers/gpu/drm/radeon/evergreen_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2453 drivers/gpu/drm/radeon/evergreen_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2463 drivers/gpu/drm/radeon/evergreen_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2472 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count % 3) {
pkt              2477 drivers/gpu/drm/radeon/evergreen_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2486 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 4) {
pkt              2533 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 3) {
pkt              2559 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 4) {
pkt              2623 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 2) {
pkt              2666 drivers/gpu/drm/radeon/evergreen_cs.c 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
pkt              2674 drivers/gpu/drm/radeon/evergreen_cs.c 	struct radeon_cs_packet pkt;
pkt              2749 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
pkt              2755 drivers/gpu/drm/radeon/evergreen_cs.c 		p->idx += pkt.count + 2;
pkt              2756 drivers/gpu/drm/radeon/evergreen_cs.c 		switch (pkt.type) {
pkt              2758 drivers/gpu/drm/radeon/evergreen_cs.c 			r = evergreen_cs_parse_packet0(p, &pkt);
pkt              2763 drivers/gpu/drm/radeon/evergreen_cs.c 			r = evergreen_packet3_check(p, &pkt);
pkt              2766 drivers/gpu/drm/radeon/evergreen_cs.c 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
pkt              3349 drivers/gpu/drm/radeon/evergreen_cs.c 				      u32 *ib, struct radeon_cs_packet *pkt)
pkt              3351 drivers/gpu/drm/radeon/evergreen_cs.c 	u32 idx = pkt->idx + 1;
pkt              3356 drivers/gpu/drm/radeon/evergreen_cs.c 	switch (pkt->opcode) {
pkt              3423 drivers/gpu/drm/radeon/evergreen_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              3430 drivers/gpu/drm/radeon/evergreen_cs.c 		for (i = 0; i < pkt->count; i++) {
pkt              3498 drivers/gpu/drm/radeon/evergreen_cs.c 		if (pkt->count != 2) {
pkt              3525 drivers/gpu/drm/radeon/evergreen_cs.c 	struct radeon_cs_packet pkt;
pkt              3528 drivers/gpu/drm/radeon/evergreen_cs.c 		pkt.idx = idx;
pkt              3529 drivers/gpu/drm/radeon/evergreen_cs.c 		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
pkt              3530 drivers/gpu/drm/radeon/evergreen_cs.c 		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt              3531 drivers/gpu/drm/radeon/evergreen_cs.c 		pkt.one_reg_wr = 0;
pkt              3532 drivers/gpu/drm/radeon/evergreen_cs.c 		switch (pkt.type) {
pkt              3541 drivers/gpu/drm/radeon/evergreen_cs.c 			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
pkt              3542 drivers/gpu/drm/radeon/evergreen_cs.c 			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
pkt              3543 drivers/gpu/drm/radeon/evergreen_cs.c 			idx += pkt.count + 2;
pkt              3546 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
pkt              1260 drivers/gpu/drm/radeon/r100.c 			    struct radeon_cs_packet *pkt,
pkt              1274 drivers/gpu/drm/radeon/r100.c 		radeon_cs_dump_packet(p, pkt);
pkt              1288 drivers/gpu/drm/radeon/r100.c 				radeon_cs_dump_packet(p, pkt);
pkt              1302 drivers/gpu/drm/radeon/r100.c 			     struct radeon_cs_packet *pkt,
pkt              1317 drivers/gpu/drm/radeon/r100.c 		      pkt->opcode);
pkt              1318 drivers/gpu/drm/radeon/r100.c 	    radeon_cs_dump_packet(p, pkt);
pkt              1326 drivers/gpu/drm/radeon/r100.c 				  pkt->opcode);
pkt              1327 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1339 drivers/gpu/drm/radeon/r100.c 				  pkt->opcode);
pkt              1340 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1352 drivers/gpu/drm/radeon/r100.c 					  pkt->opcode);
pkt              1353 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1366 drivers/gpu/drm/radeon/r100.c 			  struct radeon_cs_packet *pkt,
pkt              1375 drivers/gpu/drm/radeon/r100.c 	idx = pkt->idx + 1;
pkt              1376 drivers/gpu/drm/radeon/r100.c 	reg = pkt->reg;
pkt              1381 drivers/gpu/drm/radeon/r100.c 	if (pkt->one_reg_wr) {
pkt              1386 drivers/gpu/drm/radeon/r100.c 		if (((reg + (pkt->count << 2)) >> 7) > n) {
pkt              1390 drivers/gpu/drm/radeon/r100.c 	for (i = 0; i <= pkt->count; i++, idx++) {
pkt              1394 drivers/gpu/drm/radeon/r100.c 			r = check(p, pkt, idx, reg);
pkt              1399 drivers/gpu/drm/radeon/r100.c 		if (pkt->one_reg_wr) {
pkt              1552 drivers/gpu/drm/radeon/r100.c 			      struct radeon_cs_packet *pkt,
pkt              1575 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1583 drivers/gpu/drm/radeon/r100.c 		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
pkt              1592 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1605 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1621 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1648 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1666 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1684 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1702 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1773 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1893 drivers/gpu/drm/radeon/r100.c 					 struct radeon_cs_packet *pkt,
pkt              1898 drivers/gpu/drm/radeon/r100.c 	idx = pkt->idx + 1;
pkt              1911 drivers/gpu/drm/radeon/r100.c 			      struct radeon_cs_packet *pkt)
pkt              1920 drivers/gpu/drm/radeon/r100.c 	idx = pkt->idx + 1;
pkt              1922 drivers/gpu/drm/radeon/r100.c 	switch (pkt->opcode) {
pkt              1924 drivers/gpu/drm/radeon/r100.c 		r = r100_packet3_load_vbpntr(p, pkt, idx);
pkt              1931 drivers/gpu/drm/radeon/r100.c 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
pkt              1932 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1936 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
pkt              1945 drivers/gpu/drm/radeon/r100.c 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
pkt              1946 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
pkt              1959 drivers/gpu/drm/radeon/r100.c 		track->immd_dwords = pkt->count - 1;
pkt              1971 drivers/gpu/drm/radeon/r100.c 		track->immd_dwords = pkt->count - 1;
pkt              1983 drivers/gpu/drm/radeon/r100.c 		track->immd_dwords = pkt->count;
pkt              2025 drivers/gpu/drm/radeon/r100.c 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
pkt              2033 drivers/gpu/drm/radeon/r100.c 	struct radeon_cs_packet pkt;
pkt              2043 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
pkt              2047 drivers/gpu/drm/radeon/r100.c 		p->idx += pkt.count + 2;
pkt              2048 drivers/gpu/drm/radeon/r100.c 		switch (pkt.type) {
pkt              2051 drivers/gpu/drm/radeon/r100.c 				r = r100_cs_parse_packet0(p, &pkt,
pkt              2056 drivers/gpu/drm/radeon/r100.c 				r = r100_cs_parse_packet0(p, &pkt,
pkt              2064 drivers/gpu/drm/radeon/r100.c 			r = r100_packet3_check(p, &pkt);
pkt              2068 drivers/gpu/drm/radeon/r100.c 				  pkt.type);
pkt                91 drivers/gpu/drm/radeon/r100_track.h 		       struct radeon_cs_packet *pkt,
pkt                95 drivers/gpu/drm/radeon/r100_track.h 			    struct radeon_cs_packet *pkt,
pkt                99 drivers/gpu/drm/radeon/r100_track.h 			     struct radeon_cs_packet *pkt,
pkt               146 drivers/gpu/drm/radeon/r200.c 		       struct radeon_cs_packet *pkt,
pkt               168 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
pkt               176 drivers/gpu/drm/radeon/r200.c 		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
pkt               185 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
pkt               198 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
pkt               217 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
pkt               270 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
pkt               288 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
pkt               365 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
pkt               632 drivers/gpu/drm/radeon/r300.c 		struct radeon_cs_packet *pkt,
pkt               654 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
pkt               660 drivers/gpu/drm/radeon/r300.c 		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
pkt               673 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
pkt               686 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
pkt               715 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
pkt               788 drivers/gpu/drm/radeon/r300.c 				radeon_cs_dump_packet(p, pkt);
pkt               873 drivers/gpu/drm/radeon/r300.c 				radeon_cs_dump_packet(p, pkt);
pkt              1088 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
pkt              1130 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
pkt              1177 drivers/gpu/drm/radeon/r300.c 			      struct radeon_cs_packet *pkt)
pkt              1186 drivers/gpu/drm/radeon/r300.c 	idx = pkt->idx + 1;
pkt              1188 drivers/gpu/drm/radeon/r300.c 	switch(pkt->opcode) {
pkt              1190 drivers/gpu/drm/radeon/r300.c 		r = r100_packet3_load_vbpntr(p, pkt, idx);
pkt              1197 drivers/gpu/drm/radeon/r300.c 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
pkt              1198 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
pkt              1202 drivers/gpu/drm/radeon/r300.c 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
pkt              1217 drivers/gpu/drm/radeon/r300.c 		track->immd_dwords = pkt->count - 1;
pkt              1232 drivers/gpu/drm/radeon/r300.c 		track->immd_dwords = pkt->count;
pkt              1278 drivers/gpu/drm/radeon/r300.c 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
pkt              1286 drivers/gpu/drm/radeon/r300.c 	struct radeon_cs_packet pkt;
pkt              1296 drivers/gpu/drm/radeon/r300.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
pkt              1300 drivers/gpu/drm/radeon/r300.c 		p->idx += pkt.count + 2;
pkt              1301 drivers/gpu/drm/radeon/r300.c 		switch (pkt.type) {
pkt              1303 drivers/gpu/drm/radeon/r300.c 			r = r100_cs_parse_packet0(p, &pkt,
pkt              1311 drivers/gpu/drm/radeon/r300.c 			r = r300_packet3_check(p, &pkt);
pkt              1314 drivers/gpu/drm/radeon/r300.c 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
pkt               920 drivers/gpu/drm/radeon/r600_cs.c 				struct radeon_cs_packet *pkt,
pkt               942 drivers/gpu/drm/radeon/r600_cs.c 				struct radeon_cs_packet *pkt)
pkt               948 drivers/gpu/drm/radeon/r600_cs.c 	idx = pkt->idx + 1;
pkt               949 drivers/gpu/drm/radeon/r600_cs.c 	reg = pkt->reg;
pkt               950 drivers/gpu/drm/radeon/r600_cs.c 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
pkt               951 drivers/gpu/drm/radeon/r600_cs.c 		r = r600_packet0_check(p, pkt, idx, reg);
pkt              1627 drivers/gpu/drm/radeon/r600_cs.c 				struct radeon_cs_packet *pkt)
pkt              1640 drivers/gpu/drm/radeon/r600_cs.c 	idx = pkt->idx + 1;
pkt              1643 drivers/gpu/drm/radeon/r600_cs.c 	switch (pkt->opcode) {
pkt              1650 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 1) {
pkt              1683 drivers/gpu/drm/radeon/r600_cs.c 		if (p->family >= CHIP_RV770 || pkt->count) {
pkt              1689 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 1) {
pkt              1696 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count) {
pkt              1704 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 3) {
pkt              1729 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 1) {
pkt              1741 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count < 2) {
pkt              1752 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 5) {
pkt              1781 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 4) {
pkt              1850 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 3) {
pkt              1866 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 2 && pkt->count != 0) {
pkt              1870 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count) {
pkt              1890 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 4) {
pkt              1910 drivers/gpu/drm/radeon/r600_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              1917 drivers/gpu/drm/radeon/r600_cs.c 		for (i = 0; i < pkt->count; i++) {
pkt              1926 drivers/gpu/drm/radeon/r600_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              1933 drivers/gpu/drm/radeon/r600_cs.c 		for (i = 0; i < pkt->count; i++) {
pkt              1941 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count % 7) {
pkt              1946 drivers/gpu/drm/radeon/r600_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              1953 drivers/gpu/drm/radeon/r600_cs.c 		for (i = 0; i < (pkt->count / 7); i++) {
pkt              2026 drivers/gpu/drm/radeon/r600_cs.c 			end_reg = 4 * pkt->count + start_reg - 4;
pkt              2037 drivers/gpu/drm/radeon/r600_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2047 drivers/gpu/drm/radeon/r600_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2057 drivers/gpu/drm/radeon/r600_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2066 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count % 3) {
pkt              2071 drivers/gpu/drm/radeon/r600_cs.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              2085 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 1) {
pkt              2127 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count) {
pkt              2133 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 4) {
pkt              2180 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 3) {
pkt              2206 drivers/gpu/drm/radeon/r600_cs.c 		if (pkt->count != 4) {
pkt              2262 drivers/gpu/drm/radeon/r600_cs.c 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
pkt              2270 drivers/gpu/drm/radeon/r600_cs.c 	struct radeon_cs_packet pkt;
pkt              2292 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
pkt              2298 drivers/gpu/drm/radeon/r600_cs.c 		p->idx += pkt.count + 2;
pkt              2299 drivers/gpu/drm/radeon/r600_cs.c 		switch (pkt.type) {
pkt              2301 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_cs_parse_packet0(p, &pkt);
pkt              2306 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_packet3_check(p, &pkt);
pkt              2309 drivers/gpu/drm/radeon/r600_cs.c 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
pkt              1109 drivers/gpu/drm/radeon/radeon.h 				      struct radeon_cs_packet *pkt,
pkt              1112 drivers/gpu/drm/radeon/radeon.h 				      struct radeon_cs_packet *pkt);
pkt              2942 drivers/gpu/drm/radeon/radeon.h 			   struct radeon_cs_packet *pkt,
pkt              2946 drivers/gpu/drm/radeon/radeon.h 			   struct radeon_cs_packet *pkt);
pkt               122 drivers/gpu/drm/radeon/radeon_asic.h 					 struct radeon_cs_packet *pkt,
pkt               125 drivers/gpu/drm/radeon/radeon_asic.h 			  struct radeon_cs_packet *pkt,
pkt               129 drivers/gpu/drm/radeon/radeon_asic.h 			 struct radeon_cs_packet *pkt,
pkt               733 drivers/gpu/drm/radeon/radeon_cs.c 			   struct radeon_cs_packet *pkt,
pkt               747 drivers/gpu/drm/radeon/radeon_cs.c 	pkt->idx = idx;
pkt               748 drivers/gpu/drm/radeon/radeon_cs.c 	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
pkt               749 drivers/gpu/drm/radeon/radeon_cs.c 	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
pkt               750 drivers/gpu/drm/radeon/radeon_cs.c 	pkt->one_reg_wr = 0;
pkt               751 drivers/gpu/drm/radeon/radeon_cs.c 	switch (pkt->type) {
pkt               754 drivers/gpu/drm/radeon/radeon_cs.c 			pkt->reg = R100_CP_PACKET0_GET_REG(header);
pkt               755 drivers/gpu/drm/radeon/radeon_cs.c 			pkt->one_reg_wr =
pkt               758 drivers/gpu/drm/radeon/radeon_cs.c 			pkt->reg = R600_CP_PACKET0_GET_REG(header);
pkt               761 drivers/gpu/drm/radeon/radeon_cs.c 		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
pkt               764 drivers/gpu/drm/radeon/radeon_cs.c 		pkt->count = -1;
pkt               767 drivers/gpu/drm/radeon/radeon_cs.c 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
pkt               771 drivers/gpu/drm/radeon/radeon_cs.c 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
pkt               773 drivers/gpu/drm/radeon/radeon_cs.c 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
pkt               818 drivers/gpu/drm/radeon/radeon_cs.c 			   struct radeon_cs_packet *pkt)
pkt               825 drivers/gpu/drm/radeon/radeon_cs.c 	idx = pkt->idx;
pkt               826 drivers/gpu/drm/radeon/radeon_cs.c 	for (i = 0; i <= (pkt->count + 1); i++, idx++)
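Editor's note: the radeon entries above (r300_cs_parse, r600_cs_parse, radeon_cs_packet_parse) all share one shape: decode a header dword into a packet type and a count, dispatch on the type, then advance the read index by count + 2 (header plus count + 1 payload dwords). The sketch below illustrates only that loop shape; the struct, the helper names, and the bit positions are assumptions for the example, not the radeon definitions.

/*
 * Illustrative sketch of a command-stream dispatch loop in the shape of
 * the r300/r600 cs_parse loops indexed above.  The packet layout
 * (type in the top two bits, count below it) is assumed for the example.
 */
#include <stdint.h>
#include <stdio.h>

struct cs_packet {
	unsigned idx;	/* dword index of the header in the buffer   */
	unsigned type;	/* 0 = register writes, 3 = opcode packet     */
	unsigned count;	/* payload dwords minus one                   */
};

static void parse_packet(uint32_t header, unsigned idx, struct cs_packet *pkt)
{
	pkt->idx = idx;
	pkt->type = (header >> 30) & 0x3;	/* assumed field position */
	pkt->count = (header >> 16) & 0x3fff;	/* assumed field position */
}

int main(void)
{
	/* Two fake packets: type-0 header + 1 dword, type-3 header + 2 dwords. */
	uint32_t ib[] = { 0x00000000u, 0xdeadbeefu,
			  0xc0000000u | (1u << 16), 0x1u, 0x2u };
	unsigned idx = 0, n = sizeof(ib) / sizeof(ib[0]);

	while (idx < n) {
		struct cs_packet pkt;

		parse_packet(ib[idx], idx, &pkt);
		/* header + (count + 1) payload dwords, as in p->idx += pkt.count + 2 */
		idx += pkt.count + 2;

		switch (pkt.type) {
		case 0:
			printf("packet0 at %u, %u regs\n", pkt.idx, pkt.count + 1);
			break;
		case 3:
			printf("packet3 at %u, %u dwords\n", pkt.idx, pkt.count + 1);
			break;
		default:
			printf("unknown packet type %u\n", pkt.type);
			return 1;
		}
	}
	return 0;
}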
pkt               649 drivers/gpu/drm/radeon/radeon_uvd.c 			     struct radeon_cs_packet *pkt,
pkt               657 drivers/gpu/drm/radeon/radeon_uvd.c 	for (i = 0; i <= pkt->count; ++i) {
pkt               658 drivers/gpu/drm/radeon/radeon_uvd.c 		switch (pkt->reg + i*4) {
pkt               676 drivers/gpu/drm/radeon/radeon_uvd.c 				  pkt->reg + i*4);
pkt               686 drivers/gpu/drm/radeon/radeon_uvd.c 	struct radeon_cs_packet pkt;
pkt               713 drivers/gpu/drm/radeon/radeon_uvd.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
pkt               716 drivers/gpu/drm/radeon/radeon_uvd.c 		switch (pkt.type) {
pkt               718 drivers/gpu/drm/radeon/radeon_uvd.c 			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
pkt               724 drivers/gpu/drm/radeon/radeon_uvd.c 			p->idx += pkt.count + 2;
pkt               727 drivers/gpu/drm/radeon/radeon_uvd.c 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
pkt              4458 drivers/gpu/drm/radeon/si.c 				  u32 *ib, struct radeon_cs_packet *pkt)
pkt              4460 drivers/gpu/drm/radeon/si.c 	switch (pkt->opcode) {
pkt              4473 drivers/gpu/drm/radeon/si.c 		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
pkt              4531 drivers/gpu/drm/radeon/si.c 				   u32 *ib, struct radeon_cs_packet *pkt)
pkt              4534 drivers/gpu/drm/radeon/si.c 	u32 idx = pkt->idx + 1;
pkt              4538 drivers/gpu/drm/radeon/si.c 	switch (pkt->opcode) {
pkt              4599 drivers/gpu/drm/radeon/si.c 				for (i = 0; i < (pkt->count - 2); i++) {
pkt              4623 drivers/gpu/drm/radeon/si.c 		end_reg = 4 * pkt->count + start_reg - 4;
pkt              4630 drivers/gpu/drm/radeon/si.c 		for (i = 0; i < pkt->count; i++) {
pkt              4642 drivers/gpu/drm/radeon/si.c 		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
pkt              4649 drivers/gpu/drm/radeon/si.c 				       u32 *ib, struct radeon_cs_packet *pkt)
pkt              4652 drivers/gpu/drm/radeon/si.c 	u32 idx = pkt->idx + 1;
pkt              4656 drivers/gpu/drm/radeon/si.c 	switch (pkt->opcode) {
pkt              4702 drivers/gpu/drm/radeon/si.c 				for (i = 0; i < (pkt->count - 2); i++) {
pkt              4730 drivers/gpu/drm/radeon/si.c 		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
pkt              4740 drivers/gpu/drm/radeon/si.c 	struct radeon_cs_packet pkt;
pkt              4743 drivers/gpu/drm/radeon/si.c 		pkt.idx = idx;
pkt              4744 drivers/gpu/drm/radeon/si.c 		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
pkt              4745 drivers/gpu/drm/radeon/si.c 		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt              4746 drivers/gpu/drm/radeon/si.c 		pkt.one_reg_wr = 0;
pkt              4747 drivers/gpu/drm/radeon/si.c 		switch (pkt.type) {
pkt              4756 drivers/gpu/drm/radeon/si.c 			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
pkt              4758 drivers/gpu/drm/radeon/si.c 				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
pkt              4762 drivers/gpu/drm/radeon/si.c 					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
pkt              4766 drivers/gpu/drm/radeon/si.c 					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
pkt              4774 drivers/gpu/drm/radeon/si.c 			idx += pkt.count + 2;
pkt              4777 drivers/gpu/drm/radeon/si.c 			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
pkt               831 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c 	u32 pkt = msg->type;
pkt               834 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c 		pkt |= ((msg->tx_len + 1) & 0xffff) << 8;
pkt               835 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c 		pkt |= (((msg->tx_len + 1) >> 8) & 0xffff) << 16;
pkt               837 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c 		pkt |= (((u8 *)msg->tx_buf)[0] << 8);
pkt               839 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c 			pkt |= (((u8 *)msg->tx_buf)[1] << 16);
pkt               842 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c 	pkt |= sun6i_dsi_ecc_compute(pkt) << 24;
pkt               844 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c 	return pkt;
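Editor's note: the sun6i lines above pack a 32-bit MIPI DSI short-packet word: the message type in the low byte, up to two parameter bytes from tx_buf, and an ECC byte in bits 31:24. A hedged sketch of that byte packing follows; ecc_stub() is a placeholder for the driver's sun6i_dsi_ecc_compute(), whose actual computation is not reproduced, and the 0x15 type value in main() is just an arbitrary example byte.

/* Sketch of packing a DSI short-packet word, per the sun6i lines above. */
#include <stdint.h>
#include <stdio.h>

/* Placeholder for sun6i_dsi_ecc_compute(); the real ECC is not reproduced. */
static uint8_t ecc_stub(uint32_t hdr) { (void)hdr; return 0; }

static uint32_t dsi_short_pkt(uint8_t type, const uint8_t *buf, unsigned len)
{
	uint32_t pkt = type;				/* byte 0: data type   */

	if (len)
		pkt |= (uint32_t)buf[0] << 8;		/* byte 1: parameter 0 */
	if (len > 1)
		pkt |= (uint32_t)buf[1] << 16;		/* byte 2: parameter 1 */
	return pkt | ((uint32_t)ecc_stub(pkt) << 24);	/* byte 3: ECC         */
}

int main(void)
{
	const uint8_t params[2] = { 0x05, 0x00 };	/* arbitrary example bytes */

	printf("0x%08x\n", dsi_short_pkt(0x15, params, 2));
	return 0;
}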
pkt               444 drivers/infiniband/hw/hfi1/driver.c bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
pkt               449 drivers/infiniband/hw/hfi1/driver.c 	struct ib_other_headers *ohdr = pkt->ohdr;
pkt               450 drivers/infiniband/hw/hfi1/driver.c 	struct ib_grh *grh = pkt->grh;
pkt               459 drivers/infiniband/hw/hfi1/driver.c 	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
pkt               460 drivers/infiniband/hw/hfi1/driver.c 		pkey = hfi1_16B_get_pkey(pkt->hdr);
pkt               461 drivers/infiniband/hw/hfi1/driver.c 		sc = hfi1_16B_get_sc(pkt->hdr);
pkt               462 drivers/infiniband/hw/hfi1/driver.c 		dlid = hfi1_16B_get_dlid(pkt->hdr);
pkt               463 drivers/infiniband/hw/hfi1/driver.c 		slid = hfi1_16B_get_slid(pkt->hdr);
pkt               467 drivers/infiniband/hw/hfi1/driver.c 		fecn = hfi1_16B_get_fecn(pkt->hdr);
pkt               468 drivers/infiniband/hw/hfi1/driver.c 		becn = hfi1_16B_get_becn(pkt->hdr);
pkt               471 drivers/infiniband/hw/hfi1/driver.c 		sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
pkt               472 drivers/infiniband/hw/hfi1/driver.c 		dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
pkt               474 drivers/infiniband/hw/hfi1/driver.c 		slid = ib_get_slid(pkt->hdr);
pkt               486 drivers/infiniband/hw/hfi1/driver.c 		rqpn = ib_get_sqpn(pkt->ohdr);
pkt               492 drivers/infiniband/hw/hfi1/driver.c 		rqpn = ib_get_sqpn(pkt->ohdr);
pkt              1872 drivers/infiniband/hw/hfi1/hfi.h static inline bool hfi1_may_ecn(struct hfi1_packet *pkt)
pkt              1876 drivers/infiniband/hw/hfi1/hfi.h 	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
pkt              1877 drivers/infiniband/hw/hfi1/hfi.h 		fecn = hfi1_16B_get_fecn(pkt->hdr);
pkt              1878 drivers/infiniband/hw/hfi1/hfi.h 		becn = hfi1_16B_get_becn(pkt->hdr);
pkt              1880 drivers/infiniband/hw/hfi1/hfi.h 		fecn = ib_bth_get_fecn(pkt->ohdr);
pkt              1881 drivers/infiniband/hw/hfi1/hfi.h 		becn = ib_bth_get_becn(pkt->ohdr);
pkt              1886 drivers/infiniband/hw/hfi1/hfi.h bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
pkt              1888 drivers/infiniband/hw/hfi1/hfi.h static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt)
pkt              1892 drivers/infiniband/hw/hfi1/hfi.h 	do_work = hfi1_may_ecn(pkt);
pkt              1894 drivers/infiniband/hw/hfi1/hfi.h 		return hfi1_process_ecn_slowpath(qp, pkt, false);
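Editor's note: the hfi1/hfi.h entries above show a fast-path/slow-path split: hfi1_may_ecn() does a cheap inline check of the FECN/BECN header bits, and only when one is set does process_ecn() call hfi1_process_ecn_slowpath(). The sketch below shows only that gating pattern; the struct and bit positions are invented for the example.

/* Sketch of a cheap inline check gating an expensive slow path. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_pkt { uint32_t bth2; };	/* stand-in for the parsed headers */

static inline bool may_ecn(const struct demo_pkt *pkt)
{
	/* assume FECN is bit 31 and BECN is bit 30 of this demo word */
	return pkt->bth2 & (3u << 30);
}

static bool process_ecn_slowpath(const struct demo_pkt *pkt)
{
	/* heavyweight congestion handling would go here */
	printf("slow path for bth2=0x%08x\n", pkt->bth2);
	return pkt->bth2 >> 31;		/* report whether FECN was set */
}

static bool process_ecn(const struct demo_pkt *pkt)
{
	if (may_ecn(pkt))		/* common case: no ECN bits, no work */
		return process_ecn_slowpath(pkt);
	return false;
}

int main(void)
{
	struct demo_pkt quiet = { 0 };
	struct demo_pkt marked = { 1u << 31 };

	printf("quiet=%d marked=%d\n", process_ecn(&quiet), process_ecn(&marked));
	return 0;
}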
pkt              1711 drivers/infiniband/hw/hfi1/tid_rdma.c 	*bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
pkt              1748 drivers/infiniband/hw/hfi1/tid_rdma.c 			    ((flow->flow_state.spsn + flow->pkt) &
pkt              1858 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow->pkt = 0;
pkt              1930 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow->pkt = 0;
pkt              2380 drivers/infiniband/hw/hfi1/tid_rdma.c 					       flow->pkt));
pkt              2384 drivers/infiniband/hw/hfi1/tid_rdma.c 	*bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
pkt              3076 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow->pkt = 0;
pkt              3088 drivers/infiniband/hw/hfi1/tid_rdma.c 			flow->pkt += npkts;
pkt              3107 drivers/infiniband/hw/hfi1/tid_rdma.c 		flow->pkt -= flow->resync_npkts;
pkt              3134 drivers/infiniband/hw/hfi1/tid_rdma.c 				req->flows[fidx].pkt = 0;
pkt              4094 drivers/infiniband/hw/hfi1/tid_rdma.c 	flow->pkt = 0;
pkt              4234 drivers/infiniband/hw/hfi1/tid_rdma.c 	*bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
pkt              4680 drivers/infiniband/hw/hfi1/tid_rdma.c 					flow->pkt = 0;
pkt               190 drivers/infiniband/hw/hfi1/tid_rdma.h 	u8 pkt;
pkt              4323 drivers/infiniband/hw/i40iw/i40iw_ctrl.c static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
pkt              4329 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 		mpa = (__be16 *)pkt;
pkt              4339 drivers/infiniband/hw/i40iw/i40iw_ctrl.c static u8 *i40iw_locate_mpa(u8 *pkt)
pkt              4342 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	pkt += I40IW_MAC_HLEN;
pkt              4345 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	pkt += 4 * (pkt[0] & 0x0f);
pkt              4346 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	pkt += 4 * ((pkt[12] >> 4) & 0x0f);
pkt              4347 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	return pkt;
pkt              4377 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
pkt              4389 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 		pkt = i40iw_locate_mpa(pkt);
pkt              4390 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 		ddp_seg_len = ntohs(*(__be16 *)pkt);
pkt              4394 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 			if (pkt[2] & 0x80) {
pkt              4407 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 					if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
pkt              4416 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	opcode = i40iw_iwarp_opcode(info, pkt);
pkt              4546 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 		memcpy(termhdr + 1, pkt, copy_len);
pkt              4592 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
pkt              4599 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	mpa = (__be32 *)i40iw_locate_mpa(pkt);
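Editor's note: i40iw_locate_mpa() above walks past the MAC header, then the IPv4 header (IHL nibble times 4), then the TCP header (data-offset nibble at byte 12 times 4) to reach the MPA/DDP payload. The sketch below restates that pointer arithmetic in isolation; ETH_HLEN_GUESS stands in for I40IW_MAC_HLEN, and there is no VLAN or IPv6 handling.

/* Sketch of the header-skipping arithmetic shown in i40iw_locate_mpa(). */
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN_GUESS 14	/* untagged Ethernet header, assumed */

static uint8_t *locate_l4_payload(uint8_t *pkt)
{
	pkt += ETH_HLEN_GUESS;			/* skip MAC header          */
	pkt += 4 * (pkt[0] & 0x0f);		/* skip IPv4 header (IHL)   */
	pkt += 4 * ((pkt[12] >> 4) & 0x0f);	/* skip TCP header (doff)   */
	return pkt;				/* first payload byte       */
}

int main(void)
{
	uint8_t frame[80] = { 0 };

	frame[14] = 0x45;		/* IPv4, IHL = 5 (20 bytes)       */
	frame[14 + 20 + 12] = 0x50;	/* TCP data offset = 5 (20 bytes) */
	frame[54] = 0xaa;		/* first payload byte             */

	printf("payload offset = %ld, byte = 0x%02x\n",
	       (long)(locate_l4_payload(frame) - frame),
	       locate_l4_payload(frame)[0]);
	return 0;
}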
pkt                74 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	struct qed_roce_ll2_packet *pkt = cookie;
pkt                84 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
pkt                85 drivers/infiniband/hw/qedr/qedr_roce_cm.c 			  pkt->header.baddr);
pkt                86 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	kfree(pkt);
pkt               181 drivers/infiniband/hw/qedr/qedr_roce_cm.c 			    struct qed_roce_ll2_packet *pkt)
pkt               190 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	roce_flavor = (pkt->roce_mode == ROCE_V1) ?
pkt               193 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	if (pkt->roce_mode == ROCE_V2_IPV4)
pkt               196 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	ll2_tx_pkt.num_of_bds = 1 /* hdr */  + pkt->n_seg;
pkt               198 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	ll2_tx_pkt.tx_dest = pkt->tx_dest;
pkt               200 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	ll2_tx_pkt.first_frag = pkt->header.baddr;
pkt               201 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	ll2_tx_pkt.first_frag_len = pkt->header.len;
pkt               202 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	ll2_tx_pkt.cookie = pkt;
pkt               210 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		dma_free_coherent(&dev->pdev->dev, pkt->header.len,
pkt               211 drivers/infiniband/hw/qedr/qedr_roce_cm.c 				  pkt->header.vaddr, pkt->header.baddr);
pkt               212 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		kfree(pkt);
pkt               219 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	for (i = 0; i < pkt->n_seg; i++) {
pkt               223 drivers/infiniband/hw/qedr/qedr_roce_cm.c 			pkt->payload[i].baddr,
pkt               224 drivers/infiniband/hw/qedr/qedr_roce_cm.c 			pkt->payload[i].len);
pkt               546 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	struct qed_roce_ll2_packet *pkt = NULL;
pkt               577 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
pkt               583 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	rc = qedr_ll2_post_tx(dev, pkt);
pkt               724 drivers/infiniband/hw/qib/qib_common.h 		__u32 pkt:16;
pkt               254 drivers/infiniband/hw/qib/qib_user_sdma.c static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
pkt               261 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].offset = offset;
pkt               262 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].length = len;
pkt               263 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].first_desc = first_desc;
pkt               264 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].last_desc = last_desc;
pkt               265 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].put_page = put_page;
pkt               266 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].dma_mapped = dma_mapped;
pkt               267 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].page = page;
pkt               268 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].kvaddr = kvaddr;
pkt               269 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].addr = dma_addr;
pkt               270 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->addr[i].dma_length = dma_length;
pkt               297 drivers/infiniband/hw/qib/qib_user_sdma.c 				       struct qib_user_sdma_pkt *pkt,
pkt               339 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
pkt               340 drivers/infiniband/hw/qib/qib_user_sdma.c 		newlen = pkt->tidsm[pkt->tidsmidx].length;
pkt               352 drivers/infiniband/hw/qib/qib_user_sdma.c 	if ((pkt->payload_size + newlen) >= pkt->frag_size) {
pkt               353 drivers/infiniband/hw/qib/qib_user_sdma.c 		newlen = pkt->frag_size - pkt->payload_size;
pkt               355 drivers/infiniband/hw/qib/qib_user_sdma.c 	} else if (pkt->tiddma) {
pkt               356 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (newlen == pkt->tidsm[pkt->tidsmidx].length)
pkt               359 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (newlen == pkt->bytes_togo)
pkt               364 drivers/infiniband/hw/qib/qib_user_sdma.c 	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
pkt               370 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->bytes_togo -= newlen;
pkt               371 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->payload_size += newlen;
pkt               372 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->naddr++;
pkt               373 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->naddr == pkt->addrlimit) {
pkt               379 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->bytes_togo == 0) {
pkt               382 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (!pkt->addr[pkt->index].addr) {
pkt               383 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->addr[pkt->index].addr =
pkt               385 drivers/infiniband/hw/qib/qib_user_sdma.c 					pkt->addr[pkt->index].kvaddr,
pkt               386 drivers/infiniband/hw/qib/qib_user_sdma.c 					pkt->addr[pkt->index].dma_length,
pkt               389 drivers/infiniband/hw/qib/qib_user_sdma.c 					pkt->addr[pkt->index].addr)) {
pkt               393 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->addr[pkt->index].dma_mapped = 1;
pkt               400 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->tiddma) {
pkt               401 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->tidsm[pkt->tidsmidx].length -= newlen;
pkt               402 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (pkt->tidsm[pkt->tidsmidx].length) {
pkt               403 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->tidsm[pkt->tidsmidx].offset += newlen;
pkt               405 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->tidsmidx++;
pkt               406 drivers/infiniband/hw/qib/qib_user_sdma.c 			if (pkt->tidsmidx == pkt->tidsmcount) {
pkt               432 drivers/infiniband/hw/qib/qib_user_sdma.c 	pbclen = pkt->addr[pkt->index].length;
pkt               439 drivers/infiniband/hw/qib/qib_user_sdma.c 	pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
pkt               446 drivers/infiniband/hw/qib/qib_user_sdma.c 	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
pkt               451 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->tiddma) {
pkt               473 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (!pkt->addr[pkt->index].addr) {
pkt               474 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->addr[pkt->index].addr =
pkt               476 drivers/infiniband/hw/qib/qib_user_sdma.c 				pkt->addr[pkt->index].kvaddr,
pkt               477 drivers/infiniband/hw/qib/qib_user_sdma.c 				pkt->addr[pkt->index].dma_length,
pkt               480 drivers/infiniband/hw/qib/qib_user_sdma.c 				pkt->addr[pkt->index].addr)) {
pkt               484 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->addr[pkt->index].dma_mapped = 1;
pkt               492 drivers/infiniband/hw/qib/qib_user_sdma.c 	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
pkt               497 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->tiddma) {
pkt               501 drivers/infiniband/hw/qib/qib_user_sdma.c 			(pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
pkt               502 drivers/infiniband/hw/qib/qib_user_sdma.c 			(pkt->tidsm[pkt->tidsmidx].offset>>2));
pkt               505 drivers/infiniband/hw/qib/qib_user_sdma.c 		hdr->uwords[2] += pkt->payload_size;
pkt               517 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->tiddma)
pkt               520 drivers/infiniband/hw/qib/qib_user_sdma.c 		seqnum.pkt++;
pkt               524 drivers/infiniband/hw/qib/qib_user_sdma.c 	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
pkt               530 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->index = pkt->naddr;
pkt               531 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->payload_size = 0;
pkt               532 drivers/infiniband/hw/qib/qib_user_sdma.c 	pkt->naddr++;
pkt               533 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->naddr == pkt->addrlimit) {
pkt               559 drivers/infiniband/hw/qib/qib_user_sdma.c 				  struct qib_user_sdma_pkt *pkt,
pkt               591 drivers/infiniband/hw/qib/qib_user_sdma.c 	ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
pkt               617 drivers/infiniband/hw/qib/qib_user_sdma.c 					struct qib_user_sdma_pkt *pkt,
pkt               622 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->addr[i].page) {
pkt               624 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (pkt->addr[i].dma_mapped)
pkt               626 drivers/infiniband/hw/qib/qib_user_sdma.c 				       pkt->addr[i].addr,
pkt               627 drivers/infiniband/hw/qib/qib_user_sdma.c 				       pkt->addr[i].dma_length,
pkt               630 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (pkt->addr[i].kvaddr)
pkt               631 drivers/infiniband/hw/qib/qib_user_sdma.c 			kunmap(pkt->addr[i].page);
pkt               633 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (pkt->addr[i].put_page)
pkt               634 drivers/infiniband/hw/qib/qib_user_sdma.c 			put_user_page(pkt->addr[i].page);
pkt               636 drivers/infiniband/hw/qib/qib_user_sdma.c 			__free_page(pkt->addr[i].page);
pkt               637 drivers/infiniband/hw/qib/qib_user_sdma.c 	} else if (pkt->addr[i].kvaddr) {
pkt               639 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (pkt->addr[i].dma_mapped) {
pkt               642 drivers/infiniband/hw/qib/qib_user_sdma.c 				       pkt->addr[i].addr,
pkt               643 drivers/infiniband/hw/qib/qib_user_sdma.c 				       pkt->addr[i].dma_length,
pkt               645 drivers/infiniband/hw/qib/qib_user_sdma.c 			kfree(pkt->addr[i].kvaddr);
pkt               646 drivers/infiniband/hw/qib/qib_user_sdma.c 		} else if (pkt->addr[i].addr) {
pkt               649 drivers/infiniband/hw/qib/qib_user_sdma.c 			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
pkt               652 drivers/infiniband/hw/qib/qib_user_sdma.c 			kfree(pkt->addr[i].kvaddr);
pkt               660 drivers/infiniband/hw/qib/qib_user_sdma.c 				   struct qib_user_sdma_pkt *pkt,
pkt               687 drivers/infiniband/hw/qib/qib_user_sdma.c 			ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
pkt               717 drivers/infiniband/hw/qib/qib_user_sdma.c 				 struct qib_user_sdma_pkt *pkt,
pkt               728 drivers/infiniband/hw/qib/qib_user_sdma.c 		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
pkt               738 drivers/infiniband/hw/qib/qib_user_sdma.c 	for (idx = 1; idx < pkt->naddr; idx++)
pkt               739 drivers/infiniband/hw/qib/qib_user_sdma.c 		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
pkt               744 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->addr[0].dma_mapped) {
pkt               746 drivers/infiniband/hw/qib/qib_user_sdma.c 		       pkt->addr[0].addr,
pkt               747 drivers/infiniband/hw/qib/qib_user_sdma.c 		       pkt->addr[0].dma_length,
pkt               749 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->addr[0].addr = 0;
pkt               750 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->addr[0].dma_mapped = 0;
pkt               759 drivers/infiniband/hw/qib/qib_user_sdma.c 				      struct qib_user_sdma_pkt *pkt,
pkt               765 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->frag_size == pkt->bytes_togo &&
pkt               766 drivers/infiniband/hw/qib/qib_user_sdma.c 			npages >= ARRAY_SIZE(pkt->addr))
pkt               767 drivers/infiniband/hw/qib/qib_user_sdma.c 		ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
pkt               769 drivers/infiniband/hw/qib/qib_user_sdma.c 		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
pkt               779 drivers/infiniband/hw/qib/qib_user_sdma.c 	struct qib_user_sdma_pkt *pkt, *pkt_next;
pkt               781 drivers/infiniband/hw/qib/qib_user_sdma.c 	list_for_each_entry_safe(pkt, pkt_next, list, list) {
pkt               784 drivers/infiniband/hw/qib/qib_user_sdma.c 		for (i = 0; i < pkt->naddr; i++)
pkt               785 drivers/infiniband/hw/qib/qib_user_sdma.c 			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
pkt               787 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (pkt->largepkt)
pkt               788 drivers/infiniband/hw/qib/qib_user_sdma.c 			kfree(pkt);
pkt               790 drivers/infiniband/hw/qib/qib_user_sdma.c 			kmem_cache_free(pq->pkt_slab, pkt);
pkt               815 drivers/infiniband/hw/qib/qib_user_sdma.c 	struct qib_user_sdma_pkt *pkt = NULL;
pkt               911 drivers/infiniband/hw/qib/qib_user_sdma.c 			pktsize = struct_size(pkt, addr, n);
pkt               926 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
pkt               927 drivers/infiniband/hw/qib/qib_user_sdma.c 			if (!pkt) {
pkt               931 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->largepkt = 1;
pkt               932 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->frag_size = frag_size;
pkt               933 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
pkt               936 drivers/infiniband/hw/qib/qib_user_sdma.c 				char *tidsm = (char *)pkt + pktsize;
pkt               944 drivers/infiniband/hw/qib/qib_user_sdma.c 				pkt->tidsm =
pkt               946 drivers/infiniband/hw/qib/qib_user_sdma.c 				pkt->tidsmcount = tidsmsize/
pkt               948 drivers/infiniband/hw/qib/qib_user_sdma.c 				pkt->tidsmidx = 0;
pkt               959 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
pkt               960 drivers/infiniband/hw/qib/qib_user_sdma.c 			if (!pkt) {
pkt               964 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->largepkt = 0;
pkt               965 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->frag_size = bytes_togo;
pkt               966 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->addrlimit = ARRAY_SIZE(pkt->addr);
pkt               968 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->bytes_togo = bytes_togo;
pkt               969 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->payload_size = 0;
pkt               970 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->counter = counter;
pkt               971 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->tiddma = tiddma;
pkt               974 drivers/infiniband/hw/qib/qib_user_sdma.c 		qib_user_sdma_init_frag(pkt, 0, /* index */
pkt               980 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->index = 0;
pkt               981 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->naddr = 1;
pkt               984 drivers/infiniband/hw/qib/qib_user_sdma.c 			ret = qib_user_sdma_init_payload(dd, pq, pkt,
pkt               992 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->addr[0].last_desc = 1;
pkt              1006 drivers/infiniband/hw/qib/qib_user_sdma.c 				pkt->addr[0].addr = dma_addr;
pkt              1007 drivers/infiniband/hw/qib/qib_user_sdma.c 				pkt->addr[0].dma_mapped = 1;
pkt              1013 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->pq = pq;
pkt              1014 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt->index = 0; /* reset index for push on hw */
pkt              1015 drivers/infiniband/hw/qib/qib_user_sdma.c 		*ndesc += pkt->naddr;
pkt              1017 drivers/infiniband/hw/qib/qib_user_sdma.c 		list_add_tail(&pkt->list, list);
pkt              1025 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->largepkt)
pkt              1026 drivers/infiniband/hw/qib/qib_user_sdma.c 		kfree(pkt);
pkt              1028 drivers/infiniband/hw/qib/qib_user_sdma.c 		kmem_cache_free(pq->pkt_slab, pkt);
pkt              1052 drivers/infiniband/hw/qib/qib_user_sdma.c 	struct qib_user_sdma_pkt *pkt;
pkt              1068 drivers/infiniband/hw/qib/qib_user_sdma.c 	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
pkt              1069 drivers/infiniband/hw/qib/qib_user_sdma.c 		s64 descd = ppd->sdma_descq_removed - pkt->added;
pkt              1074 drivers/infiniband/hw/qib/qib_user_sdma.c 		list_move_tail(&pkt->list, &free_list);
pkt              1085 drivers/infiniband/hw/qib/qib_user_sdma.c 		pkt = list_entry(free_list.prev,
pkt              1087 drivers/infiniband/hw/qib/qib_user_sdma.c 		counter = pkt->counter;
pkt              1148 drivers/infiniband/hw/qib/qib_user_sdma.c 		struct qib_user_sdma_pkt *pkt;
pkt              1158 drivers/infiniband/hw/qib/qib_user_sdma.c 			list_for_each_entry_safe(pkt, pkt_prev,
pkt              1160 drivers/infiniband/hw/qib/qib_user_sdma.c 				if (pkt->pq == pq) {
pkt              1161 drivers/infiniband/hw/qib/qib_user_sdma.c 					list_move_tail(&pkt->list, &pq->sent);
pkt              1209 drivers/infiniband/hw/qib/qib_user_sdma.c 				    struct qib_user_sdma_pkt *pkt, int idx,
pkt              1212 drivers/infiniband/hw/qib/qib_user_sdma.c 	const u64 addr = (u64) pkt->addr[idx].addr +
pkt              1213 drivers/infiniband/hw/qib/qib_user_sdma.c 		(u64) pkt->addr[idx].offset;
pkt              1214 drivers/infiniband/hw/qib/qib_user_sdma.c 	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
pkt              1221 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->addr[idx].first_desc)
pkt              1223 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (pkt->addr[idx].last_desc) {
pkt              1252 drivers/infiniband/hw/qib/qib_user_sdma.c 		struct qib_user_sdma_pkt *pkt =
pkt              1259 drivers/infiniband/hw/qib/qib_user_sdma.c 		for (i = pkt->index; i < pkt->naddr && nfree; i++) {
pkt              1260 drivers/infiniband/hw/qib/qib_user_sdma.c 			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
pkt              1261 drivers/infiniband/hw/qib/qib_user_sdma.c 			ofs += pkt->addr[i].length >> 2;
pkt              1271 drivers/infiniband/hw/qib/qib_user_sdma.c 			if (pkt->addr[i].last_desc == 0)
pkt              1281 drivers/infiniband/hw/qib/qib_user_sdma.c 				for (j = pkt->index; j <= i; j++) {
pkt              1288 drivers/infiniband/hw/qib/qib_user_sdma.c 			c += i + 1 - pkt->index;
pkt              1289 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->index = i + 1; /* index for next first */
pkt              1297 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (pkt->index == pkt->naddr) {
pkt              1298 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->added = ppd->sdma_descq_added;
pkt              1299 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->pq->added = pkt->added;
pkt              1300 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->pq->num_pending--;
pkt              1301 drivers/infiniband/hw/qib/qib_user_sdma.c 			spin_lock(&pkt->pq->sent_lock);
pkt              1302 drivers/infiniband/hw/qib/qib_user_sdma.c 			pkt->pq->num_sending++;
pkt              1303 drivers/infiniband/hw/qib/qib_user_sdma.c 			list_move_tail(&pkt->list, &pkt->pq->sent);
pkt              1304 drivers/infiniband/hw/qib/qib_user_sdma.c 			spin_unlock(&pkt->pq->sent_lock);
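Editor's note: the qib_user_sdma.c entries above revolve around per-fragment bookkeeping: qib_user_sdma_init_frag() fills the next slot of a fixed addr[] descriptor array, bytes_togo shrinks as fragments are described, and naddr is checked against addrlimit. The sketch below mirrors only that bookkeeping; the struct layout and helper name are stand-ins, not the driver's.

/* Sketch of per-fragment descriptor bookkeeping, per the qib lines above. */
#include <stddef.h>
#include <stdio.h>

struct frag_desc {
	size_t offset;
	size_t length;
	int first_desc;
	int last_desc;
};

struct sdma_pkt {
	unsigned naddr;			/* next free descriptor slot    */
	unsigned addrlimit;		/* size of addr[]               */
	size_t bytes_togo;		/* user bytes still to describe */
	struct frag_desc addr[8];
};

static int pkt_add_frag(struct sdma_pkt *pkt, size_t offset, size_t len,
			int first, int last)
{
	if (pkt->naddr == pkt->addrlimit)
		return -1;		/* mirrors the addrlimit check  */

	pkt->addr[pkt->naddr] = (struct frag_desc){ offset, len, first, last };
	pkt->bytes_togo -= len;
	pkt->naddr++;
	return 0;
}

int main(void)
{
	struct sdma_pkt pkt = { .addrlimit = 8, .bytes_togo = 4096 };

	pkt_add_frag(&pkt, 0, 2048, 1, 0);
	pkt_add_frag(&pkt, 2048, 2048, 0, 1);
	printf("frags=%u togo=%zu\n", pkt.naddr, pkt.bytes_togo);
	return 0;
}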
pkt               102 drivers/infiniband/sw/rxe/rxe_av.c struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
pkt               104 drivers/infiniband/sw/rxe/rxe_av.c 	if (!pkt || !pkt->qp)
pkt               107 drivers/infiniband/sw/rxe/rxe_av.c 	if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC)
pkt               108 drivers/infiniband/sw/rxe/rxe_av.c 		return &pkt->qp->pri_av;
pkt               110 drivers/infiniband/sw/rxe/rxe_av.c 	return (pkt->wqe) ? &pkt->wqe->av : NULL;
pkt               163 drivers/infiniband/sw/rxe/rxe_comp.c 				      struct rxe_pkt_info *pkt,
pkt               176 drivers/infiniband/sw/rxe/rxe_comp.c 		return pkt ? COMPST_DONE : COMPST_EXIT;
pkt               187 drivers/infiniband/sw/rxe/rxe_comp.c 	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
pkt               198 drivers/infiniband/sw/rxe/rxe_comp.c 					struct rxe_pkt_info *pkt,
pkt               206 drivers/infiniband/sw/rxe/rxe_comp.c 	diff = psn_compare(pkt->psn, wqe->last_psn);
pkt               220 drivers/infiniband/sw/rxe/rxe_comp.c 	diff = psn_compare(pkt->psn, qp->comp.psn);
pkt               225 drivers/infiniband/sw/rxe/rxe_comp.c 		if (pkt->psn == wqe->last_psn)
pkt               237 drivers/infiniband/sw/rxe/rxe_comp.c 					struct rxe_pkt_info *pkt,
pkt               240 drivers/infiniband/sw/rxe/rxe_comp.c 	unsigned int mask = pkt->mask;
pkt               255 drivers/infiniband/sw/rxe/rxe_comp.c 		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
pkt               256 drivers/infiniband/sw/rxe/rxe_comp.c 		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
pkt               260 drivers/infiniband/sw/rxe/rxe_comp.c 			if ((pkt->psn == wqe->first_psn &&
pkt               261 drivers/infiniband/sw/rxe/rxe_comp.c 			     pkt->opcode ==
pkt               264 drivers/infiniband/sw/rxe/rxe_comp.c 			     pkt->opcode ==
pkt               276 drivers/infiniband/sw/rxe/rxe_comp.c 	switch (pkt->opcode) {
pkt               280 drivers/infiniband/sw/rxe/rxe_comp.c 		syn = aeth_syn(pkt);
pkt               298 drivers/infiniband/sw/rxe/rxe_comp.c 		syn = aeth_syn(pkt);
pkt               310 drivers/infiniband/sw/rxe/rxe_comp.c 		syn = aeth_syn(pkt);
pkt               326 drivers/infiniband/sw/rxe/rxe_comp.c 				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
pkt               329 drivers/infiniband/sw/rxe/rxe_comp.c 					qp->comp.psn = pkt->psn;
pkt               368 drivers/infiniband/sw/rxe/rxe_comp.c 				      struct rxe_pkt_info *pkt,
pkt               374 drivers/infiniband/sw/rxe/rxe_comp.c 			&wqe->dma, payload_addr(pkt),
pkt               375 drivers/infiniband/sw/rxe/rxe_comp.c 			payload_size(pkt), to_mem_obj, NULL);
pkt               379 drivers/infiniband/sw/rxe/rxe_comp.c 	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
pkt               386 drivers/infiniband/sw/rxe/rxe_comp.c 					struct rxe_pkt_info *pkt,
pkt               391 drivers/infiniband/sw/rxe/rxe_comp.c 	u64 atomic_orig = atmack_orig(pkt);
pkt               471 drivers/infiniband/sw/rxe/rxe_comp.c 					   struct rxe_pkt_info *pkt,
pkt               510 drivers/infiniband/sw/rxe/rxe_comp.c 	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
pkt               517 drivers/infiniband/sw/rxe/rxe_comp.c 					   struct rxe_pkt_info *pkt,
pkt               520 drivers/infiniband/sw/rxe/rxe_comp.c 	if (pkt && wqe->state == wqe_state_pending) {
pkt               563 drivers/infiniband/sw/rxe/rxe_comp.c 	struct rxe_pkt_info *pkt = NULL;
pkt               594 drivers/infiniband/sw/rxe/rxe_comp.c 				pkt = SKB_TO_PKT(skb);
pkt               601 drivers/infiniband/sw/rxe/rxe_comp.c 			state = get_wqe(qp, pkt, &wqe);
pkt               605 drivers/infiniband/sw/rxe/rxe_comp.c 			state = check_psn(qp, pkt, wqe);
pkt               609 drivers/infiniband/sw/rxe/rxe_comp.c 			state = check_ack(qp, pkt, wqe);
pkt               613 drivers/infiniband/sw/rxe/rxe_comp.c 			state = do_read(qp, pkt, wqe);
pkt               617 drivers/infiniband/sw/rxe/rxe_comp.c 			state = do_atomic(qp, pkt, wqe);
pkt               622 drivers/infiniband/sw/rxe/rxe_comp.c 			    wqe->last_psn == pkt->psn)
pkt               629 drivers/infiniband/sw/rxe/rxe_comp.c 			state = complete_ack(qp, pkt, wqe);
pkt               633 drivers/infiniband/sw/rxe/rxe_comp.c 			state = complete_wqe(qp, pkt, wqe);
pkt               637 drivers/infiniband/sw/rxe/rxe_comp.c 			if (pkt->mask & RXE_END_MASK)
pkt               640 drivers/infiniband/sw/rxe/rxe_comp.c 				qp->comp.opcode = pkt->opcode;
pkt               642 drivers/infiniband/sw/rxe/rxe_comp.c 			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
pkt               643 drivers/infiniband/sw/rxe/rxe_comp.c 				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
pkt               654 drivers/infiniband/sw/rxe/rxe_comp.c 			if (pkt) {
pkt               655 drivers/infiniband/sw/rxe/rxe_comp.c 				rxe_drop_ref(pkt->qp);
pkt               702 drivers/infiniband/sw/rxe/rxe_comp.c 				if (pkt) {
pkt               703 drivers/infiniband/sw/rxe/rxe_comp.c 					rxe_drop_ref(pkt->qp);
pkt               731 drivers/infiniband/sw/rxe/rxe_comp.c 				if (pkt) {
pkt               732 drivers/infiniband/sw/rxe/rxe_comp.c 					rxe_drop_ref(pkt->qp);
pkt               755 drivers/infiniband/sw/rxe/rxe_comp.c 					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
pkt               757 drivers/infiniband/sw/rxe/rxe_comp.c 				rxe_drop_ref(pkt->qp);
pkt               774 drivers/infiniband/sw/rxe/rxe_comp.c 			if (pkt) {
pkt               775 drivers/infiniband/sw/rxe/rxe_comp.c 				rxe_drop_ref(pkt->qp);
pkt                62 drivers/infiniband/sw/rxe/rxe_hdr.h static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
pkt                64 drivers/infiniband/sw/rxe/rxe_hdr.h 	return container_of((void *)pkt, struct sk_buff, cb);
pkt               308 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
pkt               310 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_opcode(pkt->hdr + pkt->offset);
pkt               313 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
pkt               315 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_opcode(pkt->hdr + pkt->offset, opcode);
pkt               318 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u8 bth_se(struct rxe_pkt_info *pkt)
pkt               320 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_se(pkt->hdr + pkt->offset);
pkt               323 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
pkt               325 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_se(pkt->hdr + pkt->offset, se);
pkt               328 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u8 bth_mig(struct rxe_pkt_info *pkt)
pkt               330 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_mig(pkt->hdr + pkt->offset);
pkt               333 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
pkt               335 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_mig(pkt->hdr + pkt->offset, mig);
pkt               338 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u8 bth_pad(struct rxe_pkt_info *pkt)
pkt               340 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_pad(pkt->hdr + pkt->offset);
pkt               343 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
pkt               345 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_pad(pkt->hdr + pkt->offset, pad);
pkt               348 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u8 bth_tver(struct rxe_pkt_info *pkt)
pkt               350 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_tver(pkt->hdr + pkt->offset);
pkt               353 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
pkt               355 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_tver(pkt->hdr + pkt->offset, tver);
pkt               358 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
pkt               360 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_pkey(pkt->hdr + pkt->offset);
pkt               363 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
pkt               365 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_pkey(pkt->hdr + pkt->offset, pkey);
pkt               368 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
pkt               370 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_qpn(pkt->hdr + pkt->offset);
pkt               373 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
pkt               375 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_qpn(pkt->hdr + pkt->offset, qpn);
pkt               378 drivers/infiniband/sw/rxe/rxe_hdr.h static inline int bth_fecn(struct rxe_pkt_info *pkt)
pkt               380 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_fecn(pkt->hdr + pkt->offset);
pkt               383 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
pkt               385 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_fecn(pkt->hdr + pkt->offset, fecn);
pkt               388 drivers/infiniband/sw/rxe/rxe_hdr.h static inline int bth_becn(struct rxe_pkt_info *pkt)
pkt               390 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_becn(pkt->hdr + pkt->offset);
pkt               393 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
pkt               395 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_becn(pkt->hdr + pkt->offset, becn);
pkt               398 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
pkt               400 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_resv6a(pkt->hdr + pkt->offset);
pkt               403 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
pkt               405 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_resv6a(pkt->hdr + pkt->offset);
pkt               408 drivers/infiniband/sw/rxe/rxe_hdr.h static inline int bth_ack(struct rxe_pkt_info *pkt)
pkt               410 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_ack(pkt->hdr + pkt->offset);
pkt               413 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
pkt               415 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_ack(pkt->hdr + pkt->offset, ack);
pkt               418 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
pkt               420 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_resv7(pkt->hdr + pkt->offset);
pkt               423 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u32 bth_psn(struct rxe_pkt_info *pkt)
pkt               425 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __bth_psn(pkt->hdr + pkt->offset);
pkt               428 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
pkt               430 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_psn(pkt->hdr + pkt->offset, psn);
pkt               433 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
pkt               437 drivers/infiniband/sw/rxe/rxe_hdr.h 	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);
pkt               476 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
pkt               478 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __rdeth_een(pkt->hdr + pkt->offset
pkt               479 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
pkt               482 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
pkt               484 drivers/infiniband/sw/rxe/rxe_hdr.h 	__rdeth_set_een(pkt->hdr + pkt->offset
pkt               485 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
pkt               527 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
pkt               529 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __deth_qkey(pkt->hdr + pkt->offset
pkt               530 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
pkt               533 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
pkt               535 drivers/infiniband/sw/rxe/rxe_hdr.h 	__deth_set_qkey(pkt->hdr + pkt->offset
pkt               536 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
pkt               539 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
pkt               541 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __deth_sqp(pkt->hdr + pkt->offset
pkt               542 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
pkt               545 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
pkt               547 drivers/infiniband/sw/rxe/rxe_hdr.h 	__deth_set_sqp(pkt->hdr + pkt->offset
pkt               548 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
pkt               602 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u64 reth_va(struct rxe_pkt_info *pkt)
pkt               604 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __reth_va(pkt->hdr + pkt->offset
pkt               605 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
pkt               608 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
pkt               610 drivers/infiniband/sw/rxe/rxe_hdr.h 	__reth_set_va(pkt->hdr + pkt->offset
pkt               611 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
pkt               614 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
pkt               616 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __reth_rkey(pkt->hdr + pkt->offset
pkt               617 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
pkt               620 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
pkt               622 drivers/infiniband/sw/rxe/rxe_hdr.h 	__reth_set_rkey(pkt->hdr + pkt->offset
pkt               623 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
pkt               626 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u32 reth_len(struct rxe_pkt_info *pkt)
pkt               628 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __reth_len(pkt->hdr + pkt->offset
pkt               629 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
pkt               632 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
pkt               634 drivers/infiniband/sw/rxe/rxe_hdr.h 	__reth_set_len(pkt->hdr + pkt->offset
pkt               635 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
pkt               704 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
pkt               706 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __atmeth_va(pkt->hdr + pkt->offset
pkt               707 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
pkt               710 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
pkt               712 drivers/infiniband/sw/rxe/rxe_hdr.h 	__atmeth_set_va(pkt->hdr + pkt->offset
pkt               713 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
pkt               716 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
pkt               718 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __atmeth_rkey(pkt->hdr + pkt->offset
pkt               719 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
pkt               722 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
pkt               724 drivers/infiniband/sw/rxe/rxe_hdr.h 	__atmeth_set_rkey(pkt->hdr + pkt->offset
pkt               725 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
pkt               728 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
pkt               730 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __atmeth_swap_add(pkt->hdr + pkt->offset
pkt               731 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
pkt               734 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
pkt               736 drivers/infiniband/sw/rxe/rxe_hdr.h 	__atmeth_set_swap_add(pkt->hdr + pkt->offset
pkt               737 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
pkt               740 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
pkt               742 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __atmeth_comp(pkt->hdr + pkt->offset
pkt               743 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
pkt               746 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
pkt               748 drivers/infiniband/sw/rxe/rxe_hdr.h 	__atmeth_set_comp(pkt->hdr + pkt->offset
pkt               749 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
pkt               808 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
pkt               810 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __aeth_syn(pkt->hdr + pkt->offset
pkt               811 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
pkt               814 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
pkt               816 drivers/infiniband/sw/rxe/rxe_hdr.h 	__aeth_set_syn(pkt->hdr + pkt->offset
pkt               817 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
pkt               820 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
pkt               822 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __aeth_msn(pkt->hdr + pkt->offset
pkt               823 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
pkt               826 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
pkt               828 drivers/infiniband/sw/rxe/rxe_hdr.h 	__aeth_set_msn(pkt->hdr + pkt->offset
pkt               829 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
pkt               853 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
pkt               855 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __atmack_orig(pkt->hdr + pkt->offset
pkt               856 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
pkt               859 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
pkt               861 drivers/infiniband/sw/rxe/rxe_hdr.h 	__atmack_set_orig(pkt->hdr + pkt->offset
pkt               862 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
pkt               886 drivers/infiniband/sw/rxe/rxe_hdr.h static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
pkt               888 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __immdt_imm(pkt->hdr + pkt->offset
pkt               889 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
pkt               892 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
pkt               894 drivers/infiniband/sw/rxe/rxe_hdr.h 	__immdt_set_imm(pkt->hdr + pkt->offset
pkt               895 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
pkt               919 drivers/infiniband/sw/rxe/rxe_hdr.h static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
pkt               921 drivers/infiniband/sw/rxe/rxe_hdr.h 	return __ieth_rkey(pkt->hdr + pkt->offset
pkt               922 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
pkt               925 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
pkt               927 drivers/infiniband/sw/rxe/rxe_hdr.h 	__ieth_set_rkey(pkt->hdr + pkt->offset
pkt               928 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
pkt               943 drivers/infiniband/sw/rxe/rxe_hdr.h static inline size_t header_size(struct rxe_pkt_info *pkt)
pkt               945 drivers/infiniband/sw/rxe/rxe_hdr.h 	return pkt->offset + rxe_opcode[pkt->opcode].length;
pkt               948 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void *payload_addr(struct rxe_pkt_info *pkt)
pkt               950 drivers/infiniband/sw/rxe/rxe_hdr.h 	return pkt->hdr + pkt->offset
pkt               951 drivers/infiniband/sw/rxe/rxe_hdr.h 		+ rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
pkt               954 drivers/infiniband/sw/rxe/rxe_hdr.h static inline size_t payload_size(struct rxe_pkt_info *pkt)
pkt               956 drivers/infiniband/sw/rxe/rxe_hdr.h 	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
pkt               957 drivers/infiniband/sw/rxe/rxe_hdr.h 		- bth_pad(pkt) - RXE_ICRC_SIZE;
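
The rxe_hdr.h lines above all use one addressing rule: a sub-header (AETH, ATMACK, IMMDT, IETH) or the payload is found at pkt->hdr + pkt->offset + rxe_opcode[pkt->opcode].offset[...]. Below is a minimal standalone sketch of that scheme with simplified stand-in types; the table contents and struct layout are assumptions for illustration, not the driver's real definitions.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* simplified stand-ins for the rxe structures referenced above */
    enum { RXE_BTH, RXE_AETH, RXE_PAYLOAD, NUM_HDR_TYPES };

    struct opcode_info {
            size_t length;                 /* total header length for this opcode */
            int offset[NUM_HDR_TYPES];     /* per-sub-header offsets */
    };

    struct pkt_info {
            uint8_t *hdr;                  /* start of the BTH */
            size_t   offset;               /* offset of the BTH within the buffer */
            int      opcode;
    };

    /* assumed example: opcode 0 carries a 12-byte BTH followed by a 4-byte AETH */
    static const struct opcode_info opcode_tbl[] = {
            [0] = { .length = 16,
                    .offset = { [RXE_BTH] = 0, [RXE_AETH] = 12, [RXE_PAYLOAD] = 16 } },
    };

    /* same addressing rule as the aeth/ieth/immdt helpers above */
    static uint8_t *subhdr_addr(struct pkt_info *pkt, int which)
    {
            return pkt->hdr + pkt->offset + opcode_tbl[pkt->opcode].offset[which];
    }

    int main(void)
    {
            uint8_t buf[64] = { 0 };
            struct pkt_info pkt = { .hdr = buf, .offset = 0, .opcode = 0 };

            *subhdr_addr(&pkt, RXE_AETH) = 0x1f;   /* e.g. write an AETH syndrome */
            printf("AETH at offset %td\n", subhdr_addr(&pkt, RXE_AETH) - buf);
            return 0;
    }
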
pkt                38 drivers/infiniband/sw/rxe/rxe_icrc.c u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
pkt                83 drivers/infiniband/sw/rxe/rxe_icrc.c 	memcpy(&pshdr[bth_offset], pkt->hdr, RXE_BTH_BYTES);
pkt                90 drivers/infiniband/sw/rxe/rxe_icrc.c 	crc = rxe_crc32(pkt->rxe, crc, pshdr, length);
pkt                93 drivers/infiniband/sw/rxe/rxe_icrc.c 	crc = rxe_crc32(pkt->rxe, crc, pkt->hdr + RXE_BTH_BYTES,
pkt                94 drivers/infiniband/sw/rxe/rxe_icrc.c 			rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
pkt                49 drivers/infiniband/sw/rxe/rxe_loc.h struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
pkt               144 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
pkt               146 drivers/infiniband/sw/rxe/rxe_loc.h 				int paylen, struct rxe_pkt_info *pkt);
pkt               147 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
pkt               239 drivers/infiniband/sw/rxe/rxe_loc.h u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
pkt               250 drivers/infiniband/sw/rxe/rxe_loc.h static inline int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
pkt               254 drivers/infiniband/sw/rxe/rxe_loc.h 	int is_request = pkt->mask & RXE_REQ_MASK;
pkt               263 drivers/infiniband/sw/rxe/rxe_loc.h 	if (pkt->mask & RXE_LOOPBACK_MASK) {
pkt               264 drivers/infiniband/sw/rxe/rxe_loc.h 		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
pkt               268 drivers/infiniband/sw/rxe/rxe_loc.h 		err = rxe_send(pkt, skb);
pkt               278 drivers/infiniband/sw/rxe/rxe_loc.h 	    (pkt->mask & RXE_END_MASK)) {
pkt               279 drivers/infiniband/sw/rxe/rxe_loc.h 		pkt->wqe->state = wqe_state_done;
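
The rxe_xmit_packet() lines above show the transmit split: packets flagged RXE_LOOPBACK_MASK are copied into the skb control block and delivered locally, everything else goes through rxe_send(), and a completed request (RXE_END_MASK) marks its WQE done. A minimal sketch of that mask-driven dispatch follows; the loopback()/send_out() helpers and the flag values are placeholders, not the kernel API.

    #include <stdio.h>

    #define REQ_MASK       (1u << 0)
    #define END_MASK       (1u << 1)
    #define LOOPBACK_MASK  (1u << 2)

    struct pkt { unsigned int mask; };

    static int loopback(struct pkt *p) { printf("looped back\n"); return 0; }
    static int send_out(struct pkt *p) { printf("sent on wire\n"); return 0; }

    static int xmit(struct pkt *p, int *wqe_done)
    {
            int err = (p->mask & LOOPBACK_MASK) ? loopback(p) : send_out(p);

            if (!err && (p->mask & REQ_MASK) && (p->mask & END_MASK))
                    *wqe_done = 1;   /* last request packet completes the WQE */
            return err;
    }

    int main(void)
    {
            int done = 0;
            struct pkt p = { .mask = REQ_MASK | END_MASK | LOOPBACK_MASK };

            xmit(&p, &done);
            printf("wqe done: %d\n", done);
            return 0;
    }
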
pkt               198 drivers/infiniband/sw/rxe/rxe_net.c 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
pkt               214 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->rxe = rxe;
pkt               215 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->port_num = 1;
pkt               216 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->hdr = (u8 *)(udph + 1);
pkt               217 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->mask = RXE_GRH_MASK;
pkt               218 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
pkt               343 drivers/infiniband/sw/rxe/rxe_net.c static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
pkt               345 drivers/infiniband/sw/rxe/rxe_net.c 	struct rxe_qp *qp = pkt->qp;
pkt               349 drivers/infiniband/sw/rxe/rxe_net.c 	struct rxe_av *av = rxe_get_av(pkt);
pkt               369 drivers/infiniband/sw/rxe/rxe_net.c static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
pkt               371 drivers/infiniband/sw/rxe/rxe_net.c 	struct rxe_qp *qp = pkt->qp;
pkt               373 drivers/infiniband/sw/rxe/rxe_net.c 	struct rxe_av *av = rxe_get_av(pkt);
pkt               394 drivers/infiniband/sw/rxe/rxe_net.c int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
pkt               399 drivers/infiniband/sw/rxe/rxe_net.c 		err = prepare4(pkt, skb);
pkt               401 drivers/infiniband/sw/rxe/rxe_net.c 		err = prepare6(pkt, skb);
pkt               403 drivers/infiniband/sw/rxe/rxe_net.c 	*crc = rxe_icrc_hdr(pkt, skb);
pkt               405 drivers/infiniband/sw/rxe/rxe_net.c 	if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
pkt               406 drivers/infiniband/sw/rxe/rxe_net.c 		pkt->mask |= RXE_LOOPBACK_MASK;
pkt               424 drivers/infiniband/sw/rxe/rxe_net.c int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
pkt               429 drivers/infiniband/sw/rxe/rxe_net.c 	skb->sk = pkt->qp->sk->sk;
pkt               431 drivers/infiniband/sw/rxe/rxe_net.c 	rxe_add_ref(pkt->qp);
pkt               432 drivers/infiniband/sw/rxe/rxe_net.c 	atomic_inc(&pkt->qp->skb_out);
pkt               440 drivers/infiniband/sw/rxe/rxe_net.c 		atomic_dec(&pkt->qp->skb_out);
pkt               441 drivers/infiniband/sw/rxe/rxe_net.c 		rxe_drop_ref(pkt->qp);
pkt               460 drivers/infiniband/sw/rxe/rxe_net.c 				int paylen, struct rxe_pkt_info *pkt)
pkt               504 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->rxe	= rxe;
pkt               505 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->port_num	= port_num;
pkt               506 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->hdr	= skb_put_zero(skb, paylen);
pkt               507 drivers/infiniband/sw/rxe/rxe_net.c 	pkt->mask	|= RXE_GRH_MASK;
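
rxe_prepare() above builds the IPv4 or IPv6 encapsulation (prepare4/prepare6), seeds the ICRC, and marks the packet for loopback when the AV's destination MAC equals the local netdev address. A small sketch of that loopback test, with a plain 6-byte compare standing in for ether_addr_equal():

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define LOOPBACK_MASK (1u << 2)

    struct pkt { unsigned int mask; };

    /* stand-in for ether_addr_equal(): true when both MACs match */
    static int mac_equal(const uint8_t *a, const uint8_t *b)
    {
            return memcmp(a, b, 6) == 0;
    }

    int main(void)
    {
            uint8_t dev_addr[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
            uint8_t dmac[6]     = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
            struct pkt pkt = { .mask = 0 };

            if (mac_equal(dev_addr, dmac))
                    pkt.mask |= LOOPBACK_MASK;   /* deliver locally, skip the wire */

            printf("mask = %#x\n", pkt.mask);
            return 0;
    }
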
pkt                39 drivers/infiniband/sw/rxe/rxe_recv.c static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
pkt                47 drivers/infiniband/sw/rxe/rxe_recv.c 		if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
pkt                53 drivers/infiniband/sw/rxe/rxe_recv.c 		if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
pkt                61 drivers/infiniband/sw/rxe/rxe_recv.c 		if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
pkt                71 drivers/infiniband/sw/rxe/rxe_recv.c 	if (pkt->mask & RXE_REQ_MASK) {
pkt               101 drivers/infiniband/sw/rxe/rxe_recv.c static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
pkt               107 drivers/infiniband/sw/rxe/rxe_recv.c 	u16 pkey = bth_pkey(pkt);
pkt               109 drivers/infiniband/sw/rxe/rxe_recv.c 	pkt->pkey_index = 0;
pkt               114 drivers/infiniband/sw/rxe/rxe_recv.c 				pkt->pkey_index = i;
pkt               133 drivers/infiniband/sw/rxe/rxe_recv.c 		pkt->pkey_index = qp->attr.pkey_index;
pkt               137 drivers/infiniband/sw/rxe/rxe_recv.c 	    pkt->mask) {
pkt               140 drivers/infiniband/sw/rxe/rxe_recv.c 		if (unlikely(deth_qkey(pkt) != qkey)) {
pkt               142 drivers/infiniband/sw/rxe/rxe_recv.c 					    deth_qkey(pkt), qkey, qpn);
pkt               154 drivers/infiniband/sw/rxe/rxe_recv.c static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
pkt               157 drivers/infiniband/sw/rxe/rxe_recv.c 	struct sk_buff *skb = PKT_TO_SKB(pkt);
pkt               162 drivers/infiniband/sw/rxe/rxe_recv.c 	if (unlikely(pkt->port_num != qp->attr.port_num)) {
pkt               164 drivers/infiniband/sw/rxe/rxe_recv.c 				    pkt->port_num, qp->attr.port_num);
pkt               214 drivers/infiniband/sw/rxe/rxe_recv.c static int hdr_check(struct rxe_pkt_info *pkt)
pkt               216 drivers/infiniband/sw/rxe/rxe_recv.c 	struct rxe_dev *rxe = pkt->rxe;
pkt               219 drivers/infiniband/sw/rxe/rxe_recv.c 	u32 qpn = bth_qpn(pkt);
pkt               223 drivers/infiniband/sw/rxe/rxe_recv.c 	if (unlikely(bth_tver(pkt) != BTH_TVER)) {
pkt               242 drivers/infiniband/sw/rxe/rxe_recv.c 		err = check_type_state(rxe, pkt, qp);
pkt               246 drivers/infiniband/sw/rxe/rxe_recv.c 		err = check_addr(rxe, pkt, qp);
pkt               250 drivers/infiniband/sw/rxe/rxe_recv.c 		err = check_keys(rxe, pkt, qpn, qp);
pkt               254 drivers/infiniband/sw/rxe/rxe_recv.c 		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
pkt               260 drivers/infiniband/sw/rxe/rxe_recv.c 	pkt->qp = qp;
pkt               269 drivers/infiniband/sw/rxe/rxe_recv.c static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
pkt               271 drivers/infiniband/sw/rxe/rxe_recv.c 	if (pkt->mask & RXE_REQ_MASK)
pkt               272 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_resp_queue_pkt(pkt->qp, skb);
pkt               274 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_comp_queue_pkt(pkt->qp, skb);
pkt               279 drivers/infiniband/sw/rxe/rxe_recv.c 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
pkt               301 drivers/infiniband/sw/rxe/rxe_recv.c 		pkt = SKB_TO_PKT(skb);
pkt               304 drivers/infiniband/sw/rxe/rxe_recv.c 		err = check_type_state(rxe, pkt, qp);
pkt               308 drivers/infiniband/sw/rxe/rxe_recv.c 		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
pkt               318 drivers/infiniband/sw/rxe/rxe_recv.c 		pkt->qp = qp;
pkt               320 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_rcv_pkt(pkt, skb);
pkt               359 drivers/infiniband/sw/rxe/rxe_recv.c 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
pkt               360 drivers/infiniband/sw/rxe/rxe_recv.c 	struct rxe_dev *rxe = pkt->rxe;
pkt               364 drivers/infiniband/sw/rxe/rxe_recv.c 	pkt->offset = 0;
pkt               366 drivers/infiniband/sw/rxe/rxe_recv.c 	if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
pkt               374 drivers/infiniband/sw/rxe/rxe_recv.c 	pkt->opcode = bth_opcode(pkt);
pkt               375 drivers/infiniband/sw/rxe/rxe_recv.c 	pkt->psn = bth_psn(pkt);
pkt               376 drivers/infiniband/sw/rxe/rxe_recv.c 	pkt->qp = NULL;
pkt               377 drivers/infiniband/sw/rxe/rxe_recv.c 	pkt->mask |= rxe_opcode[pkt->opcode].mask;
pkt               379 drivers/infiniband/sw/rxe/rxe_recv.c 	if (unlikely(skb->len < header_size(pkt)))
pkt               382 drivers/infiniband/sw/rxe/rxe_recv.c 	err = hdr_check(pkt);
pkt               387 drivers/infiniband/sw/rxe/rxe_recv.c 	icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
pkt               390 drivers/infiniband/sw/rxe/rxe_recv.c 	calc_icrc = rxe_icrc_hdr(pkt, skb);
pkt               391 drivers/infiniband/sw/rxe/rxe_recv.c 	calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
pkt               392 drivers/infiniband/sw/rxe/rxe_recv.c 			      payload_size(pkt) + bth_pad(pkt));
pkt               409 drivers/infiniband/sw/rxe/rxe_recv.c 	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
pkt               412 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_rcv_pkt(pkt, skb);
pkt               417 drivers/infiniband/sw/rxe/rxe_recv.c 	if (pkt->qp)
pkt               418 drivers/infiniband/sw/rxe/rxe_recv.c 		rxe_drop_ref(pkt->qp);
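
rxe_rcv() above validates a packet in stages: enough bytes for the BTH, header length taken from the opcode table, hdr_check() against the QP, then the trailing 4-byte ICRC compared against a recomputed value. The sketch below shows only the shape of the length-plus-trailer check; toy_icrc() is an invented stand-in for the driver's seeded CRC32, not the real algorithm.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define ICRC_SIZE 4

    /* toy digest standing in for the ICRC; any 32-bit checksum gives the
     * same shape of check as rxe_icrc_hdr()/rxe_crc32() above */
    static uint32_t toy_icrc(const uint8_t *buf, size_t len)
    {
            uint32_t sum = 0xdebb20e3;

            while (len--)
                    sum = (sum << 5) + sum + *buf++;
            return sum;
    }

    /* accept a packet only if it is long enough and its trailing ICRC matches */
    static int check_packet(const uint8_t *pkt, size_t len, size_t hdr_size)
    {
            uint32_t calc, recv;

            if (len < hdr_size + ICRC_SIZE)
                    return -1;                      /* runt packet */

            calc = toy_icrc(pkt, len - ICRC_SIZE);
            memcpy(&recv, pkt + len - ICRC_SIZE, ICRC_SIZE);
            return calc == recv ? 0 : -1;           /* bad ICRC -> drop */
    }

    int main(void)
    {
            uint8_t pkt[32] = { 1, 2, 3 };
            uint32_t icrc = toy_icrc(pkt, sizeof(pkt) - ICRC_SIZE);

            memcpy(pkt + sizeof(pkt) - ICRC_SIZE, &icrc, ICRC_SIZE);
            printf("check: %d\n", check_packet(pkt, sizeof(pkt), 12));
            return 0;
    }
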
pkt               381 drivers/infiniband/sw/rxe/rxe_req.c 				       struct rxe_pkt_info *pkt)
pkt               401 drivers/infiniband/sw/rxe/rxe_req.c 	pkt->opcode	= opcode;
pkt               402 drivers/infiniband/sw/rxe/rxe_req.c 	pkt->qp		= qp;
pkt               403 drivers/infiniband/sw/rxe/rxe_req.c 	pkt->psn	= qp->req.psn;
pkt               404 drivers/infiniband/sw/rxe/rxe_req.c 	pkt->mask	= rxe_opcode[opcode].mask;
pkt               405 drivers/infiniband/sw/rxe/rxe_req.c 	pkt->paylen	= paylen;
pkt               406 drivers/infiniband/sw/rxe/rxe_req.c 	pkt->offset	= 0;
pkt               407 drivers/infiniband/sw/rxe/rxe_req.c 	pkt->wqe	= wqe;
pkt               410 drivers/infiniband/sw/rxe/rxe_req.c 	av = rxe_get_av(pkt);
pkt               411 drivers/infiniband/sw/rxe/rxe_req.c 	skb = rxe_init_packet(rxe, av, paylen, pkt);
pkt               417 drivers/infiniband/sw/rxe/rxe_req.c 			(pkt->mask & RXE_END_MASK) &&
pkt               418 drivers/infiniband/sw/rxe/rxe_req.c 			((pkt->mask & (RXE_SEND_MASK)) ||
pkt               419 drivers/infiniband/sw/rxe/rxe_req.c 			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
pkt               426 drivers/infiniband/sw/rxe/rxe_req.c 	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
pkt               429 drivers/infiniband/sw/rxe/rxe_req.c 	ack_req = ((pkt->mask & RXE_END_MASK) ||
pkt               434 drivers/infiniband/sw/rxe/rxe_req.c 	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
pkt               435 drivers/infiniband/sw/rxe/rxe_req.c 		 ack_req, pkt->psn);
pkt               438 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_RETH_MASK) {
pkt               439 drivers/infiniband/sw/rxe/rxe_req.c 		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
pkt               440 drivers/infiniband/sw/rxe/rxe_req.c 		reth_set_va(pkt, wqe->iova);
pkt               441 drivers/infiniband/sw/rxe/rxe_req.c 		reth_set_len(pkt, wqe->dma.resid);
pkt               444 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_IMMDT_MASK)
pkt               445 drivers/infiniband/sw/rxe/rxe_req.c 		immdt_set_imm(pkt, ibwr->ex.imm_data);
pkt               447 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_IETH_MASK)
pkt               448 drivers/infiniband/sw/rxe/rxe_req.c 		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);
pkt               450 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_ATMETH_MASK) {
pkt               451 drivers/infiniband/sw/rxe/rxe_req.c 		atmeth_set_va(pkt, wqe->iova);
pkt               454 drivers/infiniband/sw/rxe/rxe_req.c 			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
pkt               455 drivers/infiniband/sw/rxe/rxe_req.c 			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
pkt               457 drivers/infiniband/sw/rxe/rxe_req.c 			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
pkt               459 drivers/infiniband/sw/rxe/rxe_req.c 		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
pkt               462 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_DETH_MASK) {
pkt               464 drivers/infiniband/sw/rxe/rxe_req.c 			deth_set_qkey(pkt, GSI_QKEY);
pkt               466 drivers/infiniband/sw/rxe/rxe_req.c 			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
pkt               467 drivers/infiniband/sw/rxe/rxe_req.c 		deth_set_sqp(pkt, qp->ibqp.qp_num);
pkt               474 drivers/infiniband/sw/rxe/rxe_req.c 		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
pkt               482 drivers/infiniband/sw/rxe/rxe_req.c 	err = rxe_prepare(pkt, skb, &crc);
pkt               486 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_WRITE_OR_SEND) {
pkt               491 drivers/infiniband/sw/rxe/rxe_req.c 			memcpy(payload_addr(pkt), tmp, paylen);
pkt               497 drivers/infiniband/sw/rxe/rxe_req.c 					payload_addr(pkt), paylen,
pkt               503 drivers/infiniband/sw/rxe/rxe_req.c 		if (bth_pad(pkt)) {
pkt               504 drivers/infiniband/sw/rxe/rxe_req.c 			u8 *pad = payload_addr(pkt) + paylen;
pkt               506 drivers/infiniband/sw/rxe/rxe_req.c 			memset(pad, 0, bth_pad(pkt));
pkt               507 drivers/infiniband/sw/rxe/rxe_req.c 			crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
pkt               510 drivers/infiniband/sw/rxe/rxe_req.c 	p = payload_addr(pkt) + paylen + bth_pad(pkt);
pkt               519 drivers/infiniband/sw/rxe/rxe_req.c 		struct rxe_pkt_info *pkt)
pkt               521 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_END_MASK) {
pkt               531 drivers/infiniband/sw/rxe/rxe_req.c 			   struct rxe_pkt_info *pkt,
pkt               541 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_START_MASK) {
pkt               546 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_READ_MASK)
pkt               575 drivers/infiniband/sw/rxe/rxe_req.c 			 struct rxe_pkt_info *pkt, int payload)
pkt               577 drivers/infiniband/sw/rxe/rxe_req.c 	qp->req.opcode = pkt->opcode;
pkt               579 drivers/infiniband/sw/rxe/rxe_req.c 	if (pkt->mask & RXE_END_MASK)
pkt               592 drivers/infiniband/sw/rxe/rxe_req.c 	struct rxe_pkt_info pkt;
pkt               717 drivers/infiniband/sw/rxe/rxe_req.c 	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
pkt               723 drivers/infiniband/sw/rxe/rxe_req.c 	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
pkt               736 drivers/infiniband/sw/rxe/rxe_req.c 	update_wqe_state(qp, wqe, &pkt);
pkt               737 drivers/infiniband/sw/rxe/rxe_req.c 	update_wqe_psn(qp, wqe, &pkt, payload);
pkt               738 drivers/infiniband/sw/rxe/rxe_req.c 	ret = rxe_xmit_packet(qp, &pkt, skb);
pkt               752 drivers/infiniband/sw/rxe/rxe_req.c 	update_state(qp, wqe, &pkt, payload);
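
init_req_packet() above initializes the BTH and then appends extension headers according to the opcode mask (RETH for RDMA, IMMDT for immediates, IETH for invalidate, ATMETH for atomics, DETH for UD). A minimal sketch of that conditional build; the flag values and the build_*() helpers are simplified stand-ins, not the driver's functions.

    #include <stdio.h>

    #define RETH_MASK   (1u << 0)
    #define IMMDT_MASK  (1u << 1)
    #define IETH_MASK   (1u << 2)
    #define DETH_MASK   (1u << 3)

    struct req { unsigned int mask; };

    static void build_bth(void)   { printf("BTH\n");   }
    static void build_reth(void)  { printf("RETH\n");  }
    static void build_immdt(void) { printf("IMMDT\n"); }
    static void build_ieth(void)  { printf("IETH\n");  }
    static void build_deth(void)  { printf("DETH\n");  }

    /* append extension headers in the order the opcode mask asks for them */
    static void build_headers(const struct req *req)
    {
            build_bth();
            if (req->mask & RETH_MASK)
                    build_reth();
            if (req->mask & IMMDT_MASK)
                    build_immdt();
            if (req->mask & IETH_MASK)
                    build_ieth();
            if (req->mask & DETH_MASK)
                    build_deth();
    }

    int main(void)
    {
            struct req write_imm = { .mask = RETH_MASK | IMMDT_MASK };

            build_headers(&write_imm);   /* prints BTH, RETH, IMMDT */
            return 0;
    }
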
pkt               110 drivers/infiniband/sw/rxe/rxe_resp.c 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
pkt               114 drivers/infiniband/sw/rxe/rxe_resp.c 	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
pkt               145 drivers/infiniband/sw/rxe/rxe_resp.c 				  struct rxe_pkt_info *pkt)
pkt               147 drivers/infiniband/sw/rxe/rxe_resp.c 	int diff = psn_compare(pkt->psn, qp->resp.psn);
pkt               172 drivers/infiniband/sw/rxe/rxe_resp.c 			if (pkt->mask & RXE_START_MASK) {
pkt               189 drivers/infiniband/sw/rxe/rxe_resp.c 				     struct rxe_pkt_info *pkt)
pkt               196 drivers/infiniband/sw/rxe/rxe_resp.c 			switch (pkt->opcode) {
pkt               208 drivers/infiniband/sw/rxe/rxe_resp.c 			switch (pkt->opcode) {
pkt               218 drivers/infiniband/sw/rxe/rxe_resp.c 			switch (pkt->opcode) {
pkt               237 drivers/infiniband/sw/rxe/rxe_resp.c 			switch (pkt->opcode) {
pkt               248 drivers/infiniband/sw/rxe/rxe_resp.c 			switch (pkt->opcode) {
pkt               258 drivers/infiniband/sw/rxe/rxe_resp.c 			switch (pkt->opcode) {
pkt               279 drivers/infiniband/sw/rxe/rxe_resp.c 				       struct rxe_pkt_info *pkt)
pkt               283 drivers/infiniband/sw/rxe/rxe_resp.c 		if (((pkt->mask & RXE_READ_MASK) &&
pkt               285 drivers/infiniband/sw/rxe/rxe_resp.c 		    ((pkt->mask & RXE_WRITE_MASK) &&
pkt               287 drivers/infiniband/sw/rxe/rxe_resp.c 		    ((pkt->mask & RXE_ATOMIC_MASK) &&
pkt               295 drivers/infiniband/sw/rxe/rxe_resp.c 		if ((pkt->mask & RXE_WRITE_MASK) &&
pkt               359 drivers/infiniband/sw/rxe/rxe_resp.c 				       struct rxe_pkt_info *pkt)
pkt               380 drivers/infiniband/sw/rxe/rxe_resp.c 	if (pkt->mask & RXE_READ_OR_ATOMIC) {
pkt               391 drivers/infiniband/sw/rxe/rxe_resp.c 	if (pkt->mask & RXE_RWR_MASK) {
pkt               403 drivers/infiniband/sw/rxe/rxe_resp.c 				     struct rxe_pkt_info *pkt)
pkt               418 drivers/infiniband/sw/rxe/rxe_resp.c 				   struct rxe_pkt_info *pkt)
pkt               429 drivers/infiniband/sw/rxe/rxe_resp.c 	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
pkt               430 drivers/infiniband/sw/rxe/rxe_resp.c 		if (pkt->mask & RXE_RETH_MASK) {
pkt               431 drivers/infiniband/sw/rxe/rxe_resp.c 			qp->resp.va = reth_va(pkt);
pkt               432 drivers/infiniband/sw/rxe/rxe_resp.c 			qp->resp.rkey = reth_rkey(pkt);
pkt               433 drivers/infiniband/sw/rxe/rxe_resp.c 			qp->resp.resid = reth_len(pkt);
pkt               434 drivers/infiniband/sw/rxe/rxe_resp.c 			qp->resp.length = reth_len(pkt);
pkt               436 drivers/infiniband/sw/rxe/rxe_resp.c 		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
pkt               438 drivers/infiniband/sw/rxe/rxe_resp.c 	} else if (pkt->mask & RXE_ATOMIC_MASK) {
pkt               439 drivers/infiniband/sw/rxe/rxe_resp.c 		qp->resp.va = atmeth_va(pkt);
pkt               440 drivers/infiniband/sw/rxe/rxe_resp.c 		qp->resp.rkey = atmeth_rkey(pkt);
pkt               448 drivers/infiniband/sw/rxe/rxe_resp.c 	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
pkt               449 drivers/infiniband/sw/rxe/rxe_resp.c 	    (pkt->mask & RXE_RETH_MASK) &&
pkt               450 drivers/infiniband/sw/rxe/rxe_resp.c 	    reth_len(pkt) == 0) {
pkt               457 drivers/infiniband/sw/rxe/rxe_resp.c 	pktlen	= payload_size(pkt);
pkt               475 drivers/infiniband/sw/rxe/rxe_resp.c 	if (pkt->mask & RXE_WRITE_MASK)	 {
pkt               477 drivers/infiniband/sw/rxe/rxe_resp.c 			if (pktlen != mtu || bth_pad(pkt)) {
pkt               486 drivers/infiniband/sw/rxe/rxe_resp.c 			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
pkt               522 drivers/infiniband/sw/rxe/rxe_resp.c 				      struct rxe_pkt_info *pkt)
pkt               526 drivers/infiniband/sw/rxe/rxe_resp.c 	int data_len = payload_size(pkt);
pkt               528 drivers/infiniband/sw/rxe/rxe_resp.c 	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
pkt               546 drivers/infiniband/sw/rxe/rxe_resp.c 				       struct rxe_pkt_info *pkt)
pkt               548 drivers/infiniband/sw/rxe/rxe_resp.c 	u64 iova = atmeth_va(pkt);
pkt               570 drivers/infiniband/sw/rxe/rxe_resp.c 	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
pkt               571 drivers/infiniband/sw/rxe/rxe_resp.c 	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
pkt               572 drivers/infiniband/sw/rxe/rxe_resp.c 		if (*vaddr == atmeth_comp(pkt))
pkt               573 drivers/infiniband/sw/rxe/rxe_resp.c 			*vaddr = atmeth_swap_add(pkt);
pkt               575 drivers/infiniband/sw/rxe/rxe_resp.c 		*vaddr += atmeth_swap_add(pkt);
pkt               586 drivers/infiniband/sw/rxe/rxe_resp.c 					  struct rxe_pkt_info *pkt,
pkt               615 drivers/infiniband/sw/rxe/rxe_resp.c 	ack->offset = pkt->offset;
pkt               619 drivers/infiniband/sw/rxe/rxe_resp.c 	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);
pkt               770 drivers/infiniband/sw/rxe/rxe_resp.c 				   struct rxe_pkt_info *pkt)
pkt               772 drivers/infiniband/sw/rxe/rxe_resp.c 	struct sk_buff *skb = PKT_TO_SKB(pkt);
pkt               784 drivers/infiniband/sw/rxe/rxe_resp.c static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
pkt               788 drivers/infiniband/sw/rxe/rxe_resp.c 	if (pkt->mask & RXE_SEND_MASK) {
pkt               794 drivers/infiniband/sw/rxe/rxe_resp.c 			build_rdma_network_hdr(&hdr, pkt);
pkt               800 drivers/infiniband/sw/rxe/rxe_resp.c 		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
pkt               803 drivers/infiniband/sw/rxe/rxe_resp.c 	} else if (pkt->mask & RXE_WRITE_MASK) {
pkt               804 drivers/infiniband/sw/rxe/rxe_resp.c 		err = write_data_in(qp, pkt);
pkt               807 drivers/infiniband/sw/rxe/rxe_resp.c 	} else if (pkt->mask & RXE_READ_MASK) {
pkt               811 drivers/infiniband/sw/rxe/rxe_resp.c 	} else if (pkt->mask & RXE_ATOMIC_MASK) {
pkt               812 drivers/infiniband/sw/rxe/rxe_resp.c 		err = process_atomic(qp, pkt);
pkt               821 drivers/infiniband/sw/rxe/rxe_resp.c 	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
pkt               824 drivers/infiniband/sw/rxe/rxe_resp.c 	qp->resp.opcode = pkt->opcode;
pkt               827 drivers/infiniband/sw/rxe/rxe_resp.c 	if (pkt->mask & RXE_COMP_MASK) {
pkt               838 drivers/infiniband/sw/rxe/rxe_resp.c 				    struct rxe_pkt_info *pkt)
pkt               863 drivers/infiniband/sw/rxe/rxe_resp.c 		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
pkt               864 drivers/infiniband/sw/rxe/rxe_resp.c 				pkt->mask & RXE_WRITE_MASK) ?
pkt               867 drivers/infiniband/sw/rxe/rxe_resp.c 		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
pkt               868 drivers/infiniband/sw/rxe/rxe_resp.c 				pkt->mask & RXE_WRITE_MASK) ?
pkt               877 drivers/infiniband/sw/rxe/rxe_resp.c 			if (pkt->mask & RXE_IMMDT_MASK) {
pkt               879 drivers/infiniband/sw/rxe/rxe_resp.c 				uwc->ex.imm_data = immdt_imm(pkt);
pkt               882 drivers/infiniband/sw/rxe/rxe_resp.c 			if (pkt->mask & RXE_IETH_MASK) {
pkt               884 drivers/infiniband/sw/rxe/rxe_resp.c 				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
pkt               889 drivers/infiniband/sw/rxe/rxe_resp.c 			if (pkt->mask & RXE_DETH_MASK)
pkt               890 drivers/infiniband/sw/rxe/rxe_resp.c 				uwc->src_qp = deth_sqp(pkt);
pkt               894 drivers/infiniband/sw/rxe/rxe_resp.c 			struct sk_buff *skb = PKT_TO_SKB(pkt);
pkt               907 drivers/infiniband/sw/rxe/rxe_resp.c 			if (pkt->mask & RXE_IMMDT_MASK) {
pkt               909 drivers/infiniband/sw/rxe/rxe_resp.c 				wc->ex.imm_data = immdt_imm(pkt);
pkt               912 drivers/infiniband/sw/rxe/rxe_resp.c 			if (pkt->mask & RXE_IETH_MASK) {
pkt               916 drivers/infiniband/sw/rxe/rxe_resp.c 				wc->ex.invalidate_rkey = ieth_rkey(pkt);
pkt               931 drivers/infiniband/sw/rxe/rxe_resp.c 			if (pkt->mask & RXE_DETH_MASK)
pkt               932 drivers/infiniband/sw/rxe/rxe_resp.c 				wc->src_qp = deth_sqp(pkt);
pkt               944 drivers/infiniband/sw/rxe/rxe_resp.c 	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
pkt               950 drivers/infiniband/sw/rxe/rxe_resp.c 	if (!pkt)
pkt               958 drivers/infiniband/sw/rxe/rxe_resp.c static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
pkt               965 drivers/infiniband/sw/rxe/rxe_resp.c 	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
pkt               980 drivers/infiniband/sw/rxe/rxe_resp.c static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
pkt               988 drivers/infiniband/sw/rxe/rxe_resp.c 	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
pkt               989 drivers/infiniband/sw/rxe/rxe_resp.c 				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
pkt              1023 drivers/infiniband/sw/rxe/rxe_resp.c 				    struct rxe_pkt_info *pkt)
pkt              1029 drivers/infiniband/sw/rxe/rxe_resp.c 		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
pkt              1030 drivers/infiniband/sw/rxe/rxe_resp.c 	else if (pkt->mask & RXE_ATOMIC_MASK)
pkt              1031 drivers/infiniband/sw/rxe/rxe_resp.c 		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
pkt              1032 drivers/infiniband/sw/rxe/rxe_resp.c 	else if (bth_ack(pkt))
pkt              1033 drivers/infiniband/sw/rxe/rxe_resp.c 		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
pkt              1039 drivers/infiniband/sw/rxe/rxe_resp.c 				struct rxe_pkt_info *pkt)
pkt              1043 drivers/infiniband/sw/rxe/rxe_resp.c 	if (pkt) {
pkt              1077 drivers/infiniband/sw/rxe/rxe_resp.c 					  struct rxe_pkt_info *pkt)
pkt              1082 drivers/infiniband/sw/rxe/rxe_resp.c 	if (pkt->mask & RXE_SEND_MASK ||
pkt              1083 drivers/infiniband/sw/rxe/rxe_resp.c 	    pkt->mask & RXE_WRITE_MASK) {
pkt              1085 drivers/infiniband/sw/rxe/rxe_resp.c 		if (bth_ack(pkt))
pkt              1086 drivers/infiniband/sw/rxe/rxe_resp.c 			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
pkt              1089 drivers/infiniband/sw/rxe/rxe_resp.c 	} else if (pkt->mask & RXE_READ_MASK) {
pkt              1092 drivers/infiniband/sw/rxe/rxe_resp.c 		res = find_resource(qp, pkt->psn);
pkt              1103 drivers/infiniband/sw/rxe/rxe_resp.c 			u64 iova = reth_va(pkt);
pkt              1104 drivers/infiniband/sw/rxe/rxe_resp.c 			u32 resid = reth_len(pkt);
pkt              1114 drivers/infiniband/sw/rxe/rxe_resp.c 			if (reth_rkey(pkt) != res->read.rkey) {
pkt              1119 drivers/infiniband/sw/rxe/rxe_resp.c 			res->cur_psn = pkt->psn;
pkt              1120 drivers/infiniband/sw/rxe/rxe_resp.c 			res->state = (pkt->psn == res->first_psn) ?
pkt              1139 drivers/infiniband/sw/rxe/rxe_resp.c 		res = find_resource(qp, pkt->psn);
pkt              1143 drivers/infiniband/sw/rxe/rxe_resp.c 			rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
pkt              1225 drivers/infiniband/sw/rxe/rxe_resp.c 	struct rxe_pkt_info *pkt = NULL;
pkt              1252 drivers/infiniband/sw/rxe/rxe_resp.c 			state = get_req(qp, &pkt);
pkt              1255 drivers/infiniband/sw/rxe/rxe_resp.c 			state = check_psn(qp, pkt);
pkt              1258 drivers/infiniband/sw/rxe/rxe_resp.c 			state = check_op_seq(qp, pkt);
pkt              1261 drivers/infiniband/sw/rxe/rxe_resp.c 			state = check_op_valid(qp, pkt);
pkt              1264 drivers/infiniband/sw/rxe/rxe_resp.c 			state = check_resource(qp, pkt);
pkt              1267 drivers/infiniband/sw/rxe/rxe_resp.c 			state = check_length(qp, pkt);
pkt              1270 drivers/infiniband/sw/rxe/rxe_resp.c 			state = check_rkey(qp, pkt);
pkt              1273 drivers/infiniband/sw/rxe/rxe_resp.c 			state = execute(qp, pkt);
pkt              1276 drivers/infiniband/sw/rxe/rxe_resp.c 			state = do_complete(qp, pkt);
pkt              1279 drivers/infiniband/sw/rxe/rxe_resp.c 			state = read_reply(qp, pkt);
pkt              1282 drivers/infiniband/sw/rxe/rxe_resp.c 			state = acknowledge(qp, pkt);
pkt              1285 drivers/infiniband/sw/rxe/rxe_resp.c 			state = cleanup(qp, pkt);
pkt              1288 drivers/infiniband/sw/rxe/rxe_resp.c 			state = duplicate_request(qp, pkt);
pkt              1292 drivers/infiniband/sw/rxe/rxe_resp.c 			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
pkt              1314 drivers/infiniband/sw/rxe/rxe_resp.c 				send_ack(qp, pkt, AETH_RNR_NAK |
pkt              1317 drivers/infiniband/sw/rxe/rxe_resp.c 					 pkt->psn);
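
rxe_responder()'s switch above is a state machine that walks each inbound request through get_req, the check_* stages, execute, do_complete, acknowledge, and cleanup, branching to NAK states on error. A minimal sketch of driving such a machine to a terminal state, with placeholder states and handlers:

    #include <stdio.h>

    enum state { ST_GET_REQ, ST_CHECK, ST_EXECUTE, ST_COMPLETE, ST_DONE };

    static enum state do_get_req(void)  { return ST_CHECK;    }
    static enum state do_check(void)    { return ST_EXECUTE;  }
    static enum state do_execute(void)  { return ST_COMPLETE; }
    static enum state do_complete(void) { return ST_DONE;     }

    int main(void)
    {
            enum state s = ST_GET_REQ;

            /* drive the responder until it reaches a terminal state */
            while (s != ST_DONE) {
                    switch (s) {
                    case ST_GET_REQ:  s = do_get_req();  break;
                    case ST_CHECK:    s = do_check();    break;
                    case ST_EXECUTE:  s = do_execute();  break;
                    case ST_COMPLETE: s = do_complete(); break;
                    default:          s = ST_DONE;       break;
                    }
                    printf("-> state %d\n", s);
            }
            return 0;
    }
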
pkt               397 drivers/infiniband/sw/siw/siw.h 	} pkt;
pkt               128 drivers/infiniband/sw/siw/siw_qp_tx.c 		memcpy(&c_tx->pkt.ctrl,
pkt               132 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rreq.rsvd = 0;
pkt               133 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rreq.ddp_qn = htonl(RDMAP_UNTAGGED_QN_RDMA_READ);
pkt               134 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rreq.ddp_msn =
pkt               136 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rreq.ddp_mo = 0;
pkt               137 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
pkt               138 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rreq.sink_to =
pkt               140 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
pkt               141 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
pkt               142 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);
pkt               145 drivers/infiniband/sw/siw/siw_qp_tx.c 		crc = (char *)&c_tx->pkt.rreq_pkt.crc;
pkt               150 drivers/infiniband/sw/siw/siw_qp_tx.c 			memcpy(&c_tx->pkt.ctrl,
pkt               154 drivers/infiniband/sw/siw/siw_qp_tx.c 			memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND].ctrl,
pkt               157 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
pkt               158 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.send.ddp_msn =
pkt               160 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.send.ddp_mo = 0;
pkt               162 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.send_inv.inval_stag = 0;
pkt               166 drivers/infiniband/sw/siw/siw_qp_tx.c 		crc = (char *)&c_tx->pkt.send_pkt.crc;
pkt               172 drivers/infiniband/sw/siw/siw_qp_tx.c 			memcpy(&c_tx->pkt.ctrl,
pkt               176 drivers/infiniband/sw/siw/siw_qp_tx.c 			memcpy(&c_tx->pkt.ctrl,
pkt               180 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
pkt               181 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.send.ddp_msn =
pkt               183 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.send.ddp_mo = 0;
pkt               185 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);
pkt               189 drivers/infiniband/sw/siw/siw_qp_tx.c 		crc = (char *)&c_tx->pkt.send_pkt.crc;
pkt               194 drivers/infiniband/sw/siw/siw_qp_tx.c 		memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_WRITE].ctrl,
pkt               197 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
pkt               198 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->sqe.raddr);
pkt               201 drivers/infiniband/sw/siw/siw_qp_tx.c 		crc = (char *)&c_tx->pkt.write_pkt.crc;
pkt               206 drivers/infiniband/sw/siw/siw_qp_tx.c 		memcpy(&c_tx->pkt.ctrl,
pkt               211 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);
pkt               212 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->sqe.raddr);
pkt               216 drivers/infiniband/sw/siw/siw_qp_tx.c 		crc = (char *)&c_tx->pkt.write_pkt.crc;
pkt               233 drivers/infiniband/sw/siw/siw_qp_tx.c 			c_tx->pkt.ctrl.mpa_len =
pkt               242 drivers/infiniband/sw/siw/siw_qp_tx.c 			if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
pkt               243 drivers/infiniband/sw/siw/siw_qp_tx.c 				c_tx->pkt.c_untagged.ddp_mo = 0;
pkt               245 drivers/infiniband/sw/siw/siw_qp_tx.c 				c_tx->pkt.c_tagged.ddp_to =
pkt               256 drivers/infiniband/sw/siw/siw_qp_tx.c 						(u8 *)&c_tx->pkt,
pkt               298 drivers/infiniband/sw/siw/siw_qp_tx.c 				    (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent,
pkt               447 drivers/infiniband/sw/siw/siw_qp_tx.c 				(char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent;
pkt               699 drivers/infiniband/sw/siw/siw_qp_tx.c 		iwarp_pktinfo[__rdmap_get_opcode(&c_tx->pkt.ctrl)].hdr_len;
pkt               705 drivers/infiniband/sw/siw/siw_qp_tx.c 	if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
pkt               707 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.c_untagged.ddp_mo = cpu_to_be32(wqe->processed);
pkt               709 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.c_tagged.ddp_to =
pkt               716 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.ctrl.ddp_rdmap_ctrl &= ~DDP_FLAG_LAST;
pkt               719 drivers/infiniband/sw/siw/siw_qp_tx.c 		c_tx->pkt.ctrl.ddp_rdmap_ctrl |= DDP_FLAG_LAST;
pkt               724 drivers/infiniband/sw/siw/siw_qp_tx.c 	c_tx->pkt.ctrl.mpa_len =
pkt               732 drivers/infiniband/sw/siw/siw_qp_tx.c 		crypto_shash_update(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt,
pkt               893 drivers/infiniband/sw/siw/siw_qp_tx.c 		if (c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_LAST) {
pkt               119 drivers/input/joystick/grip_mp.c static int bit_parity(u32 pkt)
pkt               121 drivers/input/joystick/grip_mp.c 	int x = pkt ^ (pkt >> 16);
pkt               168 drivers/input/joystick/grip_mp.c 	u32 pkt;                 /* packet temporary storage */
pkt               207 drivers/input/joystick/grip_mp.c 		pkt = (PACKET_FULL | PACKET_IO_FAST) >> 28;
pkt               211 drivers/input/joystick/grip_mp.c 		pkt = (PACKET_FULL | PACKET_IO_SLOW) >> 28;
pkt               221 drivers/input/joystick/grip_mp.c 		if (pkt & PACKET_FULL)
pkt               223 drivers/input/joystick/grip_mp.c 		pkt = (pkt << bits_per_read) | raw_data;
pkt               235 drivers/input/joystick/grip_mp.c 		pkt = (pkt & 0xffff0000) | ((pkt << 1) & 0xffff);
pkt               236 drivers/input/joystick/grip_mp.c 		pkt = (pkt >> 2) | 0xf0000000;
pkt               239 drivers/input/joystick/grip_mp.c 	if (bit_parity(pkt) == 1)
pkt               259 drivers/input/joystick/grip_mp.c 	*packet = pkt;
pkt               260 drivers/input/joystick/grip_mp.c 	if ((sendflags == 0) || ((sendflags & IO_RETRY) && !(pkt & PACKET_MP_DONE)))
pkt               263 drivers/input/joystick/grip_mp.c 	if (pkt & PACKET_MP_MORE)
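
bit_parity() above folds the upper half of the 32-bit packet onto the lower half as its first step; continuing the fold down to a single bit yields the parity the driver tests before accepting a packet. A standalone version of the full fold (the later steps are the standard continuation and are assumed, not copied from the driver):

    #include <stdio.h>
    #include <stdint.h>

    /* parity of a 32-bit word by repeated folding, as started in bit_parity() */
    static int parity32(uint32_t x)
    {
            x ^= x >> 16;
            x ^= x >> 8;
            x ^= x >> 4;
            x ^= x >> 2;
            x ^= x >> 1;
            return x & 1;
    }

    int main(void)
    {
            printf("%d %d\n", parity32(0xf0000001), parity32(0xf0000003));
            return 0;
    }
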
pkt                16 drivers/input/joystick/iforce/iforce-serio.c 	int idx, pkt, len, id;
pkt               133 drivers/input/joystick/iforce/iforce-serio.c 	if (!iforce_serio->pkt) {
pkt               135 drivers/input/joystick/iforce/iforce-serio.c 			iforce_serio->pkt = 1;
pkt               141 drivers/input/joystick/iforce/iforce-serio.c 			iforce_serio->pkt = 0;
pkt               149 drivers/input/joystick/iforce/iforce-serio.c 			iforce_serio->pkt = 0;
pkt               179 drivers/input/joystick/iforce/iforce-serio.c 		iforce_serio->pkt = 0;
pkt               987 drivers/input/joystick/xpad.c 	struct xpad_output_packet *pkt, *packet = NULL;
pkt               998 drivers/input/joystick/xpad.c 		pkt = &xpad->out_packets[xpad->last_out_packet];
pkt               999 drivers/input/joystick/xpad.c 		if (pkt->pending) {
pkt              1003 drivers/input/joystick/xpad.c 			packet = pkt;
pkt               970 drivers/input/mouse/alps.c 					  unsigned char *pkt,
pkt               973 drivers/input/mouse/alps.c 	mt[0].x = ((pkt[2] & 0x80) << 4);
pkt               974 drivers/input/mouse/alps.c 	mt[0].x |= ((pkt[2] & 0x3F) << 5);
pkt               975 drivers/input/mouse/alps.c 	mt[0].x |= ((pkt[3] & 0x30) >> 1);
pkt               976 drivers/input/mouse/alps.c 	mt[0].x |= (pkt[3] & 0x07);
pkt               977 drivers/input/mouse/alps.c 	mt[0].y = (pkt[1] << 3) | (pkt[0] & 0x07);
pkt               979 drivers/input/mouse/alps.c 	mt[1].x = ((pkt[3] & 0x80) << 4);
pkt               980 drivers/input/mouse/alps.c 	mt[1].x |= ((pkt[4] & 0x80) << 3);
pkt               981 drivers/input/mouse/alps.c 	mt[1].x |= ((pkt[4] & 0x3F) << 4);
pkt               982 drivers/input/mouse/alps.c 	mt[1].y = ((pkt[5] & 0x80) << 3);
pkt               983 drivers/input/mouse/alps.c 	mt[1].y |= ((pkt[5] & 0x3F) << 4);
pkt               999 drivers/input/mouse/alps.c 		mt[1].y |= ((pkt[4] & 0x02) << 4);
pkt              1005 drivers/input/mouse/alps.c 		mt[1].x |= (pkt[0] & 0x20);
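
The alps.c lines above assemble two touch points from scattered bit fields in a 6-byte packet. The sketch below decodes only the first point, using exactly the bit operations visible above; the sample packet bytes are made up.

    #include <stdio.h>

    struct point { int x, y; };

    /* decode the first contact exactly as the bit-ops above assemble it */
    static struct point decode_mt0(const unsigned char *pkt)
    {
            struct point p;

            p.x  = (pkt[2] & 0x80) << 4;
            p.x |= (pkt[2] & 0x3f) << 5;
            p.x |= (pkt[3] & 0x30) >> 1;
            p.x |=  pkt[3] & 0x07;
            p.y  = (pkt[1] << 3) | (pkt[0] & 0x07);
            return p;
    }

    int main(void)
    {
            unsigned char pkt[6] = { 0x05, 0x40, 0xbf, 0x37, 0x00, 0x00 };
            struct point p = decode_mt0(pkt);

            printf("x=%d y=%d\n", p.x, p.y);
            return 0;
    }
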
pkt               276 drivers/input/mouse/byd.c 	u8 *pkt = psmouse->packet;
pkt               278 drivers/input/mouse/byd.c 	if (psmouse->pktcnt > 0 && !(pkt[0] & PS2_ALWAYS_1)) {
pkt               280 drivers/input/mouse/byd.c 			     pkt[0]);
pkt               288 drivers/input/mouse/byd.c 	switch (pkt[3]) {
pkt               298 drivers/input/mouse/byd.c 			priv->abs_x = pkt[1] * (BYD_PAD_WIDTH / 256);
pkt               299 drivers/input/mouse/byd.c 			priv->abs_y = (255 - pkt[2]) * (BYD_PAD_HEIGHT / 256);
pkt               305 drivers/input/mouse/byd.c 		u32 signx = pkt[0] & PS2_X_SIGN ? ~0xFF : 0;
pkt               306 drivers/input/mouse/byd.c 		u32 signy = pkt[0] & PS2_Y_SIGN ? ~0xFF : 0;
pkt               307 drivers/input/mouse/byd.c 		s32 dx = signx | (int) pkt[1];
pkt               308 drivers/input/mouse/byd.c 		s32 dy = signy | (int) pkt[2];
pkt               325 drivers/input/mouse/byd.c 	priv->btn_left = pkt[0] & PS2_LEFT;
pkt               326 drivers/input/mouse/byd.c 	priv->btn_right = pkt[0] & PS2_RIGHT;
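
byd.c above widens the 8-bit PS/2 motion bytes into signed deltas by OR-ing in a ~0xFF mask when the corresponding sign bit is set. A standalone sketch of that extension; the 0x10/0x20 sign-bit values follow the standard PS/2 first-byte layout rather than anything shown in the listing.

    #include <stdio.h>
    #include <stdint.h>

    #define PS2_X_SIGN 0x10
    #define PS2_Y_SIGN 0x20

    /* extend an 8-bit magnitude plus separate sign bit to a signed 32-bit delta */
    static int32_t ps2_delta(uint8_t flags, uint8_t mag, uint8_t sign_bit)
    {
            uint32_t sign = (flags & sign_bit) ? ~0xFFu : 0;

            return (int32_t)(sign | mag);
    }

    int main(void)
    {
            /* X moved -2 (sign set, magnitude 0xFE), Y moved +5 */
            printf("dx=%d dy=%d\n",
                   ps2_delta(PS2_X_SIGN, 0xFE, PS2_X_SIGN),
                   ps2_delta(0,          0x05, PS2_Y_SIGN));
            return 0;
    }
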
pkt                71 drivers/input/touchscreen/usbtouchscreen.c 	void (*process_pkt) (struct usbtouch_usb *usbtouch, unsigned char *pkt, int len);
pkt                79 drivers/input/touchscreen/usbtouchscreen.c 	int  (*get_pkt_len) (unsigned char *pkt, int len);
pkt                81 drivers/input/touchscreen/usbtouchscreen.c 	int  (*read_data)   (struct usbtouch_usb *usbtouch, unsigned char *pkt);
pkt               264 drivers/input/touchscreen/usbtouchscreen.c static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               266 drivers/input/touchscreen/usbtouchscreen.c 	int tmp = (pkt[0] << 8) | pkt[1];
pkt               267 drivers/input/touchscreen/usbtouchscreen.c 	dev->x  = (pkt[2] << 8) | pkt[3];
pkt               268 drivers/input/touchscreen/usbtouchscreen.c 	dev->y  = (pkt[4] << 8) | pkt[5];
pkt               332 drivers/input/touchscreen/usbtouchscreen.c static int egalax_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               334 drivers/input/touchscreen/usbtouchscreen.c 	if ((pkt[0] & EGALAX_PKT_TYPE_MASK) != EGALAX_PKT_TYPE_REPT)
pkt               337 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[3] & 0x0F) << 7) | (pkt[4] & 0x7F);
pkt               338 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[1] & 0x0F) << 7) | (pkt[2] & 0x7F);
pkt               339 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = pkt[0] & 0x01;
pkt               376 drivers/input/touchscreen/usbtouchscreen.c static int etouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               378 drivers/input/touchscreen/usbtouchscreen.c 	if ((pkt[0] & ETOUCH_PKT_TYPE_MASK) != ETOUCH_PKT_TYPE_REPT &&
pkt               379 drivers/input/touchscreen/usbtouchscreen.c 		(pkt[0] & ETOUCH_PKT_TYPE_MASK) != ETOUCH_PKT_TYPE_REPT2)
pkt               382 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[1] & 0x1F) << 7) | (pkt[2] & 0x7F);
pkt               383 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[3] & 0x1F) << 7) | (pkt[4] & 0x7F);
pkt               384 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = pkt[0] & 0x01;
pkt               411 drivers/input/touchscreen/usbtouchscreen.c static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               413 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1];
pkt               414 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3];
pkt               415 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = pkt[0] & 0x01;
pkt               433 drivers/input/touchscreen/usbtouchscreen.c static int mtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               436 drivers/input/touchscreen/usbtouchscreen.c 		dev->x = (pkt[4] << 8) | pkt[3];
pkt               437 drivers/input/touchscreen/usbtouchscreen.c 		dev->y = 0xffff - ((pkt[6] << 8) | pkt[5]);
pkt               439 drivers/input/touchscreen/usbtouchscreen.c 		dev->x = (pkt[8] << 8) | pkt[7];
pkt               440 drivers/input/touchscreen/usbtouchscreen.c 		dev->y = (pkt[10] << 8) | pkt[9];
pkt               442 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = (pkt[2] & 0x40) ? 1 : 0;
pkt               582 drivers/input/touchscreen/usbtouchscreen.c static int itm_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               591 drivers/input/touchscreen/usbtouchscreen.c 	dev->press = ((pkt[2] & 0x01) << 7) | (pkt[5] & 0x7F);
pkt               593 drivers/input/touchscreen/usbtouchscreen.c 	touch = ~pkt[7] & 0x20;
pkt               603 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[0] & 0x1F) << 7) | (pkt[3] & 0x7F);
pkt               604 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[1] & 0x1F) << 7) | (pkt[4] & 0x7F);
pkt               619 drivers/input/touchscreen/usbtouchscreen.c static int eturbo_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               624 drivers/input/touchscreen/usbtouchscreen.c 	if (!(pkt[0] & 0x80))
pkt               627 drivers/input/touchscreen/usbtouchscreen.c 	shift = (6 - (pkt[0] & 0x03));
pkt               628 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[3] << 7) | pkt[4]) >> shift;
pkt               629 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[1] << 7) | pkt[2]) >> shift;
pkt               630 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = (pkt[0] & 0x10) ? 1 : 0;
pkt               650 drivers/input/touchscreen/usbtouchscreen.c static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               652 drivers/input/touchscreen/usbtouchscreen.c 	if (!(pkt[0] & 0x80) || ((pkt[1] | pkt[2] | pkt[3]) & 0x80))
pkt               655 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[0] & 0x1F) << 7) | (pkt[2] & 0x7F);
pkt               656 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[1] & 0x1F) << 7) | (pkt[3] & 0x7F);
pkt               657 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = pkt[0] & 0x20;
pkt               736 drivers/input/touchscreen/usbtouchscreen.c static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               738 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[2] & 0x03) << 8) | pkt[1];
pkt               739 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[4] & 0x03) << 8) | pkt[3];
pkt               740 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = pkt[0] & 0x01;
pkt               751 drivers/input/touchscreen/usbtouchscreen.c static int irtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               753 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = (pkt[3] << 8) | pkt[2];
pkt               754 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = (pkt[5] << 8) | pkt[4];
pkt               755 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = (pkt[1] & 0x03) ? 1 : 0;
pkt               765 drivers/input/touchscreen/usbtouchscreen.c static int tc45usb_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               767 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1];
pkt               768 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3];
pkt               769 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = pkt[0] & 0x01;
pkt               791 drivers/input/touchscreen/usbtouchscreen.c static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               793 drivers/input/touchscreen/usbtouchscreen.c 	switch (pkt[0] & 0x98) {
pkt               796 drivers/input/touchscreen/usbtouchscreen.c 		dev->x = (pkt[1] << 5) | (pkt[2] >> 2);
pkt               797 drivers/input/touchscreen/usbtouchscreen.c 		dev->y = (pkt[3] << 5) | (pkt[4] >> 2);
pkt               798 drivers/input/touchscreen/usbtouchscreen.c 		dev->touch = (pkt[0] & 0x40) ? 1 : 0;
pkt               803 drivers/input/touchscreen/usbtouchscreen.c 		dev->x = (pkt[2] << 5) | (pkt[1] >> 2);
pkt               804 drivers/input/touchscreen/usbtouchscreen.c 		dev->y = (pkt[4] << 5) | (pkt[3] >> 2);
pkt               805 drivers/input/touchscreen/usbtouchscreen.c 		dev->touch = (pkt[0] & 0x40) ? 1 : 0;
pkt               818 drivers/input/touchscreen/usbtouchscreen.c static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               820 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = (pkt[2] << 8) | pkt[1];
pkt               821 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = (pkt[4] << 8) | pkt[3];
pkt               822 drivers/input/touchscreen/usbtouchscreen.c 	dev->press = pkt[5] & 0xff;
pkt               823 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = pkt[0] & 0x01;
pkt               833 drivers/input/touchscreen/usbtouchscreen.c static int gotop_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               835 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[1] & 0x38) << 4) | pkt[2];
pkt               836 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[1] & 0x07) << 7) | pkt[3];
pkt               837 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = pkt[0] & 0x01;
pkt               847 drivers/input/touchscreen/usbtouchscreen.c static int jastec_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               849 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = ((pkt[0] & 0x3f) << 6) | (pkt[2] & 0x3f);
pkt               850 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = ((pkt[1] & 0x3f) << 6) | (pkt[3] & 0x3f);
pkt               851 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = (pkt[0] & 0x40) >> 6;
pkt               861 drivers/input/touchscreen/usbtouchscreen.c static int zytronic_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt               865 drivers/input/touchscreen/usbtouchscreen.c 	switch (pkt[0]) {
pkt               867 drivers/input/touchscreen/usbtouchscreen.c 		dev_dbg(&intf->dev, "%s: Command response %d\n", __func__, pkt[1]);
pkt               871 drivers/input/touchscreen/usbtouchscreen.c 		dev->x = (pkt[1] & 0x7f) | ((pkt[2] & 0x07) << 7);
pkt               872 drivers/input/touchscreen/usbtouchscreen.c 		dev->y = (pkt[3] & 0x7f) | ((pkt[4] & 0x07) << 7);
pkt               878 drivers/input/touchscreen/usbtouchscreen.c 		dev->x = (pkt[1] & 0x7f) | ((pkt[2] & 0x07) << 7);
pkt               879 drivers/input/touchscreen/usbtouchscreen.c 		dev->y = (pkt[3] & 0x7f) | ((pkt[4] & 0x07) << 7);
pkt               885 drivers/input/touchscreen/usbtouchscreen.c 		dev_dbg(&intf->dev, "%s: Unknown return %d\n", __func__, pkt[0]);
pkt              1045 drivers/input/touchscreen/usbtouchscreen.c static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
pkt              1047 drivers/input/touchscreen/usbtouchscreen.c 	struct nexio_touch_packet *packet = (void *) pkt;
pkt              1055 drivers/input/touchscreen/usbtouchscreen.c 	if ((pkt[0] & 0xe0) != 0xe0)
pkt              1136 drivers/input/touchscreen/usbtouchscreen.c static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
pkt              1138 drivers/input/touchscreen/usbtouchscreen.c 	dev->x = (pkt[3] << 8) | pkt[2];
pkt              1139 drivers/input/touchscreen/usbtouchscreen.c 	dev->y = (pkt[5] << 8) | pkt[4];
pkt              1140 drivers/input/touchscreen/usbtouchscreen.c 	dev->touch = pkt[6] > 0;
pkt              1141 drivers/input/touchscreen/usbtouchscreen.c 	dev->press = pkt[6];
pkt              1153 drivers/input/touchscreen/usbtouchscreen.c 				   unsigned char *pkt, int len);
pkt              1386 drivers/input/touchscreen/usbtouchscreen.c                                  unsigned char *pkt, int len)
pkt              1390 drivers/input/touchscreen/usbtouchscreen.c 	if (!type->read_data(usbtouch, pkt))
pkt              1410 drivers/input/touchscreen/usbtouchscreen.c                                    unsigned char *pkt, int len)
pkt              1432 drivers/input/touchscreen/usbtouchscreen.c 			memcpy(usbtouch->buffer + usbtouch->buf_len, pkt, append);
pkt              1445 drivers/input/touchscreen/usbtouchscreen.c 		memcpy(usbtouch->buffer + usbtouch->buf_len, pkt, tmp);
pkt              1448 drivers/input/touchscreen/usbtouchscreen.c 		buffer = pkt + tmp;
pkt              1451 drivers/input/touchscreen/usbtouchscreen.c 		buffer = pkt;
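
usbtouchscreen.c above pairs a table of per-device callbacks (read_data, get_pkt_len, process_pkt) with many small decoders that pull x, y, and touch state out of vendor-specific byte layouts. A minimal sketch of that dispatch using the DMC TSC-10/25 layout visible above; the struct names and sample bytes are illustrative only.

    #include <stdio.h>

    struct ts_state { int x, y, touch; };

    struct ts_ops {
            /* return 1 when the packet yielded a valid report */
            int (*read_data)(struct ts_state *st, const unsigned char *pkt);
    };

    /* layout as in dmc_tsc10_read_data() above: 10-bit x/y, touch in bit 0 */
    static int tsc10_read(struct ts_state *st, const unsigned char *pkt)
    {
            st->x = ((pkt[2] & 0x03) << 8) | pkt[1];
            st->y = ((pkt[4] & 0x03) << 8) | pkt[3];
            st->touch = pkt[0] & 0x01;
            return 1;
    }

    static const struct ts_ops tsc10_ops = { .read_data = tsc10_read };

    int main(void)
    {
            unsigned char pkt[5] = { 0x01, 0x34, 0x02, 0x78, 0x01 };
            struct ts_state st;

            if (tsc10_ops.read_data(&st, pkt))
                    printf("x=%d y=%d touch=%d\n", st.x, st.y, st.touch);
            return 0;
    }
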
pkt                67 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq_pkt		*pkt; /* the packet sent from mailbox client */
pkt               152 drivers/mailbox/mtk-cmdq-mailbox.c 	u64 *prev_task_base = prev_task->pkt->va_base;
pkt               156 drivers/mailbox/mtk-cmdq-mailbox.c 				prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
pkt               157 drivers/mailbox/mtk-cmdq-mailbox.c 	prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
pkt               160 drivers/mailbox/mtk-cmdq-mailbox.c 				   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
pkt               178 drivers/mailbox/mtk-cmdq-mailbox.c 	u64 *base = task->pkt->va_base;
pkt               181 drivers/mailbox/mtk-cmdq-mailbox.c 	dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
pkt               183 drivers/mailbox/mtk-cmdq-mailbox.c 	for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
pkt               187 drivers/mailbox/mtk-cmdq-mailbox.c 	dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
pkt               209 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq_task_cb *cb = &task->pkt->async_cb;
pkt               264 drivers/mailbox/mtk-cmdq-mailbox.c 		task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
pkt               353 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
pkt               368 drivers/mailbox/mtk-cmdq-mailbox.c 	task->pa_base = pkt->pa_base;
pkt               370 drivers/mailbox/mtk-cmdq-mailbox.c 	task->pkt = pkt;
pkt               377 drivers/mailbox/mtk-cmdq-mailbox.c 		writel(task->pa_base + pkt->cmd_buf_size,
pkt               417 drivers/mailbox/mtk-cmdq-mailbox.c 		writel(task->pa_base + pkt->cmd_buf_size,
pkt               892 drivers/media/dvb-core/dvb_net.c 			const u8 *pkt, int pkt_len)
pkt               910 drivers/media/dvb-core/dvb_net.c 	if ((pkt[5] & 0xfd) != 0xc1) {
pkt               913 drivers/media/dvb-core/dvb_net.c 	if ((pkt[5] & 0x3c) != 0x00) {
pkt               920 drivers/media/dvb-core/dvb_net.c 	if (pkt[5] & 0x02) {
pkt               922 drivers/media/dvb-core/dvb_net.c 		if (pkt_len < 24 || memcmp(&pkt[12], "\xaa\xaa\x03\0\0\0", 6)) {
pkt               928 drivers/media/dvb-core/dvb_net.c 	if (pkt[7]) {
pkt               948 drivers/media/dvb-core/dvb_net.c 	memcpy(eth + 14, pkt + 12 + snap, pkt_len - 12 - 4 - snap);
pkt               951 drivers/media/dvb-core/dvb_net.c 	eth[0]=pkt[0x0b];
pkt               952 drivers/media/dvb-core/dvb_net.c 	eth[1]=pkt[0x0a];
pkt               953 drivers/media/dvb-core/dvb_net.c 	eth[2]=pkt[0x09];
pkt               954 drivers/media/dvb-core/dvb_net.c 	eth[3]=pkt[0x08];
pkt               955 drivers/media/dvb-core/dvb_net.c 	eth[4]=pkt[0x04];
pkt               956 drivers/media/dvb-core/dvb_net.c 	eth[5]=pkt[0x03];
pkt               961 drivers/media/dvb-core/dvb_net.c 		eth[12] = pkt[18];
pkt               962 drivers/media/dvb-core/dvb_net.c 		eth[13] = pkt[19];
pkt               967 drivers/media/dvb-core/dvb_net.c 		if (pkt[12] >> 4 == 6) { /* version field from IP header */
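
dvb_net.c above rebuilds an Ethernet header from an MPE section: the destination MAC is gathered from scattered positions in the section header and the EtherType is taken from the SNAP header or chosen from the IP version nibble. The sketch below reassembles the MAC using exactly the byte positions shown; the sample section bytes are made up.

    #include <stdio.h>

    /* gather the destination MAC from its positions in the MPE section header */
    static void mpe_dest_mac(const unsigned char *pkt, unsigned char mac[6])
    {
            mac[0] = pkt[0x0b];
            mac[1] = pkt[0x0a];
            mac[2] = pkt[0x09];
            mac[3] = pkt[0x08];
            mac[4] = pkt[0x04];
            mac[5] = pkt[0x03];
    }

    int main(void)
    {
            unsigned char sec[16] = { 0 };
            unsigned char mac[6];

            sec[0x0b] = 0x00; sec[0x0a] = 0x11; sec[0x09] = 0x22;
            sec[0x08] = 0x33; sec[0x04] = 0x44; sec[0x03] = 0x55;

            mpe_dest_mac(sec, mac);
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
            return 0;
    }
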
pkt              10679 drivers/media/dvb-frontends/drx39xyj/drxj.c 	u32 ber, cnt, err, pkt;
pkt              10694 drivers/media/dvb-frontends/drx39xyj/drxj.c 		rc = get_acc_pkt_err(demod, &pkt);
pkt              10709 drivers/media/dvb-frontends/drx39xyj/drxj.c 			rc = get_vsb_post_rs_pck_err(dev_addr, &err, &pkt);
pkt              10717 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->block_count.stat[0].uvalue += pkt;
pkt                13 drivers/media/platform/qcom/venus/hfi_cmds.c void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type)
pkt                15 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt);
pkt                16 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_INIT;
pkt                17 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->arch_type = arch_type;
pkt                20 drivers/media/platform/qcom/venus/hfi_cmds.c void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt)
pkt                22 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt);
pkt                23 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_PC_PREP;
pkt                26 drivers/media/platform/qcom/venus/hfi_cmds.c void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable)
pkt                28 drivers/media/platform/qcom/venus/hfi_cmds.c 	struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
pkt                30 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
pkt                31 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt                32 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt                33 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR;
pkt                37 drivers/media/platform/qcom/venus/hfi_cmds.c void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode,
pkt                42 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
pkt                43 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt                44 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt                45 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
pkt                46 drivers/media/platform/qcom/venus/hfi_cmds.c 	hfi = (struct hfi_debug_config *)&pkt->data[1];
pkt                51 drivers/media/platform/qcom/venus/hfi_cmds.c void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode)
pkt                53 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt) + sizeof(u32);
pkt                54 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt                55 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt                56 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE;
pkt                57 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[1] = mode;
pkt                60 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
pkt                63 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt);
pkt                64 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_SET_RESOURCE;
pkt                65 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->resource_handle = hash32_ptr(cookie);
pkt                71 drivers/media/platform/qcom/venus/hfi_cmds.c 			(struct hfi_resource_ocmem *)&pkt->resource_data[0];
pkt                75 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->resource_type = HFI_RESOURCE_OCMEM;
pkt                76 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->hdr.size += sizeof(*res) - sizeof(u32);
pkt                87 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id,
pkt                90 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt);
pkt                91 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_RELEASE_RESOURCE;
pkt                92 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->resource_handle = hash32_ptr(cookie);
pkt                97 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->resource_type = HFI_RESOURCE_OCMEM;
pkt               108 drivers/media/platform/qcom/venus/hfi_cmds.c void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie)
pkt               110 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt);
pkt               111 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_PING;
pkt               112 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->client_data = cookie;
pkt               115 drivers/media/platform/qcom/venus/hfi_cmds.c void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable)
pkt               117 drivers/media/platform/qcom/venus/hfi_cmds.c 	struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
pkt               119 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
pkt               120 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt               121 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt               122 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
pkt               126 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type)
pkt               137 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt);
pkt               138 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_TEST_SSR;
pkt               139 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->trigger_type = trigger_type;
pkt               144 drivers/media/platform/qcom/venus/hfi_cmds.c void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt)
pkt               146 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.size = sizeof(*pkt);
pkt               147 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY;
pkt               148 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt               149 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
pkt               152 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
pkt               155 drivers/media/platform/qcom/venus/hfi_cmds.c 	if (!pkt || !cookie || !codec)
pkt               158 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               159 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
pkt               160 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               161 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->session_domain = session_type;
pkt               162 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->session_codec = codec;
pkt               167 drivers/media/platform/qcom/venus/hfi_cmds.c void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie)
pkt               169 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               170 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = pkt_type;
pkt               171 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               174 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
pkt               179 drivers/media/platform/qcom/venus/hfi_cmds.c 	if (!cookie || !pkt || !bd)
pkt               182 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_BUFFERS;
pkt               183 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               184 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->buffer_size = bd->buffer_size;
pkt               185 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->min_buffer_size = bd->buffer_size;
pkt               186 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_buffers = bd->num_buffers;
pkt               192 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->extradata_size = bd->extradata_size;
pkt               193 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size = sizeof(*pkt) - sizeof(u32) +
pkt               195 drivers/media/platform/qcom/venus/hfi_cmds.c 		bi = (struct hfi_buffer_info *)pkt->buffer_info;
pkt               196 drivers/media/platform/qcom/venus/hfi_cmds.c 		for (i = 0; i < pkt->num_buffers; i++) {
pkt               201 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->extradata_size = 0;
pkt               202 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size = sizeof(*pkt) +
pkt               204 drivers/media/platform/qcom/venus/hfi_cmds.c 		for (i = 0; i < pkt->num_buffers; i++)
pkt               205 drivers/media/platform/qcom/venus/hfi_cmds.c 			pkt->buffer_info[i] = bd->device_addr;
pkt               208 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->buffer_type = bd->buffer_type;
pkt               213 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
pkt               218 drivers/media/platform/qcom/venus/hfi_cmds.c 	if (!cookie || !pkt || !bd)
pkt               221 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
pkt               222 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               223 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->buffer_size = bd->buffer_size;
pkt               224 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_buffers = bd->num_buffers;
pkt               230 drivers/media/platform/qcom/venus/hfi_cmds.c 		bi = (struct hfi_buffer_info *)pkt->buffer_info;
pkt               231 drivers/media/platform/qcom/venus/hfi_cmds.c 		for (i = 0; i < pkt->num_buffers; i++) {
pkt               235 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size =
pkt               239 drivers/media/platform/qcom/venus/hfi_cmds.c 		for (i = 0; i < pkt->num_buffers; i++)
pkt               240 drivers/media/platform/qcom/venus/hfi_cmds.c 			pkt->buffer_info[i] = bd->device_addr;
pkt               242 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->extradata_size = 0;
pkt               243 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size =
pkt               248 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->response_req = bd->response_required;
pkt               249 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->buffer_type = bd->buffer_type;
pkt               254 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt,
pkt               260 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               261 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
pkt               262 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               263 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp);
pkt               264 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp);
pkt               265 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->flags = in_frame->flags;
pkt               266 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->mark_target = in_frame->mark_target;
pkt               267 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->mark_data = in_frame->mark_data;
pkt               268 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->offset = in_frame->offset;
pkt               269 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->alloc_len = in_frame->alloc_len;
pkt               270 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->filled_len = in_frame->filled_len;
pkt               271 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->input_tag = in_frame->clnt_data;
pkt               272 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->packet_buffer = in_frame->device_addr;
pkt               278 drivers/media/platform/qcom/venus/hfi_cmds.c 		struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt,
pkt               284 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               285 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
pkt               286 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               287 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->view_id = 0;
pkt               288 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp);
pkt               289 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp);
pkt               290 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->flags = in_frame->flags;
pkt               291 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->mark_target = in_frame->mark_target;
pkt               292 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->mark_data = in_frame->mark_data;
pkt               293 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->offset = in_frame->offset;
pkt               294 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->alloc_len = in_frame->alloc_len;
pkt               295 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->filled_len = in_frame->filled_len;
pkt               296 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->input_tag = in_frame->clnt_data;
pkt               297 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->packet_buffer = in_frame->device_addr;
pkt               298 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->extradata_buffer = in_frame->extradata_addr;
pkt               303 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, void *cookie,
pkt               309 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               310 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FILL_BUFFER;
pkt               311 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               314 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->stream_id = 0;
pkt               316 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->stream_id = 1;
pkt               318 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->output_tag = out_frame->clnt_data;
pkt               319 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->packet_buffer = out_frame->device_addr;
pkt               320 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->extradata_buffer = out_frame->extradata_addr;
pkt               321 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->alloc_len = out_frame->alloc_len;
pkt               322 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->filled_len = out_frame->filled_len;
pkt               323 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->offset = out_frame->offset;
pkt               324 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = out_frame->extradata_size;
pkt               330 drivers/media/platform/qcom/venus/hfi_cmds.c 		struct hfi_session_parse_sequence_header_pkt *pkt,
pkt               336 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               337 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER;
pkt               338 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               339 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->header_len = seq_hdr_len;
pkt               340 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->packet_buffer = seq_hdr;
pkt               345 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt,
pkt               351 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               352 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_SEQUENCE_HEADER;
pkt               353 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               354 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->buffer_len = seq_hdr_len;
pkt               355 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->packet_buffer = seq_hdr;
pkt               360 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie, u32 type)
pkt               372 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               373 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
pkt               374 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               375 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->flush_type = type;
pkt               380 drivers/media/platform/qcom/venus/hfi_cmds.c static int pkt_session_get_property_1x(struct hfi_session_get_property_pkt *pkt,
pkt               391 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               392 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
pkt               393 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               394 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt               395 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = ptype;
pkt               400 drivers/media/platform/qcom/venus/hfi_cmds.c static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
pkt               406 drivers/media/platform/qcom/venus/hfi_cmds.c 	if (!pkt || !cookie || !pdata)
pkt               409 drivers/media/platform/qcom/venus/hfi_cmds.c 	prop_data = &pkt->data[1];
pkt               411 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt               412 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
pkt               413 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt               414 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt               415 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = ptype;
pkt               423 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*frate);
pkt               432 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
pkt               441 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fsize);
pkt               448 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) * 2;
pkt               456 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
pkt               464 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*sz);
pkt               473 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
pkt               481 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fmt);
pkt               496 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->data[1] = *in;
pkt               497 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) * 2;
pkt               504 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               511 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               519 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               529 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
pkt               538 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
pkt               554 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->data[1] = *in;
pkt               555 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) * 2;
pkt               562 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               569 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               576 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               583 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               587 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32);
pkt               598 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*brate);
pkt               606 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
pkt               622 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*pl);
pkt               631 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
pkt               649 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->data[1] = *in;
pkt               650 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) * 2;
pkt               657 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*res);
pkt               664 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ext);
pkt               683 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*db);
pkt               693 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant);
pkt               719 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range);
pkt               732 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*perf);
pkt               740 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*bframes);
pkt               748 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
pkt               755 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*idr);
pkt               763 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color);
pkt               792 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ops);
pkt               814 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
pkt               833 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
pkt               840 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               849 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*vui);
pkt               856 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               863 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               871 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mode);
pkt               878 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               885 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               892 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               900 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*thres);
pkt               919 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mvc);
pkt               938 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr);
pkt               947 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_use);
pkt               954 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_mark);
pkt               960 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->data[1] = *in;
pkt               961 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) * 2;
pkt               967 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->data[1] = *in;
pkt               968 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) * 2;
pkt               975 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt               985 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant);
pkt               996 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*csc);
pkt              1003 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt              1010 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt              1016 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->data[1] = *in;
pkt              1017 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) * 2;
pkt              1023 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->data[1] = *in;
pkt              1024 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) * 2;
pkt              1031 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
pkt              1038 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp);
pkt              1070 drivers/media/platform/qcom/venus/hfi_cmds.c pkt_session_get_property_3xx(struct hfi_session_get_property_pkt *pkt,
pkt              1075 drivers/media/platform/qcom/venus/hfi_cmds.c 	if (!pkt || !cookie)
pkt              1078 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(struct hfi_session_get_property_pkt);
pkt              1079 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
pkt              1080 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt              1081 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt              1085 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
pkt              1088 drivers/media/platform/qcom/venus/hfi_cmds.c 		ret = pkt_session_get_property_1x(pkt, cookie, ptype);
pkt              1096 drivers/media/platform/qcom/venus/hfi_cmds.c pkt_session_set_property_3xx(struct hfi_session_set_property_pkt *pkt,
pkt              1102 drivers/media/platform/qcom/venus/hfi_cmds.c 	if (!pkt || !cookie || !pdata)
pkt              1105 drivers/media/platform/qcom/venus/hfi_cmds.c 	prop_data = &pkt->data[1];
pkt              1107 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt              1108 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
pkt              1109 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt              1110 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt              1111 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = ptype;
pkt              1125 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
pkt              1146 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
pkt              1153 drivers/media/platform/qcom/venus/hfi_cmds.c 		ret = pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
pkt              1161 drivers/media/platform/qcom/venus/hfi_cmds.c pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
pkt              1166 drivers/media/platform/qcom/venus/hfi_cmds.c 	if (!pkt || !cookie || !pdata)
pkt              1169 drivers/media/platform/qcom/venus/hfi_cmds.c 	prop_data = &pkt->data[1];
pkt              1171 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.size = sizeof(*pkt);
pkt              1172 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
pkt              1173 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->shdr.session_id = hash32_ptr(cookie);
pkt              1174 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->num_properties = 1;
pkt              1175 drivers/media/platform/qcom/venus/hfi_cmds.c 	pkt->data[0] = ptype;
pkt              1190 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
pkt              1197 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
pkt              1204 drivers/media/platform/qcom/venus/hfi_cmds.c 		pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
pkt              1213 drivers/media/platform/qcom/venus/hfi_cmds.c 		return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
pkt              1219 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
pkt              1223 drivers/media/platform/qcom/venus/hfi_cmds.c 		return pkt_session_get_property_1x(pkt, cookie, ptype);
pkt              1225 drivers/media/platform/qcom/venus/hfi_cmds.c 	return pkt_session_get_property_3xx(pkt, cookie, ptype);
pkt              1228 drivers/media/platform/qcom/venus/hfi_cmds.c int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
pkt              1232 drivers/media/platform/qcom/venus/hfi_cmds.c 		return pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
pkt              1235 drivers/media/platform/qcom/venus/hfi_cmds.c 		return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
pkt              1237 drivers/media/platform/qcom/venus/hfi_cmds.c 	return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
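The hfi_cmds.c entries above all follow one builder pattern: the caller hands in a packet it has allocated, and the helper fills hdr.size (sizeof(*pkt) plus any variable payload), hdr.pkt_type, and, for session packets, a 32-bit session id derived from the instance cookie with hash32_ptr(). Below is a minimal, self-contained sketch of that pattern; the struct layouts and the trivial hash32_ptr() stand-in are assumptions for illustration, not the kernel's real hfi definitions.

/* Sketch of the hfi_cmds.c builder pattern; layouts are assumed. */
#include <stdint.h>

typedef uint32_t u32;

struct hfi_pkt_hdr {
	u32 size;		/* total packet size in bytes */
	u32 pkt_type;		/* HFI_CMD_* identifier */
};

struct hfi_session_hdr_pkt {
	struct hfi_pkt_hdr hdr;
	u32 session_id;		/* hash of the instance cookie */
};

struct hfi_session_pkt {
	struct hfi_session_hdr_pkt shdr;
};

/* Stand-in for the kernel's hash32_ptr(): fold a pointer into 32 bits. */
static u32 hash32_ptr(const void *ptr)
{
	return (u32)(uintptr_t)ptr;
}

/*
 * Same shape as pkt_session_cmd() in the listing: a bare session command
 * only needs size, packet type and session id.
 */
static void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type,
			    void *cookie)
{
	pkt->shdr.hdr.size = sizeof(*pkt);
	pkt->shdr.hdr.pkt_type = pkt_type;
	pkt->shdr.session_id = hash32_ptr(cookie);
}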
pkt               255 drivers/media/platform/qcom/venus/hfi_cmds.h void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type);
pkt               256 drivers/media/platform/qcom/venus/hfi_cmds.h void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt);
pkt               257 drivers/media/platform/qcom/venus/hfi_cmds.h void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable);
pkt               258 drivers/media/platform/qcom/venus/hfi_cmds.h void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable);
pkt               259 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
pkt               261 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id,
pkt               263 drivers/media/platform/qcom/venus/hfi_cmds.h void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode,
pkt               265 drivers/media/platform/qcom/venus/hfi_cmds.h void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode);
pkt               266 drivers/media/platform/qcom/venus/hfi_cmds.h void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie);
pkt               267 drivers/media/platform/qcom/venus/hfi_cmds.h void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt);
pkt               268 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type);
pkt               269 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
pkt               271 drivers/media/platform/qcom/venus/hfi_cmds.h void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie);
pkt               272 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
pkt               274 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
pkt               276 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt,
pkt               279 drivers/media/platform/qcom/venus/hfi_cmds.h 		struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt,
pkt               281 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt,
pkt               284 drivers/media/platform/qcom/venus/hfi_cmds.h 		struct hfi_session_parse_sequence_header_pkt *pkt,
pkt               286 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt,
pkt               288 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie,
pkt               290 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
pkt               292 drivers/media/platform/qcom/venus/hfi_cmds.h int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
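The *_set_property builders declared above share a second convention visible in the hfi_cmds.c entries: each packet carries exactly one property, data[0] holds the HFI_PROPERTY_* id, the typed payload starts at data[1], and the header size grows by sizeof(u32) for the id plus the payload size. A hedged sketch of that layout follows; the struct, the property id and the command id are simplified placeholders, not the kernel definitions (the kernel derives the size from sizeof(*pkt) because its data[] array is declared with a single element).

/* Sketch of the one-property set_property layout; layouts are assumed. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

typedef uint32_t u32;

struct hfi_pkt_hdr_s {
	u32 size;
	u32 pkt_type;
};

struct hfi_enable_s {
	u32 enable;
};

/* Simplified stand-in for hfi_sys_set_property_pkt. */
struct sys_set_property_pkt_s {
	struct hfi_pkt_hdr_s hdr;
	u32 num_properties;
	u32 data[16];		/* data[0] = property id, payload follows */
};

#define PROP_SYS_IDLE_INDICATOR	0x1u	/* placeholder, not the real id */
#define CMD_SYS_SET_PROPERTY	0x2u	/* placeholder, not the real id */

static void sketch_sys_idle_indicator(struct sys_set_property_pkt_s *pkt,
				      u32 enable)
{
	struct hfi_enable_s hfi = { .enable = enable };

	/* header and count, plus one property id (u32), plus its payload */
	pkt->hdr.size = offsetof(struct sys_set_property_pkt_s, data)
			+ sizeof(u32) + sizeof(hfi);
	pkt->hdr.pkt_type = CMD_SYS_SET_PROPERTY;
	pkt->num_properties = 1;
	pkt->data[0] = PROP_SYS_IDLE_INDICATOR;
	memcpy(&pkt->data[1], &hfi, sizeof(hfi));
}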
pkt                18 drivers/media/platform/qcom/venus/hfi_msgs.c 			      struct hfi_msg_event_notify_pkt *pkt)
pkt                35 drivers/media/platform/qcom/venus/hfi_msgs.c 	switch (pkt->event_data1) {
pkt                44 drivers/media/platform/qcom/venus/hfi_msgs.c 	event.event_type = pkt->event_data1;
pkt                46 drivers/media/platform/qcom/venus/hfi_msgs.c 	num_properties_changed = pkt->event_data2;
pkt                52 drivers/media/platform/qcom/venus/hfi_msgs.c 	data_ptr = (u8 *)&pkt->ext_event_data[0];
pkt               120 drivers/media/platform/qcom/venus/hfi_msgs.c 				     struct hfi_msg_event_notify_pkt *pkt)
pkt               126 drivers/media/platform/qcom/venus/hfi_msgs.c 		pkt->ext_event_data;
pkt               138 drivers/media/platform/qcom/venus/hfi_msgs.c 			    struct hfi_msg_event_notify_pkt *pkt)
pkt               140 drivers/media/platform/qcom/venus/hfi_msgs.c 	if (pkt)
pkt               143 drivers/media/platform/qcom/venus/hfi_msgs.c 			pkt->shdr.session_id, pkt->event_data1,
pkt               144 drivers/media/platform/qcom/venus/hfi_msgs.c 			pkt->event_data2);
pkt               151 drivers/media/platform/qcom/venus/hfi_msgs.c 		    struct hfi_msg_event_notify_pkt *pkt)
pkt               156 drivers/media/platform/qcom/venus/hfi_msgs.c 		pkt->event_data1, pkt->shdr.session_id);
pkt               161 drivers/media/platform/qcom/venus/hfi_msgs.c 	switch (pkt->event_data1) {
pkt               171 drivers/media/platform/qcom/venus/hfi_msgs.c 			pkt->event_data1, pkt->event_data2,
pkt               172 drivers/media/platform/qcom/venus/hfi_msgs.c 			pkt->shdr.session_id);
pkt               174 drivers/media/platform/qcom/venus/hfi_msgs.c 		inst->error = pkt->event_data1;
pkt               183 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_event_notify_pkt *pkt = packet;
pkt               188 drivers/media/platform/qcom/venus/hfi_msgs.c 	switch (pkt->event_id) {
pkt               190 drivers/media/platform/qcom/venus/hfi_msgs.c 		event_sys_error(core, EVT_SYS_ERROR, pkt);
pkt               193 drivers/media/platform/qcom/venus/hfi_msgs.c 		event_session_error(core, inst, pkt);
pkt               196 drivers/media/platform/qcom/venus/hfi_msgs.c 		event_seq_changed(core, inst, pkt);
pkt               199 drivers/media/platform/qcom/venus/hfi_msgs.c 		event_release_buffer_ref(core, inst, pkt);
pkt               211 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_sys_init_done_pkt *pkt = packet;
pkt               215 drivers/media/platform/qcom/venus/hfi_msgs.c 	error = pkt->error_type;
pkt               219 drivers/media/platform/qcom/venus/hfi_msgs.c 	if (!pkt->num_properties) {
pkt               224 drivers/media/platform/qcom/venus/hfi_msgs.c 	rem_bytes = pkt->hdr.size - sizeof(*pkt) + sizeof(u32);
pkt               231 drivers/media/platform/qcom/venus/hfi_msgs.c 	error = hfi_parser(core, inst, pkt->data, rem_bytes);
pkt               240 drivers/media/platform/qcom/venus/hfi_msgs.c 			   struct hfi_msg_sys_property_info_pkt *pkt)
pkt               244 drivers/media/platform/qcom/venus/hfi_msgs.c 	req_bytes = pkt->hdr.size - sizeof(*pkt);
pkt               246 drivers/media/platform/qcom/venus/hfi_msgs.c 	if (req_bytes < 128 || !pkt->data[1] || pkt->num_properties > 1)
pkt               250 drivers/media/platform/qcom/venus/hfi_msgs.c 	dev_dbg(dev, "F/W version: %s\n", (u8 *)&pkt->data[1]);
pkt               256 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_sys_property_info_pkt *pkt = packet;
pkt               259 drivers/media/platform/qcom/venus/hfi_msgs.c 	if (!pkt->num_properties) {
pkt               264 drivers/media/platform/qcom/venus/hfi_msgs.c 	switch (pkt->data[0]) {
pkt               266 drivers/media/platform/qcom/venus/hfi_msgs.c 		sys_get_prop_image_version(dev, pkt);
pkt               278 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_sys_release_resource_done_pkt *pkt = packet;
pkt               280 drivers/media/platform/qcom/venus/hfi_msgs.c 	core->error = pkt->error_type;
pkt               287 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_sys_ping_ack_pkt *pkt = packet;
pkt               291 drivers/media/platform/qcom/venus/hfi_msgs.c 	if (pkt->client_data != 0xbeef)
pkt               306 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_sys_pc_prep_done_pkt *pkt = packet;
pkt               308 drivers/media/platform/qcom/venus/hfi_msgs.c 	dev_dbg(core->dev, "pc prepare done (error %x)\n", pkt->error_type);
pkt               312 drivers/media/platform/qcom/venus/hfi_msgs.c session_get_prop_profile_level(struct hfi_msg_session_property_info_pkt *pkt,
pkt               318 drivers/media/platform/qcom/venus/hfi_msgs.c 	req_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
pkt               324 drivers/media/platform/qcom/venus/hfi_msgs.c 	hfi = (struct hfi_profile_level *)&pkt->data[1];
pkt               332 drivers/media/platform/qcom/venus/hfi_msgs.c session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
pkt               339 drivers/media/platform/qcom/venus/hfi_msgs.c 	req_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
pkt               341 drivers/media/platform/qcom/venus/hfi_msgs.c 	if (!req_bytes || req_bytes % sizeof(*buf_req) || !pkt->data[1])
pkt               345 drivers/media/platform/qcom/venus/hfi_msgs.c 	buf_req = (struct hfi_buffer_requirements *)&pkt->data[1];
pkt               366 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_property_info_pkt *pkt = packet;
pkt               371 drivers/media/platform/qcom/venus/hfi_msgs.c 	if (!pkt->num_properties) {
pkt               377 drivers/media/platform/qcom/venus/hfi_msgs.c 	switch (pkt->data[0]) {
pkt               380 drivers/media/platform/qcom/venus/hfi_msgs.c 		error = session_get_prop_buf_req(pkt, hprop->bufreq);
pkt               384 drivers/media/platform/qcom/venus/hfi_msgs.c 		error = session_get_prop_profile_level(pkt,
pkt               391 drivers/media/platform/qcom/venus/hfi_msgs.c 			pkt->data[0]);
pkt               403 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_init_done_pkt *pkt = packet;
pkt               407 drivers/media/platform/qcom/venus/hfi_msgs.c 	error = pkt->error_type;
pkt               414 drivers/media/platform/qcom/venus/hfi_msgs.c 	rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt) + sizeof(u32);
pkt               420 drivers/media/platform/qcom/venus/hfi_msgs.c 	error = hfi_parser(core, inst, pkt->data, rem_bytes);
pkt               429 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_load_resources_done_pkt *pkt = packet;
pkt               431 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               438 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_flush_done_pkt *pkt = packet;
pkt               440 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               447 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_empty_buffer_done_pkt *pkt = packet;
pkt               449 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               450 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->ops->buf_done(inst, HFI_BUFFER_INPUT, pkt->input_tag,
pkt               451 drivers/media/platform/qcom/venus/hfi_msgs.c 			    pkt->filled_len, pkt->offset, 0, 0, 0);
pkt               465 drivers/media/platform/qcom/venus/hfi_msgs.c 		struct hfi_msg_session_fbd_compressed_pkt *pkt = packet;
pkt               467 drivers/media/platform/qcom/venus/hfi_msgs.c 		timestamp_hi = pkt->time_stamp_hi;
pkt               468 drivers/media/platform/qcom/venus/hfi_msgs.c 		timestamp_lo = pkt->time_stamp_lo;
pkt               469 drivers/media/platform/qcom/venus/hfi_msgs.c 		hfi_flags = pkt->flags;
pkt               470 drivers/media/platform/qcom/venus/hfi_msgs.c 		offset = pkt->offset;
pkt               471 drivers/media/platform/qcom/venus/hfi_msgs.c 		filled_len = pkt->filled_len;
pkt               472 drivers/media/platform/qcom/venus/hfi_msgs.c 		pic_type = pkt->picture_type;
pkt               473 drivers/media/platform/qcom/venus/hfi_msgs.c 		output_tag = pkt->output_tag;
pkt               476 drivers/media/platform/qcom/venus/hfi_msgs.c 		error = pkt->error_type;
pkt               478 drivers/media/platform/qcom/venus/hfi_msgs.c 		struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt =
pkt               481 drivers/media/platform/qcom/venus/hfi_msgs.c 		timestamp_hi = pkt->time_stamp_hi;
pkt               482 drivers/media/platform/qcom/venus/hfi_msgs.c 		timestamp_lo = pkt->time_stamp_lo;
pkt               483 drivers/media/platform/qcom/venus/hfi_msgs.c 		hfi_flags = pkt->flags;
pkt               484 drivers/media/platform/qcom/venus/hfi_msgs.c 		offset = pkt->offset;
pkt               485 drivers/media/platform/qcom/venus/hfi_msgs.c 		filled_len = pkt->filled_len;
pkt               486 drivers/media/platform/qcom/venus/hfi_msgs.c 		pic_type = pkt->picture_type;
pkt               487 drivers/media/platform/qcom/venus/hfi_msgs.c 		output_tag = pkt->output_tag;
pkt               489 drivers/media/platform/qcom/venus/hfi_msgs.c 		if (pkt->stream_id == 0)
pkt               491 drivers/media/platform/qcom/venus/hfi_msgs.c 		else if (pkt->stream_id == 1)
pkt               494 drivers/media/platform/qcom/venus/hfi_msgs.c 		error = pkt->error_type;
pkt               538 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_start_done_pkt *pkt = packet;
pkt               540 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               547 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_stop_done_pkt *pkt = packet;
pkt               549 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               556 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_release_resources_done_pkt *pkt = packet;
pkt               558 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               565 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_release_buffers_done_pkt *pkt = packet;
pkt               567 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               574 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_end_done_pkt *pkt = packet;
pkt               576 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               583 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_sys_session_abort_done_pkt *pkt = packet;
pkt               585 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               592 drivers/media/platform/qcom/venus/hfi_msgs.c 	struct hfi_msg_session_get_sequence_hdr_done_pkt *pkt = packet;
pkt               594 drivers/media/platform/qcom/venus/hfi_msgs.c 	inst->error = pkt->error_type;
pkt               599 drivers/media/platform/qcom/venus/hfi_msgs.c 	u32 pkt;
pkt               607 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_EVENT_NOTIFY,
pkt               611 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SYS_INIT,
pkt               616 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SYS_PROPERTY_INFO,
pkt               621 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SYS_RELEASE_RESOURCE,
pkt               626 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SYS_PING_ACK,
pkt               631 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SYS_IDLE,
pkt               636 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SYS_PC_PREP,
pkt               641 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SYS_SESSION_INIT,
pkt               645 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SYS_SESSION_END,
pkt               649 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_LOAD_RESOURCES,
pkt               653 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_START,
pkt               657 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_STOP,
pkt               661 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SYS_SESSION_ABORT,
pkt               665 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_EMPTY_BUFFER,
pkt               669 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_FILL_BUFFER,
pkt               674 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_FLUSH,
pkt               678 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_PROPERTY_INFO,
pkt               682 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_RELEASE_RESOURCES,
pkt               686 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_GET_SEQUENCE_HEADER,
pkt               690 drivers/media/platform/qcom/venus/hfi_msgs.c 	{.pkt = HFI_MSG_SESSION_RELEASE_BUFFERS,
pkt               726 drivers/media/platform/qcom/venus/hfi_msgs.c 		if (handler->pkt != hdr->pkt_type)
pkt               746 drivers/media/platform/qcom/venus/hfi_msgs.c 		struct hfi_session_pkt *pkt;
pkt               748 drivers/media/platform/qcom/venus/hfi_msgs.c 		pkt = (struct hfi_session_pkt *)hdr;
pkt               749 drivers/media/platform/qcom/venus/hfi_msgs.c 		inst = to_instance(core, pkt->shdr.session_id);
pkt               753 drivers/media/platform/qcom/venus/hfi_msgs.c 				 pkt->shdr.session_id,
pkt               754 drivers/media/platform/qcom/venus/hfi_msgs.c 				 handler ? handler->pkt : 0);
pkt               762 drivers/media/platform/qcom/venus/hfi_msgs.c 				pkt->shdr.session_id);
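The hfi_msgs.c tail of the listing shows how firmware replies are routed: a static table maps each HFI_MSG_* packet type (the .pkt field) to a handler, hfi_process_msg_packet() walks the table comparing handler->pkt against hdr->pkt_type, and session-scoped messages resolve their instance from shdr.session_id. A compact, self-contained sketch of that dispatch scheme is below; the types, message ids and handler signatures are hypothetical simplifications.

/* Sketch of the .pkt-keyed message dispatch seen in hfi_msgs.c. */
#include <stdint.h>
#include <stddef.h>

typedef uint32_t u32;

struct pkt_hdr {
	u32 size;
	u32 pkt_type;
};

struct core;			/* opaque in this sketch */
struct inst;			/* opaque in this sketch */

typedef void (*pkt_handler_fn)(struct core *core, struct inst *inst,
			       void *packet);

struct msg_handler {
	u32 pkt;		/* message id this entry handles */
	pkt_handler_fn done;
};

/* Hypothetical message ids and handlers for illustration only. */
enum { MSG_SYS_INIT = 0x20001, MSG_SESSION_FLUSH = 0x21006 };

static void sys_init_done(struct core *core, struct inst *inst, void *pkt) {}
static void session_flush_done(struct core *core, struct inst *inst, void *pkt) {}

static const struct msg_handler handlers[] = {
	{ .pkt = MSG_SYS_INIT,      .done = sys_init_done },
	{ .pkt = MSG_SESSION_FLUSH, .done = session_flush_done },
};

static const struct msg_handler *find_handler(const struct pkt_hdr *hdr)
{
	size_t i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].pkt == hdr->pkt_type)
			return &handlers[i];

	return NULL;		/* unknown packet type */
}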
pkt               233 drivers/media/platform/qcom/venus/hfi_venus.c 			    struct iface_queue *queue, void *pkt, u32 *tx_req)
pkt               283 drivers/media/platform/qcom/venus/hfi_venus.c 			memcpy(pkt, rd_ptr, dwords << 2);
pkt               289 drivers/media/platform/qcom/venus/hfi_venus.c 			memcpy(pkt, rd_ptr, len);
pkt               290 drivers/media/platform/qcom/venus/hfi_venus.c 			memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
pkt               320 drivers/media/platform/qcom/venus/hfi_venus.c 	venus_dump_packet(hdev, pkt);
pkt               375 drivers/media/platform/qcom/venus/hfi_venus.c 					 void *pkt)
pkt               386 drivers/media/platform/qcom/venus/hfi_venus.c 	cmd_packet = (struct hfi_pkt_hdr *)pkt;
pkt               391 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_write_queue(hdev, queue, pkt, &rx_req);
pkt               403 drivers/media/platform/qcom/venus/hfi_venus.c static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt)
pkt               408 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write_nolock(hdev, pkt);
pkt               418 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_sys_set_resource_pkt *pkt;
pkt               425 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_sys_set_resource_pkt *)packet;
pkt               427 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
pkt               431 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write(hdev, pkt);
pkt               604 drivers/media/platform/qcom/venus/hfi_venus.c 					void *pkt)
pkt               615 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
pkt               625 drivers/media/platform/qcom/venus/hfi_venus.c static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
pkt               630 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_msgq_read_nolock(hdev, pkt);
pkt               637 drivers/media/platform/qcom/venus/hfi_venus.c 					void *pkt)
pkt               649 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
pkt               659 drivers/media/platform/qcom/venus/hfi_venus.c static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
pkt               663 drivers/media/platform/qcom/venus/hfi_venus.c 	if (!pkt)
pkt               667 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_dbgq_read_nolock(hdev, pkt);
pkt               773 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_sys_set_property_pkt *pkt;
pkt               777 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt               779 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
pkt               781 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write(hdev, pkt);
pkt               790 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_sys_set_property_pkt *pkt;
pkt               794 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt               796 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt_sys_coverage_config(pkt, mode);
pkt               798 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write(hdev, pkt);
pkt               808 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_sys_set_property_pkt *pkt;
pkt               815 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt               817 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt_sys_idle_indicator(pkt, enable);
pkt               819 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write(hdev, pkt);
pkt               829 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_sys_set_property_pkt *pkt;
pkt               833 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt               835 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt_sys_power_control(pkt, enable);
pkt               837 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write(hdev, pkt);
pkt               891 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_pkt pkt;
pkt               893 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt_session_cmd(&pkt, pkt_type, inst);
pkt               895 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, &pkt);
pkt               904 drivers/media/platform/qcom/venus/hfi_venus.c 		struct hfi_msg_sys_coverage_pkt *pkt = packet;
pkt               906 drivers/media/platform/qcom/venus/hfi_venus.c 		if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
pkt               907 drivers/media/platform/qcom/venus/hfi_venus.c 			struct hfi_msg_sys_debug_pkt *pkt = packet;
pkt               909 drivers/media/platform/qcom/venus/hfi_venus.c 			dev_dbg(dev, "%s", pkt->msg_data);
pkt               918 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_sys_pc_prep_pkt pkt;
pkt               923 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt_sys_pc_prep(&pkt);
pkt               925 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write(hdev, &pkt);
pkt              1003 drivers/media/platform/qcom/venus/hfi_venus.c 	void *pkt;
pkt              1010 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = hdev->pkt_buf;
pkt              1017 drivers/media/platform/qcom/venus/hfi_venus.c 	while (!venus_iface_msgq_read(hdev, pkt)) {
pkt              1018 drivers/media/platform/qcom/venus/hfi_venus.c 		msg_ret = hfi_process_msg_packet(core, pkt);
pkt              1021 drivers/media/platform/qcom/venus/hfi_venus.c 			venus_process_msg_sys_error(hdev, pkt);
pkt              1071 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_sys_init_pkt pkt;
pkt              1074 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
pkt              1078 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write(hdev, &pkt);
pkt              1109 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_sys_ping_pkt pkt;
pkt              1111 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt_sys_ping(&pkt, cookie);
pkt              1113 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, &pkt);
pkt              1119 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_sys_test_ssr_pkt pkt;
pkt              1122 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
pkt              1126 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, &pkt);
pkt              1133 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_init_pkt pkt;
pkt              1136 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_session_init(&pkt, inst, session_type, codec);
pkt              1140 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write(hdev, &pkt);
pkt              1176 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_flush_pkt pkt;
pkt              1179 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_session_flush(&pkt, inst, flush_mode);
pkt              1183 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, &pkt);
pkt              1209 drivers/media/platform/qcom/venus/hfi_venus.c 		struct hfi_session_empty_buffer_compressed_pkt pkt;
pkt              1211 drivers/media/platform/qcom/venus/hfi_venus.c 		ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
pkt              1215 drivers/media/platform/qcom/venus/hfi_venus.c 		ret = venus_iface_cmdq_write(hdev, &pkt);
pkt              1217 drivers/media/platform/qcom/venus/hfi_venus.c 		struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
pkt              1219 drivers/media/platform/qcom/venus/hfi_venus.c 		ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
pkt              1223 drivers/media/platform/qcom/venus/hfi_venus.c 		ret = venus_iface_cmdq_write(hdev, &pkt);
pkt              1235 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_fill_buffer_pkt pkt;
pkt              1238 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_session_ftb(&pkt, inst, out_frame);
pkt              1242 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, &pkt);
pkt              1249 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_set_buffers_pkt *pkt;
pkt              1256 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_session_set_buffers_pkt *)packet;
pkt              1258 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_session_set_buffers(pkt, inst, bd);
pkt              1262 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, pkt);
pkt              1269 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_release_buffer_pkt *pkt;
pkt              1276 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_session_release_buffer_pkt *)packet;
pkt              1278 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_session_unset_buffers(pkt, inst, bd);
pkt              1282 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, pkt);
pkt              1299 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_parse_sequence_header_pkt *pkt;
pkt              1303 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
pkt              1305 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
pkt              1309 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_iface_cmdq_write(hdev, pkt);
pkt              1320 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_get_sequence_header_pkt *pkt;
pkt              1324 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
pkt              1326 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
pkt              1330 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, pkt);
pkt              1337 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_set_property_pkt *pkt;
pkt              1341 drivers/media/platform/qcom/venus/hfi_venus.c 	pkt = (struct hfi_session_set_property_pkt *)packet;
pkt              1343 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_session_set_property(pkt, inst, ptype, pdata);
pkt              1349 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, pkt);
pkt              1355 drivers/media/platform/qcom/venus/hfi_venus.c 	struct hfi_session_get_property_pkt pkt;
pkt              1358 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = pkt_session_get_property(&pkt, inst, ptype);
pkt              1362 drivers/media/platform/qcom/venus/hfi_venus.c 	return venus_iface_cmdq_write(hdev, &pkt);
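The venus_read_queue() entries above copy a packet out of a shared-memory ring: when the packet fits before the end of the queue it is a single memcpy, otherwise the copy is split at the wrap point and the remainder is taken from the start of the queue memory. A standalone sketch of that wrap-around copy follows, using byte counts instead of the driver's dword indices; the buffer layout and names are assumptions.

/* Sketch of the wrap-around ring-buffer read used by venus_read_queue(). */
#include <stdint.h>
#include <string.h>

struct ring {
	uint8_t *base;		/* start of the shared queue memory */
	size_t size;		/* total queue size in bytes */
	size_t rd;		/* current read offset in bytes */
};

/*
 * Copy pkt_len bytes of the next packet into pkt, splitting the copy if
 * the packet wraps past the end of the queue memory.  Validation against
 * the write index is omitted here.
 */
static void ring_read(struct ring *q, uint8_t *pkt, size_t pkt_len)
{
	size_t first = q->size - q->rd;		/* bytes until the wrap point */

	if (pkt_len <= first) {
		memcpy(pkt, q->base + q->rd, pkt_len);
		q->rd += pkt_len;
		if (q->rd == q->size)
			q->rd = 0;
	} else {
		memcpy(pkt, q->base + q->rd, first);
		memcpy(pkt + first, q->base, pkt_len - first);
		q->rd = pkt_len - first;
	}
}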
pkt                24 drivers/misc/habanalabs/debugfs.c 	struct armcp_packet pkt;
pkt                30 drivers/misc/habanalabs/debugfs.c 	memset(&pkt, 0, sizeof(pkt));
pkt                32 drivers/misc/habanalabs/debugfs.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_RD <<
pkt                34 drivers/misc/habanalabs/debugfs.c 	pkt.i2c_bus = i2c_bus;
pkt                35 drivers/misc/habanalabs/debugfs.c 	pkt.i2c_addr = i2c_addr;
pkt                36 drivers/misc/habanalabs/debugfs.c 	pkt.i2c_reg = i2c_reg;
pkt                38 drivers/misc/habanalabs/debugfs.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt                50 drivers/misc/habanalabs/debugfs.c 	struct armcp_packet pkt;
pkt                56 drivers/misc/habanalabs/debugfs.c 	memset(&pkt, 0, sizeof(pkt));
pkt                58 drivers/misc/habanalabs/debugfs.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_WR <<
pkt                60 drivers/misc/habanalabs/debugfs.c 	pkt.i2c_bus = i2c_bus;
pkt                61 drivers/misc/habanalabs/debugfs.c 	pkt.i2c_addr = i2c_addr;
pkt                62 drivers/misc/habanalabs/debugfs.c 	pkt.i2c_reg = i2c_reg;
pkt                63 drivers/misc/habanalabs/debugfs.c 	pkt.value = cpu_to_le64(val);
pkt                65 drivers/misc/habanalabs/debugfs.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt                76 drivers/misc/habanalabs/debugfs.c 	struct armcp_packet pkt;
pkt                82 drivers/misc/habanalabs/debugfs.c 	memset(&pkt, 0, sizeof(pkt));
pkt                84 drivers/misc/habanalabs/debugfs.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_LED_SET <<
pkt                86 drivers/misc/habanalabs/debugfs.c 	pkt.led_index = cpu_to_le32(led);
pkt                87 drivers/misc/habanalabs/debugfs.c 	pkt.value = cpu_to_le64(state);
pkt                89 drivers/misc/habanalabs/debugfs.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt                57 drivers/misc/habanalabs/firmware_if.c 	struct armcp_packet pkt = {};
pkt                59 drivers/misc/habanalabs/firmware_if.c 	pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt                61 drivers/misc/habanalabs/firmware_if.c 	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
pkt                62 drivers/misc/habanalabs/firmware_if.c 				sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
pkt                68 drivers/misc/habanalabs/firmware_if.c 	struct armcp_packet *pkt;
pkt                73 drivers/misc/habanalabs/firmware_if.c 	pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
pkt                75 drivers/misc/habanalabs/firmware_if.c 	if (!pkt) {
pkt                81 drivers/misc/habanalabs/firmware_if.c 	memcpy(pkt, msg, len);
pkt                99 drivers/misc/habanalabs/firmware_if.c 	rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
pkt               111 drivers/misc/habanalabs/firmware_if.c 	tmp = le32_to_cpu(pkt->ctl);
pkt               121 drivers/misc/habanalabs/firmware_if.c 		*result = (long) le64_to_cpu(pkt->result);
pkt               127 drivers/misc/habanalabs/firmware_if.c 	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
pkt               201 drivers/misc/habanalabs/firmware_if.c 	struct armcp_packet pkt = {};
pkt               219 drivers/misc/habanalabs/firmware_if.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
pkt               221 drivers/misc/habanalabs/firmware_if.c 	pkt.addr = cpu_to_le64(armcp_info_dma_addr);
pkt               222 drivers/misc/habanalabs/firmware_if.c 	pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
pkt               224 drivers/misc/habanalabs/firmware_if.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt               252 drivers/misc/habanalabs/firmware_if.c 	struct armcp_packet pkt = {};
pkt               269 drivers/misc/habanalabs/firmware_if.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
pkt               271 drivers/misc/habanalabs/firmware_if.c 	pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
pkt               272 drivers/misc/habanalabs/firmware_if.c 	pkt.data_max_size = cpu_to_le32(max_size);
pkt               274 drivers/misc/habanalabs/firmware_if.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt              4426 drivers/misc/habanalabs/goya/goya.c 	struct armcp_unmask_irq_arr_packet *pkt;
pkt              4445 drivers/misc/habanalabs/goya/goya.c 	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
pkt              4446 drivers/misc/habanalabs/goya/goya.c 	if (!pkt)
pkt              4450 drivers/misc/habanalabs/goya/goya.c 	pkt->length = cpu_to_le32(irq_num_entries);
pkt              4455 drivers/misc/habanalabs/goya/goya.c 	for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
pkt              4460 drivers/misc/habanalabs/goya/goya.c 	pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
pkt              4463 drivers/misc/habanalabs/goya/goya.c 	rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
pkt              4469 drivers/misc/habanalabs/goya/goya.c 	kfree(pkt);
pkt              4486 drivers/misc/habanalabs/goya/goya.c 	struct armcp_packet pkt;
pkt              4490 drivers/misc/habanalabs/goya/goya.c 	memset(&pkt, 0, sizeof(pkt));
pkt              4492 drivers/misc/habanalabs/goya/goya.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
pkt              4494 drivers/misc/habanalabs/goya/goya.c 	pkt.value = cpu_to_le64(event_type);
pkt              4496 drivers/misc/habanalabs/goya/goya.c 	rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt               270 drivers/misc/habanalabs/hwmon.c 	struct armcp_packet pkt;
pkt               274 drivers/misc/habanalabs/hwmon.c 	memset(&pkt, 0, sizeof(pkt));
pkt               276 drivers/misc/habanalabs/hwmon.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
pkt               278 drivers/misc/habanalabs/hwmon.c 	pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt               279 drivers/misc/habanalabs/hwmon.c 	pkt.type = __cpu_to_le16(attr);
pkt               281 drivers/misc/habanalabs/hwmon.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt               296 drivers/misc/habanalabs/hwmon.c 	struct armcp_packet pkt;
pkt               300 drivers/misc/habanalabs/hwmon.c 	memset(&pkt, 0, sizeof(pkt));
pkt               302 drivers/misc/habanalabs/hwmon.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
pkt               304 drivers/misc/habanalabs/hwmon.c 	pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt               305 drivers/misc/habanalabs/hwmon.c 	pkt.type = __cpu_to_le16(attr);
pkt               307 drivers/misc/habanalabs/hwmon.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt               322 drivers/misc/habanalabs/hwmon.c 	struct armcp_packet pkt;
pkt               326 drivers/misc/habanalabs/hwmon.c 	memset(&pkt, 0, sizeof(pkt));
pkt               328 drivers/misc/habanalabs/hwmon.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
pkt               330 drivers/misc/habanalabs/hwmon.c 	pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt               331 drivers/misc/habanalabs/hwmon.c 	pkt.type = __cpu_to_le16(attr);
pkt               333 drivers/misc/habanalabs/hwmon.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt               348 drivers/misc/habanalabs/hwmon.c 	struct armcp_packet pkt;
pkt               352 drivers/misc/habanalabs/hwmon.c 	memset(&pkt, 0, sizeof(pkt));
pkt               354 drivers/misc/habanalabs/hwmon.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
pkt               356 drivers/misc/habanalabs/hwmon.c 	pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt               357 drivers/misc/habanalabs/hwmon.c 	pkt.type = __cpu_to_le16(attr);
pkt               359 drivers/misc/habanalabs/hwmon.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt               374 drivers/misc/habanalabs/hwmon.c 	struct armcp_packet pkt;
pkt               378 drivers/misc/habanalabs/hwmon.c 	memset(&pkt, 0, sizeof(pkt));
pkt               380 drivers/misc/habanalabs/hwmon.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_GET <<
pkt               382 drivers/misc/habanalabs/hwmon.c 	pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt               383 drivers/misc/habanalabs/hwmon.c 	pkt.type = __cpu_to_le16(attr);
pkt               385 drivers/misc/habanalabs/hwmon.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt               401 drivers/misc/habanalabs/hwmon.c 	struct armcp_packet pkt;
pkt               404 drivers/misc/habanalabs/hwmon.c 	memset(&pkt, 0, sizeof(pkt));
pkt               406 drivers/misc/habanalabs/hwmon.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_SET <<
pkt               408 drivers/misc/habanalabs/hwmon.c 	pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt               409 drivers/misc/habanalabs/hwmon.c 	pkt.type = __cpu_to_le16(attr);
pkt               410 drivers/misc/habanalabs/hwmon.c 	pkt.value = cpu_to_le64(value);
pkt               412 drivers/misc/habanalabs/hwmon.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt                17 drivers/misc/habanalabs/sysfs.c 	struct armcp_packet pkt;
pkt                21 drivers/misc/habanalabs/sysfs.c 	memset(&pkt, 0, sizeof(pkt));
pkt                24 drivers/misc/habanalabs/sysfs.c 		pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
pkt                27 drivers/misc/habanalabs/sysfs.c 		pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
pkt                29 drivers/misc/habanalabs/sysfs.c 	pkt.pll_index = cpu_to_le32(pll_index);
pkt                31 drivers/misc/habanalabs/sysfs.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt                46 drivers/misc/habanalabs/sysfs.c 	struct armcp_packet pkt;
pkt                49 drivers/misc/habanalabs/sysfs.c 	memset(&pkt, 0, sizeof(pkt));
pkt                51 drivers/misc/habanalabs/sysfs.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
pkt                53 drivers/misc/habanalabs/sysfs.c 	pkt.pll_index = cpu_to_le32(pll_index);
pkt                54 drivers/misc/habanalabs/sysfs.c 	pkt.value = cpu_to_le64(freq);
pkt                56 drivers/misc/habanalabs/sysfs.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt                67 drivers/misc/habanalabs/sysfs.c 	struct armcp_packet pkt;
pkt                71 drivers/misc/habanalabs/sysfs.c 	memset(&pkt, 0, sizeof(pkt));
pkt                73 drivers/misc/habanalabs/sysfs.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
pkt                76 drivers/misc/habanalabs/sysfs.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt                89 drivers/misc/habanalabs/sysfs.c 	struct armcp_packet pkt;
pkt                92 drivers/misc/habanalabs/sysfs.c 	memset(&pkt, 0, sizeof(pkt));
pkt                94 drivers/misc/habanalabs/sysfs.c 	pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
pkt                96 drivers/misc/habanalabs/sysfs.c 	pkt.value = cpu_to_le64(value);
pkt                98 drivers/misc/habanalabs/sysfs.c 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
pkt               145 drivers/misc/hpilo.c 			   int dir, int *id, int *len, void **pkt)
pkt               166 drivers/misc/hpilo.c 		if (pkt)
pkt               167 drivers/misc/hpilo.c 			*pkt = (void *)(desc + desc_mem_sz(pkt_id));
pkt               446 drivers/misc/hpilo.c 	void *pkt;
pkt               467 drivers/misc/hpilo.c 					&pkt_len, &pkt);
pkt               481 drivers/misc/hpilo.c 	err = copy_to_user(buf, pkt, len);
pkt               496 drivers/misc/hpilo.c 	void *pkt;
pkt               502 drivers/misc/hpilo.c 	if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
pkt               510 drivers/misc/hpilo.c 	err = copy_from_user(pkt, buf, len);
pkt                44 drivers/net/arcnet/arc-rawmode.c 	struct archdr *pkt = pkthdr;
pkt                62 drivers/net/arcnet/arc-rawmode.c 	pkt = (struct archdr *)skb->data;
pkt                68 drivers/net/arcnet/arc-rawmode.c 	memcpy(pkt, pkthdr, sizeof(struct archdr));
pkt                69 drivers/net/arcnet/arc-rawmode.c 	if (length > sizeof(pkt->soft))
pkt                70 drivers/net/arcnet/arc-rawmode.c 		lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
pkt                71 drivers/net/arcnet/arc-rawmode.c 				      pkt->soft.raw + sizeof(pkt->soft),
pkt                72 drivers/net/arcnet/arc-rawmode.c 				      length - sizeof(pkt->soft));
pkt                88 drivers/net/arcnet/arc-rawmode.c 	struct archdr *pkt = skb_push(skb, hdr_size);
pkt                96 drivers/net/arcnet/arc-rawmode.c 	pkt->hard.source = *dev->dev_addr;
pkt               104 drivers/net/arcnet/arc-rawmode.c 		pkt->hard.dest = 0;
pkt               108 drivers/net/arcnet/arc-rawmode.c 	pkt->hard.dest = daddr;
pkt               113 drivers/net/arcnet/arc-rawmode.c static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
pkt               117 drivers/net/arcnet/arc-rawmode.c 	struct arc_hardware *hard = &pkt->hard;
pkt               146 drivers/net/arcnet/arc-rawmode.c 	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);
pkt               202 drivers/net/arcnet/arcdevice.h 	int (*prepare_tx)(struct net_device *dev, struct archdr *pkt,
pkt               228 drivers/net/arcnet/arcdevice.h 	struct archdr *pkt;	/* a pointer into the skb */
pkt                66 drivers/net/arcnet/arcnet.c static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
pkt               652 drivers/net/arcnet/arcnet.c 	struct archdr *pkt;
pkt               663 drivers/net/arcnet/arcnet.c 	pkt = (struct archdr *)skb->data;
pkt               664 drivers/net/arcnet/arcnet.c 	soft = &pkt->soft.rfc1201;
pkt               668 drivers/net/arcnet/arcnet.c 		   skb->len, pkt->hard.dest);
pkt               691 drivers/net/arcnet/arcnet.c 		if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
pkt               701 drivers/net/arcnet/arcnet.c 			lp->outgoing.pkt = pkt;
pkt              1067 drivers/net/arcnet/arcnet.c 		struct archdr pkt;
pkt              1073 drivers/net/arcnet/arcnet.c 	soft = &rxdata.pkt.soft.rfc1201;
pkt              1075 drivers/net/arcnet/arcnet.c 	lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
pkt              1076 drivers/net/arcnet/arcnet.c 	if (rxdata.pkt.hard.offset[0]) {
pkt              1077 drivers/net/arcnet/arcnet.c 		ofs = rxdata.pkt.hard.offset[0];
pkt              1080 drivers/net/arcnet/arcnet.c 		ofs = rxdata.pkt.hard.offset[1];
pkt              1085 drivers/net/arcnet/arcnet.c 	if (sizeof(rxdata.pkt.soft) <= length) {
pkt              1086 drivers/net/arcnet/arcnet.c 		lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
pkt              1088 drivers/net/arcnet/arcnet.c 		memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
pkt              1093 drivers/net/arcnet/arcnet.c 		   bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
pkt              1102 drivers/net/arcnet/arcnet.c 			*oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
pkt              1108 drivers/net/arcnet/arcnet.c 					   soft->proto, rxdata.pkt.hard.source,
pkt              1117 drivers/net/arcnet/arcnet.c 		lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
pkt              1120 drivers/net/arcnet/arcnet.c 	arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
pkt              1145 drivers/net/arcnet/arcnet.c static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
pkt                47 drivers/net/arcnet/capmode.c 	struct archdr *pkt;
pkt                67 drivers/net/arcnet/capmode.c 	pkt = (struct archdr *)skb_mac_header(skb);
pkt                75 drivers/net/arcnet/capmode.c 	pktbuf = (char *)pkt;
pkt                77 drivers/net/arcnet/capmode.c 	memcpy(pktbuf, pkthdrbuf, ARC_HDR_SIZE + sizeof(pkt->soft.cap.proto));
pkt                78 drivers/net/arcnet/capmode.c 	memcpy(pktbuf + ARC_HDR_SIZE + sizeof(pkt->soft.cap.proto) + sizeof(int),
pkt                79 drivers/net/arcnet/capmode.c 	       pkthdrbuf + ARC_HDR_SIZE + sizeof(pkt->soft.cap.proto),
pkt                80 drivers/net/arcnet/capmode.c 	       sizeof(struct archdr) - ARC_HDR_SIZE - sizeof(pkt->soft.cap.proto));
pkt                82 drivers/net/arcnet/capmode.c 	if (length > sizeof(pkt->soft))
pkt                83 drivers/net/arcnet/capmode.c 		lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
pkt                84 drivers/net/arcnet/capmode.c 				      pkt->soft.raw + sizeof(pkt->soft)
pkt                86 drivers/net/arcnet/capmode.c 				      length - sizeof(pkt->soft));
pkt               104 drivers/net/arcnet/capmode.c 	struct archdr *pkt = skb_push(skb, hdr_size);
pkt               107 drivers/net/arcnet/capmode.c 		   *((int *)&pkt->soft.cap.cookie[0]));
pkt               115 drivers/net/arcnet/capmode.c 	pkt->hard.source = *dev->dev_addr;
pkt               123 drivers/net/arcnet/capmode.c 		pkt->hard.dest = 0;
pkt               127 drivers/net/arcnet/capmode.c 	pkt->hard.dest = daddr;
pkt               132 drivers/net/arcnet/capmode.c static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
pkt               136 drivers/net/arcnet/capmode.c 	struct arc_hardware *hard = &pkt->hard;
pkt               148 drivers/net/arcnet/capmode.c 		   *((int *)&pkt->soft.cap.cookie[0]));
pkt               171 drivers/net/arcnet/capmode.c 	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto,
pkt               172 drivers/net/arcnet/capmode.c 			    sizeof(pkt->soft.cap.proto));
pkt               178 drivers/net/arcnet/capmode.c 			    ((unsigned char *)&pkt->soft.cap.mes), length - 1);
pkt                44 drivers/net/arcnet/rfc1051.c static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
pkt                89 drivers/net/arcnet/rfc1051.c 	struct archdr *pkt = (struct archdr *)skb->data;
pkt                90 drivers/net/arcnet/rfc1051.c 	struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
pkt                97 drivers/net/arcnet/rfc1051.c 	if (pkt->hard.dest == 0) {
pkt               101 drivers/net/arcnet/rfc1051.c 		if (pkt->hard.dest != dev->dev_addr[0])
pkt               126 drivers/net/arcnet/rfc1051.c 	struct archdr *pkt = pkthdr;
pkt               144 drivers/net/arcnet/rfc1051.c 	pkt = (struct archdr *)skb->data;
pkt               147 drivers/net/arcnet/rfc1051.c 	memcpy(pkt, pkthdr, sizeof(struct archdr));
pkt               148 drivers/net/arcnet/rfc1051.c 	if (length > sizeof(pkt->soft))
pkt               149 drivers/net/arcnet/rfc1051.c 		lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
pkt               150 drivers/net/arcnet/rfc1051.c 				      pkt->soft.raw + sizeof(pkt->soft),
pkt               151 drivers/net/arcnet/rfc1051.c 				      length - sizeof(pkt->soft));
pkt               165 drivers/net/arcnet/rfc1051.c 	struct archdr *pkt = skb_push(skb, hdr_size);
pkt               166 drivers/net/arcnet/rfc1051.c 	struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
pkt               190 drivers/net/arcnet/rfc1051.c 	pkt->hard.source = *dev->dev_addr;
pkt               198 drivers/net/arcnet/rfc1051.c 		pkt->hard.dest = 0;
pkt               202 drivers/net/arcnet/rfc1051.c 	pkt->hard.dest = daddr;
pkt               207 drivers/net/arcnet/rfc1051.c static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
pkt               211 drivers/net/arcnet/rfc1051.c 	struct arc_hardware *hard = &pkt->hard;
pkt               237 drivers/net/arcnet/rfc1051.c 	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);
pkt                45 drivers/net/arcnet/rfc1201.c static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
pkt                93 drivers/net/arcnet/rfc1201.c 	struct archdr *pkt = (struct archdr *)skb->data;
pkt                94 drivers/net/arcnet/rfc1201.c 	struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
pkt               101 drivers/net/arcnet/rfc1201.c 	if (pkt->hard.dest == 0) {
pkt               105 drivers/net/arcnet/rfc1201.c 		if (pkt->hard.dest != dev->dev_addr[0])
pkt               137 drivers/net/arcnet/rfc1201.c 	struct archdr *pkt = pkthdr;
pkt               139 drivers/net/arcnet/rfc1201.c 	int saddr = pkt->hard.source, ofs;
pkt               163 drivers/net/arcnet/rfc1201.c 				      soft, sizeof(pkt->soft));
pkt               189 drivers/net/arcnet/rfc1201.c 		pkt = (struct archdr *)skb->data;
pkt               190 drivers/net/arcnet/rfc1201.c 		soft = &pkt->soft.rfc1201;
pkt               195 drivers/net/arcnet/rfc1201.c 		memcpy(pkt, pkthdr, sizeof(struct archdr));
pkt               196 drivers/net/arcnet/rfc1201.c 		if (length > sizeof(pkt->soft))
pkt               198 drivers/net/arcnet/rfc1201.c 					      ofs + sizeof(pkt->soft),
pkt               199 drivers/net/arcnet/rfc1201.c 					      pkt->soft.raw + sizeof(pkt->soft),
pkt               200 drivers/net/arcnet/rfc1201.c 					      length - sizeof(pkt->soft));
pkt               300 drivers/net/arcnet/rfc1201.c 			pkt = (struct archdr *)skb->data;
pkt               301 drivers/net/arcnet/rfc1201.c 			soft = &pkt->soft.rfc1201;
pkt               303 drivers/net/arcnet/rfc1201.c 			memcpy(pkt, pkthdr, ARC_HDR_SIZE + RFC1201_HDR_SIZE);
pkt               347 drivers/net/arcnet/rfc1201.c 			pkt = (struct archdr *)in->skb->data;
pkt               348 drivers/net/arcnet/rfc1201.c 			soft = &pkt->soft.rfc1201;
pkt               364 drivers/net/arcnet/rfc1201.c 				   skb->len, pkt->hard.source);
pkt               366 drivers/net/arcnet/rfc1201.c 				   skb->len, pkt->hard.source);
pkt               382 drivers/net/arcnet/rfc1201.c 	struct archdr *pkt = skb_push(skb, hdr_size);
pkt               383 drivers/net/arcnet/rfc1201.c 	struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
pkt               421 drivers/net/arcnet/rfc1201.c 	pkt->hard.source = *dev->dev_addr;
pkt               434 drivers/net/arcnet/rfc1201.c 		pkt->hard.dest = 0;
pkt               438 drivers/net/arcnet/rfc1201.c 	pkt->hard.dest = daddr;
pkt               475 drivers/net/arcnet/rfc1201.c static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
pkt               487 drivers/net/arcnet/rfc1201.c 	pkt->soft.rfc1201.split_flag = 0;
pkt               500 drivers/net/arcnet/rfc1201.c 			   pkt->soft.rfc1201.sequence);
pkt               505 drivers/net/arcnet/rfc1201.c 	load_pkt(dev, &pkt->hard, &pkt->soft.rfc1201, length, bufnum);
pkt               514 drivers/net/arcnet/rfc1201.c 	struct arc_hardware *hard = &out->pkt->hard;
pkt               515 drivers/net/arcnet/rfc1201.c 	struct arc_rfc1201 *soft = &out->pkt->soft.rfc1201, *newsoft;
pkt               525 drivers/net/arcnet/rfc1201.c 	    (out->pkt->soft.raw + out->length - out->dataleft);
pkt               908 drivers/net/bonding/bond_alb.c 	struct learning_pkt pkt;
pkt               912 drivers/net/bonding/bond_alb.c 	memset(&pkt, 0, size);
pkt               913 drivers/net/bonding/bond_alb.c 	ether_addr_copy(pkt.mac_dst, mac_addr);
pkt               914 drivers/net/bonding/bond_alb.c 	ether_addr_copy(pkt.mac_src, mac_addr);
pkt               915 drivers/net/bonding/bond_alb.c 	pkt.type = cpu_to_be16(ETH_P_LOOPBACK);
pkt               921 drivers/net/bonding/bond_alb.c 	skb_put_data(skb, &pkt, size);
pkt               925 drivers/net/bonding/bond_alb.c 	skb->protocol = pkt.type;
pkt               215 drivers/net/ethernet/broadcom/bcmsysport.c 	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
pkt               543 drivers/net/ethernet/broadcom/bcmsysport.h 	u32	pkt;		/* RO (0x428) Received pkt count*/
pkt               797 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
pkt                89 drivers/net/ethernet/broadcom/genet/bcmgenet.h 	u32	pkt;		/* RO (0x428) Received pkt count*/
pkt               600 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 	u32 pkt, total_len = 0, pkt_count, retval;
pkt               606 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 	for (pkt = 0; pkt < pkt_count; pkt++) {
pkt               718 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 	droq->stats.pkts_received += pkt;
pkt               721 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 	retval = pkt;
pkt               722 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
pkt               723 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
pkt               725 drivers/net/ethernet/cavium/liquidio/octeon_droq.c 		droq->stats.dropped_toomany += (pkts_to_process - pkt);
pkt              2740 drivers/net/ethernet/chelsio/cxgb4/sge.c 		   const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
pkt              2768 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
pkt              2771 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (unlikely(pkt->vlan_ex)) {
pkt              2772 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
pkt              2931 drivers/net/ethernet/chelsio/cxgb4/sge.c 	const struct cpl_rx_pkt *pkt;
pkt              2953 drivers/net/ethernet/chelsio/cxgb4/sge.c 	pkt = (const struct cpl_rx_pkt *)rsp;
pkt              2956 drivers/net/ethernet/chelsio/cxgb4/sge.c 		err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
pkt              2957 drivers/net/ethernet/chelsio/cxgb4/sge.c 		tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
pkt              2959 drivers/net/ethernet/chelsio/cxgb4/sge.c 		err_vec = be16_to_cpu(pkt->err_vec);
pkt              2962 drivers/net/ethernet/chelsio/cxgb4/sge.c 	csum_ok = pkt->csum_calc && !err_vec &&
pkt              2968 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (((pkt->l2info & htonl(RXF_TCP_F)) ||
pkt              2970 drivers/net/ethernet/chelsio/cxgb4/sge.c 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
pkt              2971 drivers/net/ethernet/chelsio/cxgb4/sge.c 		do_gro(rxq, si, pkt, tnl_hdr_len);
pkt              2994 drivers/net/ethernet/chelsio/cxgb4/sge.c 		     (pkt->l2info & htonl(RXF_UDP_F)) &&
pkt              3003 drivers/net/ethernet/chelsio/cxgb4/sge.c 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
pkt              3011 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
pkt              3012 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (!pkt->ip_frag) {
pkt              3015 drivers/net/ethernet/chelsio/cxgb4/sge.c 		} else if (pkt->l2info & htonl(RXF_IP_F)) {
pkt              3016 drivers/net/ethernet/chelsio/cxgb4/sge.c 			__sum16 c = (__force __sum16)pkt->csum;
pkt              3033 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
pkt              3034 drivers/net/ethernet/chelsio/cxgb4/sge.c 			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
pkt              3050 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (unlikely(pkt->vlan_ex)) {
pkt              3051 drivers/net/ethernet/chelsio/cxgb4/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
pkt              1568 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		   const struct cpl_rx_pkt *pkt)
pkt              1591 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (pkt->vlan_ex && !pi->vlan_id) {
pkt              1593 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 					be16_to_cpu(pkt->vlan));
pkt              1618 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	const struct cpl_rx_pkt *pkt = (void *)rsp;
pkt              1619 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
pkt              1630 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
pkt              1632 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	    !pkt->ip_frag) {
pkt              1633 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		do_gro(rxq, gl, pkt);
pkt              1652 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (csum_ok && !pkt->err_vec &&
pkt              1653 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	    (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
pkt              1654 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (!pkt->ip_frag) {
pkt              1657 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		} else if (pkt->l2info & htonl(RXF_IP_F)) {
pkt              1658 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			__sum16 c = (__force __sum16)pkt->csum;
pkt              1666 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if (pkt->vlan_ex && !pi->vlan_id) {
pkt              1669 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				       be16_to_cpu(pkt->vlan));
pkt                83 drivers/net/ethernet/google/gve/gve.h 	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
pkt               360 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
pkt               361 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
pkt               362 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
pkt               364 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
pkt               365 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
pkt               366 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
pkt               368 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.type_flags = GVE_TXD_STD;
pkt               369 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.l4_csum_offset = 0;
pkt               370 drivers/net/ethernet/google/gve/gve_tx.c 		pkt_desc->pkt.l4_hdr_offset = 0;
pkt               372 drivers/net/ethernet/google/gve/gve_tx.c 	pkt_desc->pkt.desc_cnt = desc_cnt;
pkt               373 drivers/net/ethernet/google/gve/gve_tx.c 	pkt_desc->pkt.len = cpu_to_be16(skb->len);
pkt               374 drivers/net/ethernet/google/gve/gve_tx.c 	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
pkt               375 drivers/net/ethernet/google/gve/gve_tx.c 	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
pkt               989 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c 	if (mr_route->mfc->mfc_un.res.pkt != packets)
pkt               991 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c 	mr_route->mfc->mfc_un.res.pkt = packets;
pkt              1691 drivers/net/ethernet/qlogic/qed/qed_ll2.c 					  struct qed_ll2_tx_pkt_info *pkt,
pkt              1695 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	p_curp->cookie = pkt->cookie;
pkt              1696 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	p_curp->bd_used = pkt->num_of_bds;
pkt              1701 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
pkt              1702 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
pkt              1710 drivers/net/ethernet/qlogic/qed/qed_ll2.c 				 struct qed_ll2_tx_pkt_info *pkt)
pkt              1719 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
pkt              1722 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	switch (pkt->tx_dest) {
pkt              1743 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
pkt              1746 drivers/net/ethernet/qlogic/qed/qed_ll2.c 			pkt->remove_stag = true;
pkt              1750 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		  cpu_to_le16(pkt->l4_hdr_offset_w));
pkt              1752 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	bd_data |= pkt->bd_flags;
pkt              1754 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
pkt              1756 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
pkt              1757 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
pkt              1758 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
pkt              1760 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		  !!(pkt->remove_stag));
pkt              1763 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
pkt              1764 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
pkt              1773 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		   pkt->first_frag_len,
pkt              1774 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		   pkt->num_of_bds,
pkt              1778 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
pkt              1783 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	     frag_idx < pkt->num_of_bds; frag_idx++) {
pkt              1846 drivers/net/ethernet/qlogic/qed/qed_ll2.c 			      struct qed_ll2_tx_pkt_info *pkt,
pkt              1863 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
pkt              1876 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
pkt              1885 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
pkt              1887 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
pkt              2515 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	struct qed_ll2_tx_pkt_info pkt;
pkt              2555 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	memset(&pkt, 0, sizeof(pkt));
pkt              2556 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	pkt.num_of_bds = 1 + nr_frags;
pkt              2557 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	pkt.vlan = vlan;
pkt              2558 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	pkt.bd_flags = flags;
pkt              2559 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	pkt.tx_dest = QED_LL2_TX_DEST_NW;
pkt              2560 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	pkt.first_frag = mapping;
pkt              2561 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	pkt.first_frag_len = skb->len;
pkt              2562 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	pkt.cookie = skb;
pkt              2565 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		pkt.remove_stag = true;
pkt              2573 drivers/net/ethernet/qlogic/qed/qed_ll2.c 				       &pkt, 1);
pkt               186 drivers/net/ethernet/qlogic/qed/qed_ll2.h 			      struct qed_ll2_tx_pkt_info *pkt,
pkt                55 drivers/net/ethernet/sun/sunvnet_common.c 	struct vio_msg_tag *pkt = arg;
pkt                58 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
pkt                72 drivers/net/ethernet/sun/sunvnet_common.c 	struct vio_net_attr_info pkt;
pkt                80 drivers/net/ethernet/sun/sunvnet_common.c 	memset(&pkt, 0, sizeof(pkt));
pkt                81 drivers/net/ethernet/sun/sunvnet_common.c 	pkt.tag.type = VIO_TYPE_CTRL;
pkt                82 drivers/net/ethernet/sun/sunvnet_common.c 	pkt.tag.stype = VIO_SUBTYPE_INFO;
pkt                83 drivers/net/ethernet/sun/sunvnet_common.c 	pkt.tag.stype_env = VIO_ATTR_INFO;
pkt                84 drivers/net/ethernet/sun/sunvnet_common.c 	pkt.tag.sid = vio_send_sid(vio);
pkt                86 drivers/net/ethernet/sun/sunvnet_common.c 		pkt.xfer_mode = VIO_DRING_MODE;
pkt                88 drivers/net/ethernet/sun/sunvnet_common.c 		pkt.xfer_mode = VIO_NEW_DRING_MODE;
pkt                89 drivers/net/ethernet/sun/sunvnet_common.c 	pkt.addr_type = VNET_ADDR_ETHERMAC;
pkt                90 drivers/net/ethernet/sun/sunvnet_common.c 	pkt.ack_freq = 0;
pkt                92 drivers/net/ethernet/sun/sunvnet_common.c 		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
pkt                96 drivers/net/ethernet/sun/sunvnet_common.c 			pkt.mtu = port->rmtu;
pkt                99 drivers/net/ethernet/sun/sunvnet_common.c 			pkt.mtu = port->rmtu;
pkt               102 drivers/net/ethernet/sun/sunvnet_common.c 			pkt.options = VIO_TX_DRING;
pkt               104 drivers/net/ethernet/sun/sunvnet_common.c 		pkt.mtu = framelen;
pkt               106 drivers/net/ethernet/sun/sunvnet_common.c 		pkt.mtu = framelen + VLAN_HLEN;
pkt               109 drivers/net/ethernet/sun/sunvnet_common.c 	pkt.cflags = 0;
pkt               111 drivers/net/ethernet/sun/sunvnet_common.c 		pkt.cflags |= VNET_LSO_IPV4_CAPAB;
pkt               114 drivers/net/ethernet/sun/sunvnet_common.c 		pkt.ipv4_lso_maxlen = port->tsolen;
pkt               117 drivers/net/ethernet/sun/sunvnet_common.c 	pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
pkt               122 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt.xfer_mode, pkt.addr_type,
pkt               123 drivers/net/ethernet/sun/sunvnet_common.c 	       (unsigned long long)pkt.addr,
pkt               124 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt.ack_freq, pkt.plnk_updt, pkt.options,
pkt               125 drivers/net/ethernet/sun/sunvnet_common.c 	       (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);
pkt               127 drivers/net/ethernet/sun/sunvnet_common.c 	return vio_ldc_send(vio, &pkt, sizeof(pkt));
pkt               132 drivers/net/ethernet/sun/sunvnet_common.c 			    struct vio_net_attr_info *pkt)
pkt               141 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt->xfer_mode, pkt->addr_type,
pkt               142 drivers/net/ethernet/sun/sunvnet_common.c 	       (unsigned long long)pkt->addr,
pkt               143 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
pkt               144 drivers/net/ethernet/sun/sunvnet_common.c 	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
pkt               145 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt->ipv4_lso_maxlen);
pkt               147 drivers/net/ethernet/sun/sunvnet_common.c 	pkt->tag.sid = vio_send_sid(vio);
pkt               149 drivers/net/ethernet/sun/sunvnet_common.c 	xfer_mode = pkt->xfer_mode;
pkt               164 drivers/net/ethernet/sun/sunvnet_common.c 		localmtu = min(pkt->mtu, localmtu);
pkt               165 drivers/net/ethernet/sun/sunvnet_common.c 		pkt->mtu = localmtu;
pkt               173 drivers/net/ethernet/sun/sunvnet_common.c 		port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
pkt               179 drivers/net/ethernet/sun/sunvnet_common.c 		port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
pkt               183 drivers/net/ethernet/sun/sunvnet_common.c 			pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
pkt               185 drivers/net/ethernet/sun/sunvnet_common.c 		pkt->ipv4_lso_maxlen = port->tsolen;
pkt               187 drivers/net/ethernet/sun/sunvnet_common.c 		pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
pkt               188 drivers/net/ethernet/sun/sunvnet_common.c 		pkt->ipv4_lso_maxlen = 0;
pkt               194 drivers/net/ethernet/sun/sunvnet_common.c 		pkt->xfer_mode = VIO_NEW_DRING_MODE;
pkt               195 drivers/net/ethernet/sun/sunvnet_common.c 		pkt->options = VIO_TX_DRING;
pkt               199 drivers/net/ethernet/sun/sunvnet_common.c 	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
pkt               200 drivers/net/ethernet/sun/sunvnet_common.c 	    pkt->mtu != localmtu) {
pkt               203 drivers/net/ethernet/sun/sunvnet_common.c 		pkt->tag.stype = VIO_SUBTYPE_NACK;
pkt               205 drivers/net/ethernet/sun/sunvnet_common.c 		(void)vio_ldc_send(vio, pkt, sizeof(*pkt));
pkt               213 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt->xfer_mode, pkt->addr_type,
pkt               214 drivers/net/ethernet/sun/sunvnet_common.c 	       (unsigned long long)pkt->addr,
pkt               215 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
pkt               216 drivers/net/ethernet/sun/sunvnet_common.c 	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
pkt               217 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt->ipv4_lso_maxlen);
pkt               219 drivers/net/ethernet/sun/sunvnet_common.c 	pkt->tag.stype = VIO_SUBTYPE_ACK;
pkt               221 drivers/net/ethernet/sun/sunvnet_common.c 	return vio_ldc_send(vio, pkt, sizeof(*pkt));
pkt               225 drivers/net/ethernet/sun/sunvnet_common.c 			   struct vio_net_attr_info *pkt)
pkt               233 drivers/net/ethernet/sun/sunvnet_common.c 			    struct vio_net_attr_info *pkt)
pkt               242 drivers/net/ethernet/sun/sunvnet_common.c 	struct vio_net_attr_info *pkt = arg;
pkt               244 drivers/net/ethernet/sun/sunvnet_common.c 	switch (pkt->tag.stype) {
pkt               246 drivers/net/ethernet/sun/sunvnet_common.c 		return handle_attr_info(vio, pkt);
pkt               249 drivers/net/ethernet/sun/sunvnet_common.c 		return handle_attr_ack(vio, pkt);
pkt               252 drivers/net/ethernet/sun/sunvnet_common.c 		return handle_attr_nack(vio, pkt);
pkt               646 drivers/net/ethernet/sun/sunvnet_common.c 	struct vio_dring_data *pkt = msgbuf;
pkt               651 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
pkt               653 drivers/net/ethernet/sun/sunvnet_common.c 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
pkt               655 drivers/net/ethernet/sun/sunvnet_common.c 	if (unlikely(pkt->seq != dr->rcv_nxt)) {
pkt               657 drivers/net/ethernet/sun/sunvnet_common.c 		       pkt->seq, dr->rcv_nxt);
pkt               666 drivers/net/ethernet/sun/sunvnet_common.c 	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
pkt               688 drivers/net/ethernet/sun/sunvnet_common.c 	struct vio_dring_data *pkt = msgbuf;
pkt               694 drivers/net/ethernet/sun/sunvnet_common.c 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
pkt               697 drivers/net/ethernet/sun/sunvnet_common.c 	end = pkt->end_idx;
pkt               742 drivers/net/ethernet/sun/sunvnet_common.c 	struct vio_net_mcast_info *pkt = msgbuf;
pkt               745 drivers/net/ethernet/sun/sunvnet_common.c 	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
pkt               748 drivers/net/ethernet/sun/sunvnet_common.c 		       pkt->tag.type,
pkt               749 drivers/net/ethernet/sun/sunvnet_common.c 		       pkt->tag.stype,
pkt               750 drivers/net/ethernet/sun/sunvnet_common.c 		       pkt->tag.stype_env,
pkt               751 drivers/net/ethernet/sun/sunvnet_common.c 		       pkt->tag.sid);
pkt               842 drivers/net/ethernet/sun/sunvnet_common.c 			struct vio_dring_data *pkt =
pkt               847 drivers/net/ethernet/sun/sunvnet_common.c 			pkt->tag.type = VIO_TYPE_DATA;
pkt               848 drivers/net/ethernet/sun/sunvnet_common.c 			pkt->tag.stype = VIO_SUBTYPE_INFO;
pkt               849 drivers/net/ethernet/sun/sunvnet_common.c 			pkt->tag.stype_env = VIO_DRING_DATA;
pkt               850 drivers/net/ethernet/sun/sunvnet_common.c 			pkt->seq = dr->rcv_nxt;
pkt               851 drivers/net/ethernet/sun/sunvnet_common.c 			pkt->start_idx = vio_dring_next(dr,
pkt               853 drivers/net/ethernet/sun/sunvnet_common.c 			pkt->end_idx = -1;
pkt               836 drivers/net/hyperv/hyperv_net.h 	struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
pkt              1603 drivers/net/hyperv/hyperv_net.h 	struct rndis_packet pkt;
pkt               892 drivers/net/hyperv/netvsc.c 	*msd_send = msdp->pkt;
pkt               894 drivers/net/hyperv/netvsc.c 	msdp->pkt = NULL;
pkt               934 drivers/net/hyperv/netvsc.c 	if (msdp->pkt)
pkt               935 drivers/net/hyperv/netvsc.c 		msd_len = msdp->pkt->total_data_buflen;
pkt               940 drivers/net/hyperv/netvsc.c 		section_index = msdp->pkt->send_buf_index;
pkt               944 drivers/net/hyperv/netvsc.c 		section_index = msdp->pkt->send_buf_index;
pkt               980 drivers/net/hyperv/netvsc.c 		if (msdp->pkt) {
pkt               981 drivers/net/hyperv/netvsc.c 			packet->total_packets += msdp->pkt->total_packets;
pkt               982 drivers/net/hyperv/netvsc.c 			packet->total_bytes += msdp->pkt->total_bytes;
pkt               990 drivers/net/hyperv/netvsc.c 			msdp->pkt = packet;
pkt               995 drivers/net/hyperv/netvsc.c 			msdp->pkt = NULL;
pkt               229 drivers/net/hyperv/netvsc_drv.c 	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
pkt               589 drivers/net/hyperv/netvsc_drv.c 	rndis_msg->msg.pkt = (struct rndis_packet) {
pkt                43 drivers/net/hyperv/rndis_filter.c 	struct hv_netvsc_packet	pkt;
pkt                98 drivers/net/hyperv/rndis_filter.c 	request->pkt.q_idx = 0;
pkt               138 drivers/net/hyperv/rndis_filter.c 			   rndis_msg->msg.pkt.data_offset,
pkt               139 drivers/net/hyperv/rndis_filter.c 			   rndis_msg->msg.pkt.data_len,
pkt               140 drivers/net/hyperv/rndis_filter.c 			   rndis_msg->msg.pkt.num_oob_data_elements,
pkt               141 drivers/net/hyperv/rndis_filter.c 			   rndis_msg->msg.pkt.oob_data_offset,
pkt               142 drivers/net/hyperv/rndis_filter.c 			   rndis_msg->msg.pkt.oob_data_len,
pkt               143 drivers/net/hyperv/rndis_filter.c 			   rndis_msg->msg.pkt.per_pkt_info_offset,
pkt               144 drivers/net/hyperv/rndis_filter.c 			   rndis_msg->msg.pkt.per_pkt_info_len);
pkt               212 drivers/net/hyperv/rndis_filter.c 	packet = &req->pkt;
pkt               384 drivers/net/hyperv/rndis_filter.c 	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
pkt                40 drivers/net/wireless/ath/ath10k/sdio.c static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
pkt                42 drivers/net/wireless/ath/ath10k/sdio.c 	dev_kfree_skb(pkt->skb);
pkt                43 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->skb = NULL;
pkt                44 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->alloc_len = 0;
pkt                45 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->act_len = 0;
pkt                46 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->trailer_only = false;
pkt                49 drivers/net/wireless/ath/ath10k/sdio.c static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
pkt                54 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->skb = dev_alloc_skb(full_len);
pkt                55 drivers/net/wireless/ath/ath10k/sdio.c 	if (!pkt->skb)
pkt                58 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->act_len = act_len;
pkt                59 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->alloc_len = full_len;
pkt                60 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->part_of_bundle = part_of_bundle;
pkt                61 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->last_in_bundle = last_in_bundle;
pkt                62 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->trailer_only = false;
pkt                67 drivers/net/wireless/ath/ath10k/sdio.c static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
pkt                71 drivers/net/wireless/ath/ath10k/sdio.c 		(struct ath10k_htc_hdr *)pkt->skb->data;
pkt               375 drivers/net/wireless/ath/ath10k/sdio.c 					      struct ath10k_sdio_rx_data *pkt,
pkt               380 drivers/net/wireless/ath/ath10k/sdio.c 	struct sk_buff *skb = pkt->skb;
pkt               401 drivers/net/wireless/ath/ath10k/sdio.c 		if (is_trailer_only_msg(pkt))
pkt               402 drivers/net/wireless/ath/ath10k/sdio.c 			pkt->trailer_only = true;
pkt               418 drivers/net/wireless/ath/ath10k/sdio.c 	struct ath10k_sdio_rx_data *pkt;
pkt               447 drivers/net/wireless/ath/ath10k/sdio.c 		pkt = &ar_sdio->rx_pkts[i];
pkt               449 drivers/net/wireless/ath/ath10k/sdio.c 		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
pkt               459 drivers/net/wireless/ath/ath10k/sdio.c 							 pkt,
pkt               465 drivers/net/wireless/ath/ath10k/sdio.c 		if (!pkt->trailer_only)
pkt               466 drivers/net/wireless/ath/ath10k/sdio.c 			ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
pkt               468 drivers/net/wireless/ath/ath10k/sdio.c 			kfree_skb(pkt->skb);
pkt               471 drivers/net/wireless/ath/ath10k/sdio.c 		pkt->skb = NULL;
pkt               472 drivers/net/wireless/ath/ath10k/sdio.c 		pkt->alloc_len = 0;
pkt               626 drivers/net/wireless/ath/ath10k/sdio.c 				      struct ath10k_sdio_rx_data *pkt)
pkt               629 drivers/net/wireless/ath/ath10k/sdio.c 	struct sk_buff *skb = pkt->skb;
pkt               634 drivers/net/wireless/ath/ath10k/sdio.c 				 skb->data, pkt->alloc_len);
pkt               643 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
pkt               644 drivers/net/wireless/ath/ath10k/sdio.c 	if (pkt->act_len > pkt->alloc_len) {
pkt               646 drivers/net/wireless/ath/ath10k/sdio.c 			    pkt->act_len, pkt->alloc_len);
pkt               651 drivers/net/wireless/ath/ath10k/sdio.c 	skb_put(skb, pkt->act_len);
pkt               654 drivers/net/wireless/ath/ath10k/sdio.c 	pkt->status = ret;
pkt               376 drivers/net/wireless/ath/ath6kl/htc_mbox.c 				   struct htc_packet *pkt)
pkt               379 drivers/net/wireless/ath/ath6kl/htc_mbox.c 	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
pkt               715 drivers/net/wireless/ath/ath6kl/main.c 	stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
pkt               741 drivers/net/wireless/ath/ath6kl/main.c 	stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
pkt               271 drivers/net/wireless/ath/ath6kl/wmi.c u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
pkt               273 drivers/net/wireless/ath/ath6kl/wmi.c 	struct iphdr *ip_hdr = (struct iphdr *) pkt;
pkt              1759 drivers/net/wireless/ath/ath6kl/wmi.h 	__le32 pkt;
pkt              1780 drivers/net/wireless/ath/ath6kl/wmi.h 	__le32 pkt;
pkt              2676 drivers/net/wireless/ath/ath6kl/wmi.h u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri);
pkt               539 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
pkt               544 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
pkt               553 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);
pkt               668 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	struct sk_buff *pkt;
pkt               673 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	pkt = dev_alloc_skb(dsize);
pkt               674 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	if (!pkt) {
pkt               678 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	pkt->priority = 0;
pkt               703 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 		skb_put(pkt, dsize);
pkt               706 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 			memcpy(pkt->data, data, dsize);
pkt               708 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 						       sdaddr, pkt);
pkt               711 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 						      sdaddr, pkt);
pkt               719 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 			memcpy(data, pkt->data, dsize);
pkt               720 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 		skb_trim(pkt, 0);
pkt               732 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	dev_kfree_skb(pkt);
pkt               431 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct sk_buff *pkt;
pkt               604 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				    struct sk_buff *pkt, u32 slot_id)
pkt               616 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	h->items[slot_id].pkt = pkt;
pkt               634 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	*pktout = h->items[slot_id].pkt;
pkt               637 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		h->items[slot_id].pkt = NULL;
pkt               693 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			skb = h->items[i].pkt;
pkt               845 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			WARN_ON(skb != hi->pkt);
pkt              1678 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
pkt              1689 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder;
pkt              1696 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_netif_rx(ifp, pkt);
pkt              1708 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			brcmf_netif_rx(ifp, pkt);
pkt              1715 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		__skb_queue_tail(&reorder_list, pkt);
pkt              1725 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		buf_size += (max_idx + 1) * sizeof(pkt);
pkt              1733 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			brcmf_netif_rx(ifp, pkt);
pkt              1753 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		rfi->pktslots[rfi->cur_idx] = pkt;
pkt              1769 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			rfi->pktslots[cur_idx] = pkt;
pkt              1786 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			rfi->pktslots[cur_idx] = pkt;
pkt              1819 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				__skb_queue_tail(&reorder_list, pkt);
pkt              1821 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				rfi->pktslots[cur_idx] = pkt;
pkt              1840 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		__skb_queue_tail(&reorder_list, pkt);
pkt              1845 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
pkt              1846 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		__skb_unlink(pkt, &reorder_list);
pkt              1847 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_netif_rx(ifp, pkt);
pkt              1821 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	struct sk_buff *pkt;		/* Packet for event or data frames */
pkt              1898 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
pkt              1900 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		if (!pkt) {
pkt              1908 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		skb_pull(pkt, head_read);
pkt              1909 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		pkt_align(pkt, rd->len_left, bus->head_align);
pkt              1911 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
pkt              1918 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			brcmu_pkt_buf_free_skb(pkt);
pkt              1927 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			skb_push(pkt, head_read);
pkt              1928 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			memcpy(pkt->data, bus->rxhdr, head_read);
pkt              1931 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
pkt              1939 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 				brcmu_pkt_buf_free_skb(pkt);
pkt              1950 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 				brcmu_pkt_buf_free_skb(pkt);
pkt              1972 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 				brcmu_pkt_buf_free_skb(pkt);
pkt              1978 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 				   pkt->data, rd->len, "Rx Data:\n");
pkt              1986 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 						   pkt->data, rd->len,
pkt              1988 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 				__skb_trim(pkt, rd->len);
pkt              1989 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 				skb_pull(pkt, SDPCM_HDRLEN);
pkt              1990 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 				bus->glomd = pkt;
pkt              2007 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		__skb_trim(pkt, rd->len);
pkt              2008 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		skb_pull(pkt, rd->dat_offset);
pkt              2010 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		if (pkt->len == 0)
pkt              2011 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			brcmu_pkt_buf_free_skb(pkt);
pkt              2013 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			brcmf_rx_event(bus->sdiodev->dev, pkt);
pkt              2015 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			brcmf_rx_frame(bus->sdiodev->dev, pkt,
pkt              2046 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
pkt              2052 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	dat_buf = (u8 *)(pkt->data);
pkt              2057 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		if (skb_headroom(pkt) < head_pad) {
pkt              2060 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			if (skb_cow_head(pkt, head_pad)) {
pkt              2066 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		skb_push(pkt, head_pad);
pkt              2067 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		dat_buf = (u8 *)(pkt->data);
pkt              2084 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 				    struct sk_buff *pkt, u16 total_len)
pkt              2099 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	lastfrm = skb_queue_is_last(pktq, pkt);
pkt              2101 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	tail_chop = pkt->len % bus->sgentry_align;
pkt              2107 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
pkt              2118 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		       pkt->data + pkt->len - tail_chop,
pkt              2121 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		skb_trim(pkt, pkt->len - tail_chop);
pkt              2123 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		__skb_queue_after(pktq, pkt, pkt_pad);
pkt              2125 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		ntail = pkt->data_len + tail_pad -
pkt              2126 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			(pkt->end - pkt->tail);
pkt              2127 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		if (skb_cloned(pkt) || ntail > 0)
pkt              2128 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
pkt              2130 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		if (skb_linearize(pkt))
pkt              2132 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		__skb_put(pkt, tail_pad);
pkt              2296 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	struct sk_buff *pkt;
pkt              2319 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
pkt              2321 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			if (pkt == NULL)
pkt              2323 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 			__skb_queue_tail(&pktq, pkt);
pkt              2715 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
pkt              2724 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		brcmu_pktq_penq(q, prec, pkt);
pkt              2750 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	p = brcmu_pktq_penq(q, prec, pkt);
pkt              2757 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
pkt              2765 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
pkt              2770 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	skb_push(pkt, bus->tx_hdrlen);
pkt              2773 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	prec = prio2prec((pkt->priority & PRIOMASK));
pkt              2783 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	*(u16 *)(pkt->cb) = 0;
pkt              2784 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
pkt              2785 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		skb_pull(pkt, bus->tx_hdrlen);
pkt               322 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt);
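The brcmf_sdio_bus_txdata() entries above show a reserve-header / enqueue / rollback-on-failure shape around pkt: skb_push() makes room for the bus header, the frame is queued by precedence, and on a failed enqueue skb_pull() undoes the reservation. A simplified sketch of that shape, not the driver code: DEMO_HDRLEN, DEMO_QLEN_MAX and demo_txq_enq() are hypothetical stand-ins for bus->tx_hdrlen and brcmf_sdio_prec_enq(), and the caller is assumed to have reserved DEMO_HDRLEN of headroom.

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	#define DEMO_HDRLEN	16	/* stand-in for bus->tx_hdrlen */
	#define DEMO_QLEN_MAX	256	/* arbitrary queue cap for the sketch */

	/* Hypothetical enqueue helper: accepts frames only while the queue has room. */
	static bool demo_txq_enq(struct sk_buff_head *txq, struct sk_buff *pkt)
	{
		if (skb_queue_len(txq) >= DEMO_QLEN_MAX)
			return false;
		__skb_queue_tail(txq, pkt);
		return true;
	}

	static int demo_txdata(struct sk_buff_head *txq, struct sk_buff *pkt)
	{
		/* Make room in front of the frame for the bus header. */
		skb_push(pkt, DEMO_HDRLEN);

		if (!demo_txq_enq(txq, pkt)) {
			/* Queue full: undo the header reservation before failing. */
			skb_pull(pkt, DEMO_HDRLEN);
			return -ENOSPC;
		}
		return 0;
	}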
pkt              1550 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		      (void *pkt, void *arg_a), void *arg_a)
pkt               105 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h 		      (void *pkt, void *arg_a), void *arg_a);
pkt              7682 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
pkt              7689 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
pkt              7706 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
pkt              7731 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
pkt              7732 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_frame *frame = &pkt->u.frame;
pkt              7745 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
pkt              7752 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
pkt              7908 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
pkt              7909 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_frame *frame = &pkt->u.frame;
pkt              7920 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
pkt              8203 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
pkt              8223 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
pkt              8253 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_rx_packet *pkt;
pkt              8278 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		pkt = (struct ipw_rx_packet *)rxb->skb->data;
pkt              8280 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			     pkt->header.message_type,
pkt              8281 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			     pkt->header.rx_seq_num, pkt->header.control_bits);
pkt              8283 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		switch (pkt->header.message_type) {
pkt              8286 drivers/net/wireless/intel/ipw2x00/ipw2200.c 					.rssi = pkt->u.frame.rssi_dbm -
pkt              8289 drivers/net/wireless/intel/ipw2x00/ipw2200.c 					    pkt->u.frame.rssi_dbm -
pkt              8292 drivers/net/wireless/intel/ipw2x00/ipw2200.c 					    le16_to_cpu(pkt->u.frame.noise),
pkt              8293 drivers/net/wireless/intel/ipw2x00/ipw2200.c 					.rate = pkt->u.frame.rate,
pkt              8296 drivers/net/wireless/intel/ipw2x00/ipw2200.c 					    pkt->u.frame.received_channel,
pkt              8298 drivers/net/wireless/intel/ipw2x00/ipw2200.c 					    (pkt->u.frame.
pkt              8302 drivers/net/wireless/intel/ipw2x00/ipw2200.c 					.len = le16_to_cpu(pkt->u.frame.length),
pkt              8357 drivers/net/wireless/intel/ipw2x00/ipw2200.c 					     le16_to_cpu(pkt->u.frame.length));
pkt              8359 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				if (le16_to_cpu(pkt->u.frame.length) <
pkt              8407 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				     pkt->u.notification.subtype,
pkt              8408 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				     pkt->u.notification.flags,
pkt              8409 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				     le16_to_cpu(pkt->u.notification.size));
pkt              8410 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				ipw_rx_notification(priv, &pkt->u.notification);
pkt              8416 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				     pkt->header.message_type);
pkt               646 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il_rx_pkt *pkt;
pkt               692 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	pkt = (struct il_rx_pkt *)cmd.reply_page;
pkt               693 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
pkt               698 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
pkt               701 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (pkt->u.spectrum.id != 0xff) {
pkt               703 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			       pkt->u.spectrum.id);
pkt               723 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               727 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	palive = &pkt->u.alive_frame;
pkt               734 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
pkt               739 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		memcpy(&il->card_alive, &pkt->u.alive_frame,
pkt               757 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               760 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
pkt               766 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               767 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
pkt               787 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               788 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
pkt              1183 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	struct il_rx_pkt *pkt;
pkt              1223 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		pkt = rxb_addr(rxb);
pkt              1225 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
pkt              1228 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		reclaim = il_need_reclaim(il, pkt);
pkt              1233 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (il->handlers[pkt->hdr.cmd]) {
pkt              1235 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
pkt              1236 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			il->isr_stats.handlers[pkt->hdr.cmd]++;
pkt              1237 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			il->handlers[pkt->hdr.cmd] (il, rxb);
pkt              1241 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
pkt               300 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               301 drivers/net/wireless/intel/iwlegacy/3945.c 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
pkt               306 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
pkt               406 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               410 drivers/net/wireless/intel/iwlegacy/3945.c 	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
pkt               412 drivers/net/wireless/intel/iwlegacy/3945.c 	il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
pkt               415 drivers/net/wireless/intel/iwlegacy/3945.c 	memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
pkt               421 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               422 drivers/net/wireless/intel/iwlegacy/3945.c 	__le32 *flag = (__le32 *) &pkt->u.raw;
pkt               468 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               469 drivers/net/wireless/intel/iwlegacy/3945.c 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
pkt               470 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
pkt               471 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
pkt               501 drivers/net/wireless/intel/iwlegacy/3945.c 		il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
pkt               511 drivers/net/wireless/intel/iwlegacy/3945.c 				(void *)rx_hdr->payload - (void *)pkt, len,
pkt               529 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               530 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
pkt               531 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
pkt               532 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
pkt               578 drivers/net/wireless/intel/iwlegacy/3945.c 	header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
pkt              1662 drivers/net/wireless/intel/iwlegacy/3945.c 	struct il_rx_pkt *pkt;
pkt              1691 drivers/net/wireless/intel/iwlegacy/3945.c 	pkt = (struct il_rx_pkt *)cmd.reply_page;
pkt              1692 drivers/net/wireless/intel/iwlegacy/3945.c 	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
pkt               614 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               631 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	if (pkt->hdr.cmd == N_RX) {
pkt               632 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
pkt               634 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
pkt               639 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
pkt               648 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
pkt               649 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
pkt               651 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
pkt               746 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt               749 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	memcpy(&il->_4965.last_phy_res, pkt->u.raw,
pkt              1252 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              1255 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	missed_beacon = &pkt->u.missed_beacon;
pkt              1358 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              1362 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
pkt              1366 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	      pkt->u.stats.general.common.temperature) ||
pkt              1368 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
pkt              1370 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
pkt              1374 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
pkt              1386 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	    (pkt->hdr.cmd == N_STATS)) {
pkt              1398 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              1400 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
pkt              2761 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              2762 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
pkt              2769 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
pkt              2908 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              2909 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
pkt              4018 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              4022 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	palive = &pkt->u.alive_frame;
pkt              4029 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
pkt              4034 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		memcpy(&il->card_alive, &pkt->u.alive_frame,
pkt              4075 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              4077 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	    (struct il4965_beacon_notif *)pkt->u.raw;
pkt              4115 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              4116 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
pkt              4213 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	struct il_rx_pkt *pkt;
pkt              4254 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		pkt = rxb_addr(rxb);
pkt              4256 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
pkt              4259 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		reclaim = il_need_reclaim(il, pkt);
pkt              4264 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		if (il->handlers[pkt->hdr.cmd]) {
pkt              4266 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
pkt              4267 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			il->isr_stats.handlers[pkt->hdr.cmd]++;
pkt              4268 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			il->handlers[pkt->hdr.cmd] (il, rxb);
pkt              4272 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
pkt               242 drivers/net/wireless/intel/iwlegacy/common.c 			struct il_rx_pkt *pkt)
pkt               244 drivers/net/wireless/intel/iwlegacy/common.c 	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
pkt               246 drivers/net/wireless/intel/iwlegacy/common.c 		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
pkt               254 drivers/net/wireless/intel/iwlegacy/common.c 			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
pkt               258 drivers/net/wireless/intel/iwlegacy/common.c 		     pkt->hdr.flags);
pkt               403 drivers/net/wireless/intel/iwlegacy/common.c 					struct il_rx_pkt *pkt))
pkt              1251 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt;
pkt              1271 drivers/net/wireless/intel/iwlegacy/common.c 	pkt = (struct il_rx_pkt *)cmd.reply_page;
pkt              1272 drivers/net/wireless/intel/iwlegacy/common.c 	if (pkt->u.status != CAN_ABORT_STATUS) {
pkt              1279 drivers/net/wireless/intel/iwlegacy/common.c 		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
pkt              1389 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              1391 drivers/net/wireless/intel/iwlegacy/common.c 	    (struct il_scanreq_notification *)pkt->u.raw;
pkt              1401 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              1403 drivers/net/wireless/intel/iwlegacy/common.c 	    (struct il_scanstart_notification *)pkt->u.raw;
pkt              1416 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              1418 drivers/net/wireless/intel/iwlegacy/common.c 	    (struct il_scanresults_notification *)pkt->u.raw;
pkt              1434 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              1435 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
pkt              1759 drivers/net/wireless/intel/iwlegacy/common.c 			struct il_rx_pkt *pkt, bool sync)
pkt              1765 drivers/net/wireless/intel/iwlegacy/common.c 	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
pkt              1766 drivers/net/wireless/intel/iwlegacy/common.c 		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
pkt              1774 drivers/net/wireless/intel/iwlegacy/common.c 	switch (pkt->u.add_sta.status) {
pkt              1792 drivers/net/wireless/intel/iwlegacy/common.c 		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
pkt              1819 drivers/net/wireless/intel/iwlegacy/common.c 		    struct il_rx_pkt *pkt)
pkt              1823 drivers/net/wireless/intel/iwlegacy/common.c 	il_process_add_sta_resp(il, addsta, pkt, false);
pkt              1830 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = NULL;
pkt              1857 drivers/net/wireless/intel/iwlegacy/common.c 	pkt = (struct il_rx_pkt *)cmd.reply_page;
pkt              1858 drivers/net/wireless/intel/iwlegacy/common.c 	ret = il_process_add_sta_resp(il, sta, pkt, true);
pkt              2087 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt;
pkt              2111 drivers/net/wireless/intel/iwlegacy/common.c 	pkt = (struct il_rx_pkt *)cmd.reply_page;
pkt              2112 drivers/net/wireless/intel/iwlegacy/common.c 	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
pkt              2113 drivers/net/wireless/intel/iwlegacy/common.c 		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
pkt              2118 drivers/net/wireless/intel/iwlegacy/common.c 		switch (pkt->u.rem_sta.status) {
pkt              2640 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              2641 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
pkt              3280 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              3281 drivers/net/wireless/intel/iwlegacy/common.c 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
pkt              3285 drivers/net/wireless/intel/iwlegacy/common.c 	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
pkt              3299 drivers/net/wireless/intel/iwlegacy/common.c 		il_print_hex_error(il, pkt, 32);
pkt              3317 drivers/net/wireless/intel/iwlegacy/common.c 		meta->callback(il, cmd, pkt);
pkt              4109 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              4110 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_csa_notification *csa = &(pkt->u.csa_notif);
pkt              4451 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              4452 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
pkt              4462 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              4463 drivers/net/wireless/intel/iwlegacy/common.c 	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
pkt              4465 drivers/net/wireless/intel/iwlegacy/common.c 		il_get_cmd_string(pkt->hdr.cmd));
pkt              4466 drivers/net/wireless/intel/iwlegacy/common.c 	il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
pkt              4473 drivers/net/wireless/intel/iwlegacy/common.c 	struct il_rx_pkt *pkt = rxb_addr(rxb);
pkt              4477 drivers/net/wireless/intel/iwlegacy/common.c 	       le32_to_cpu(pkt->u.err_resp.error_type),
pkt              4478 drivers/net/wireless/intel/iwlegacy/common.c 	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
pkt              4479 drivers/net/wireless/intel/iwlegacy/common.c 	       pkt->u.err_resp.cmd_id,
pkt              4480 drivers/net/wireless/intel/iwlegacy/common.c 	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
pkt              4481 drivers/net/wireless/intel/iwlegacy/common.c 	       le32_to_cpu(pkt->u.err_resp.error_info));
pkt               103 drivers/net/wireless/intel/iwlegacy/common.h 			  struct il_rx_pkt *pkt);
pkt               567 drivers/net/wireless/intel/iwlegacy/common.h 			  struct il_rx_pkt *pkt);
pkt              1745 drivers/net/wireless/intel/iwlegacy/common.h void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
pkt              1812 drivers/net/wireless/intel/iwlegacy/common.h 					    struct il_rx_pkt *pkt));
pkt              1969 drivers/net/wireless/intel/iwlegacy/common.h static inline bool il_need_reclaim(struct il_priv *il, struct il_rx_pkt *pkt)
pkt              1977 drivers/net/wireless/intel/iwlegacy/common.h 	return !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
pkt              1978 drivers/net/wireless/intel/iwlegacy/common.h 	       pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX &&
pkt              1979 drivers/net/wireless/intel/iwlegacy/common.h 	       pkt->hdr.cmd != N_RX_PHY && pkt->hdr.cmd != N_RX &&
pkt              1980 drivers/net/wireless/intel/iwlegacy/common.h 	       pkt->hdr.cmd != N_RX_MPDU && pkt->hdr.cmd != N_COMPRESSED_BA;
pkt               630 drivers/net/wireless/intel/iwlwifi/dvm/lib.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               631 drivers/net/wireless/intel/iwlwifi/dvm/lib.c 	struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
pkt               409 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 				 struct iwl_rx_packet *pkt, void *data)
pkt               414 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 	if (iwl_rx_packet_payload_len(pkt) != sizeof(*resume_data->cmd)) {
pkt               418 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 	memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd));
pkt                38 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt                39 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_error_resp *err_resp = (void *)pkt->data;
pkt                51 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt                52 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_csa_notification *csa = (void *)pkt->data;
pkt                80 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt                81 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_spectrum_notification *report = (void *)pkt->data;
pkt                97 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt                98 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_sleep_notification *sleep = (void *)pkt->data;
pkt               107 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               108 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	u32 __maybe_unused len = iwl_rx_packet_len(pkt);
pkt               111 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
pkt               117 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               118 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
pkt               353 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               354 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	u32 len = iwl_rx_packet_payload_len(pkt);
pkt               371 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 		stats = (void *)&pkt->data;
pkt               389 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 		stats = (void *)&pkt->data;
pkt               440 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
pkt               453 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               454 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_notif_statistics *stats = (void *)pkt->data;
pkt               476 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               477 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
pkt               527 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               528 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_missed_beacon_notif *missed_beacon = (void *)pkt->data;
pkt               548 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               552 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	memcpy(&priv->last_phy_res, pkt->data,
pkt               785 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               798 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
pkt               799 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
pkt               801 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
pkt               895 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               896 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->data;
pkt               991 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               999 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	iwl_notification_wait_notify(&priv->notif_wait, pkt);
pkt              1004 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 	if (priv->rx_handlers[pkt->hdr.cmd]) {
pkt              1005 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 		priv->rx_handlers_stats[pkt->hdr.cmd]++;
pkt              1006 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 		priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
pkt              1011 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 						iwl_cmd_id(pkt->hdr.cmd,
pkt              1013 drivers/net/wireless/intel/iwlwifi/dvm/rx.c 			     pkt->hdr.cmd);
pkt               241 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               242 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 	struct iwl_scanreq_notification *notif = (void *)pkt->data;
pkt               252 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               253 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 	struct iwl_scanstart_notification *notif = (void *)pkt->data;
pkt               271 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               272 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 	struct iwl_scanresults_notification *notif = (void *)pkt->data;
pkt               293 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               294 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
pkt                49 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 				     struct iwl_rx_packet *pkt)
pkt                51 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 	struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
pkt                82 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt                84 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 	iwl_process_add_sta_resp(priv, pkt);
pkt                98 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 	struct iwl_rx_packet *pkt;
pkt               114 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 	pkt = cmd.resp_pkt;
pkt               115 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 	add_sta_resp = (void *)pkt->data;
pkt               409 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 	struct iwl_rx_packet *pkt;
pkt               431 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 	pkt = cmd.resp_pkt;
pkt               432 drivers/net/wireless/intel/iwlwifi/dvm/sta.c 	rem_sta_resp = (void *)pkt->data;
pkt              1115 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1116 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
pkt              1119 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
pkt              1268 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1269 drivers/net/wireless/intel/iwlwifi/dvm/tx.c 	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
pkt               268 drivers/net/wireless/intel/iwlwifi/dvm/ucode.c 			 struct iwl_rx_packet *pkt, void *data)
pkt               275 drivers/net/wireless/intel/iwlwifi/dvm/ucode.c 	palive = (void *)pkt->data;
pkt               361 drivers/net/wireless/intel/iwlwifi/dvm/ucode.c 			      struct iwl_rx_packet *pkt, void *data)
pkt               366 drivers/net/wireless/intel/iwlwifi/dvm/ucode.c 	if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
pkt               367 drivers/net/wireless/intel/iwlwifi/dvm/ucode.c 		WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
pkt               371 drivers/net/wireless/intel/iwlwifi/dvm/ucode.c 	hdr = (struct iwl_calib_hdr *)pkt->data;
pkt               373 drivers/net/wireless/intel/iwlwifi/dvm/ucode.c 	if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt)))
pkt                76 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c 			   struct iwl_rx_packet *pkt)
pkt                98 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c 				u16 rec_id = WIDE_ID(pkt->hdr.group_id,
pkt                99 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c 						     pkt->hdr.cmd);
pkt               111 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c 			if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
pkt               141 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c 				      struct iwl_rx_packet *pkt, void *data),
pkt               102 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h 		   struct iwl_rx_packet *pkt, void *data);
pkt               114 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h 			   struct iwl_rx_packet *pkt);
pkt               125 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h 			     struct iwl_rx_packet *pkt)
pkt               127 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h 	if (iwl_notification_wait(notif_data, pkt))
pkt               137 drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h 				      struct iwl_rx_packet *pkt, void *data),
pkt                69 drivers/net/wireless/intel/iwlwifi/fw/smem.c 				       struct iwl_rx_packet *pkt)
pkt                71 drivers/net/wireless/intel/iwlwifi/fw/smem.c 	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
pkt                96 drivers/net/wireless/intel/iwlwifi/fw/smem.c 				 struct iwl_rx_packet *pkt)
pkt                98 drivers/net/wireless/intel/iwlwifi/fw/smem.c 	struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data;
pkt               136 drivers/net/wireless/intel/iwlwifi/fw/smem.c 	struct iwl_rx_packet *pkt;
pkt               153 drivers/net/wireless/intel/iwlwifi/fw/smem.c 	pkt = cmd.resp_pkt;
pkt               155 drivers/net/wireless/intel/iwlwifi/fw/smem.c 		iwl_parse_shared_mem_22000(fwrt, pkt);
pkt               157 drivers/net/wireless/intel/iwlwifi/fw/smem.c 		iwl_parse_shared_mem(fwrt, pkt);
pkt                59 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 		 struct iwl_rx_packet *pkt, size_t len),
pkt                60 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 	TP_ARGS(dev, trans, pkt, len),
pkt                66 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 				iwl_rx_trace_len(trans, pkt, len, NULL))
pkt                72 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 		__entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
pkt                73 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 		memcpy(__get_dynamic_array(rxbuf), pkt,
pkt                74 drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h 		       iwl_rx_trace_len(trans, pkt, len, &hdr_offset));
pkt               202 drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c 			   struct iwl_rx_packet *pkt)
pkt               205 drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c 			(struct iwl_calib_res_notif_phy_db *)pkt->data;
pkt                72 drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h 			   struct iwl_rx_packet *pkt);
pkt               146 drivers/net/wireless/intel/iwlwifi/iwl-trans.h static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
pkt               148 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
pkt               151 drivers/net/wireless/intel/iwlwifi/iwl-trans.h static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
pkt               153 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
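The iwl-trans.h accessors just above (iwl_rx_packet_len() and iwl_rx_packet_payload_len()) are what the dvm and mvm handlers listed throughout this section use to bound-check a received packet before casting pkt->data. A minimal sketch of that recurring handler shape follows; it assumes the iwlwifi-internal definitions from iwl-trans.h are in scope, and struct iwl_demo_notif with iwl_demo_rx_notif() are hypothetical names, not driver symbols.

	/*
	 * Sketch only: the map / length-check / cast pattern seen in the rx
	 * handlers indexed above.  rxb_addr() and iwl_rx_packet_payload_len()
	 * are the real helpers from iwl-trans.h; everything named "demo" is
	 * invented for illustration.
	 */
	struct iwl_demo_notif {
		__le32 status;		/* hypothetical payload layout */
	};

	static void iwl_demo_rx_notif(struct iwl_rx_cmd_buffer *rxb)
	{
		struct iwl_rx_packet *pkt = rxb_addr(rxb);
		struct iwl_demo_notif *notif;

		/* Refuse runt payloads before dereferencing pkt->data. */
		if (iwl_rx_packet_payload_len(pkt) < sizeof(*notif))
			return;

		notif = (void *)pkt->data;
		pr_debug("demo notif status 0x%x\n", le32_to_cpu(notif->status));
	}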
pkt               546 drivers/net/wireless/intel/iwlwifi/mvm/coex.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               547 drivers/net/wireless/intel/iwlwifi/mvm/coex.c 	struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
pkt              1126 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 	struct sk_buff *pkt = NULL;
pkt              1187 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 			pkt = alloc_skb(pktsize, GFP_KERNEL);
pkt              1188 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 			if (!pkt)
pkt              1191 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 			skb_put_data(pkt, pktdata, hdrlen);
pkt              1223 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 			skb_put_data(pkt, pktdata, pktsize);
pkt              1225 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
pkt              1227 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 			wakeup.packet = pkt->data;
pkt              1228 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 			wakeup.packet_present_len = pkt->len;
pkt              1229 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 			wakeup.packet_len = pkt->len - truncated;
pkt              1251 drivers/net/wireless/intel/iwlwifi/mvm/d3.c 	kfree_skb(pkt);
pkt              1172 drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c 	struct iwl_rx_packet *pkt;
pkt              1191 drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c 	pkt = rxb_addr(&rxb);
pkt              1198 drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c 	if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
pkt              1202 drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c 	if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
pkt              1207 drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c 	desc = (void *)pkt->data;
pkt              1209 drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c 	    (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
pkt              1655 drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c 				  struct iwl_rx_packet *pkt, void *data)
pkt               497 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               498 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
pkt               499 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
pkt               500 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
pkt               632 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               633 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
pkt               634 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	size_t len = iwl_rx_packet_payload_len(pkt);
pkt               215 drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               216 drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c 	struct iwl_ftm_responder_stats *resp = (void *)pkt->data;
pkt               191 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               192 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
pkt               210 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 			 struct iwl_rx_packet *pkt, void *data)
pkt               223 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
pkt               224 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 		palive = (void *)pkt->data;
pkt               230 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 		palive3 = (void *)pkt->data;
pkt               285 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 				   struct iwl_rx_packet *pkt, void *data)
pkt               287 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
pkt               293 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 				  struct iwl_rx_packet *pkt, void *data)
pkt               297 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
pkt               298 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
pkt               302 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
pkt              1565 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1566 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
pkt              1579 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1580 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
pkt              1589 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
pkt              1331 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1332 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
pkt              1333 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data;
pkt              1399 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1400 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
pkt              1460 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1461 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
pkt              1499 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1500 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
pkt              1502 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	int len = iwl_rx_packet_payload_len(pkt);
pkt              1552 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1553 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
pkt              1556 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c 	int len = iwl_rx_packet_payload_len(pkt);
pkt              1968 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1969 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
pkt              2827 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              2828 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
pkt              3556 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 			       struct iwl_rx_packet *pkt, void *data)
pkt              3561 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	int resp_len = iwl_rx_packet_payload_len(pkt);
pkt              3564 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
pkt              3572 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	resp = (void *)pkt->data;
pkt              1553 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 				  struct iwl_rx_packet *pkt);
pkt               108 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	struct iwl_rx_packet *pkt;
pkt               116 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	pkt = cmd.resp_pkt;
pkt               118 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	nvm_resp = (void *)pkt->data;
pkt               140 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	struct iwl_rx_packet *pkt;
pkt               155 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	pkt = cmd.resp_pkt;
pkt               158 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	nvm_resp = (void *)pkt->data;
pkt               479 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	struct iwl_rx_packet *pkt;
pkt               503 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	pkt = cmd.resp_pkt;
pkt               508 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 		struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
pkt               519 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 		struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
pkt               619 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               620 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c 	struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
pkt               957 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 					    struct iwl_rx_packet *pkt)
pkt               975 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
pkt               976 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
pkt               981 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 					pkt->hdr.group_id, pkt->hdr.cmd);
pkt               988 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 			      struct iwl_rx_packet *pkt)
pkt               991 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
pkt               995 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	iwl_mvm_rx_check_trigger(mvm, pkt);
pkt              1002 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	iwl_notification_wait_notify(&mvm->notif_wait, pkt);
pkt              1008 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
pkt              1038 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1040 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
pkt              1047 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 		iwl_mvm_rx_common(mvm, rxb, pkt);
pkt              1054 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1056 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
pkt              1070 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 		iwl_mvm_rx_common(mvm, rxb, pkt);
pkt              1360 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1361 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
pkt               583 drivers/net/wireless/intel/iwlwifi/mvm/power.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               584 drivers/net/wireless/intel/iwlwifi/mvm/power.c 	struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
pkt               331 drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               340 drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c 	notif = (void *)pkt->data;
pkt                78 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt                80 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
pkt               347 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               358 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
pkt               359 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
pkt               362 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 		(pkt->data + sizeof(*rx_res) + len));
pkt               680 drivers/net/wireless/intel/iwlwifi/mvm/rx.c iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
pkt               695 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
pkt               698 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
pkt               705 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 				  struct iwl_rx_packet *pkt)
pkt               726 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
pkt               728 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 		      iwl_rx_packet_payload_len(pkt)))
pkt               732 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 		struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
pkt               753 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 		struct iwl_notif_statistics *stats = (void *)&pkt->data;
pkt               776 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	iwl_mvm_rx_stats_check_trigger(mvm, pkt);
pkt               787 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 		struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data;
pkt               793 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 		struct iwl_notif_statistics *stats = (void *)&pkt->data;
pkt               849 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               850 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
pkt               852 drivers/net/wireless/intel/iwlwifi/mvm/rx.c 	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
pkt               176 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               177 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
pkt               763 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               767 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	notif = (void *)pkt->data;
pkt              1552 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1553 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
pkt              1602 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	hdr = (void *)(pkt->data + desc_size);
pkt              1648 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			      le32_to_cpu(pkt->len_n_flags), queue,
pkt              1880 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1881 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_rx_no_data *desc = (void *)pkt->data;
pkt              2021 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              2022 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_frame_release *release = (void *)pkt->data;
pkt              2032 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              2033 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	struct iwl_bar_frame_release *release = (void *)pkt->data;
pkt               411 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               412 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
pkt               449 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               450 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
pkt              1924 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1925 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
pkt              1971 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1972 drivers/net/wireless/intel/iwlwifi/mvm/scan.c 	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
pkt              3749 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              3750 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
pkt               123 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	struct iwl_rx_packet *pkt;
pkt               170 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	pkt = cmd.resp_pkt;
pkt               172 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
pkt               254 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               255 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
pkt               422 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               423 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 	struct iwl_time_event_notif *notif = (void *)pkt->data;
pkt               444 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 			     struct iwl_rx_packet *pkt, void *data)
pkt               450 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 	int resp_len = iwl_rx_packet_payload_len(pkt);
pkt               452 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
pkt               460 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 	resp = (void *)pkt->data;
pkt               476 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 					struct iwl_rx_packet *pkt, void *data)
pkt               482 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 	int resp_len = iwl_rx_packet_payload_len(pkt);
pkt               484 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
pkt               492 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c 	resp = (void *)pkt->data;
pkt               117 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 				    struct iwl_rx_packet *pkt)
pkt               120 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	int len = iwl_rx_packet_payload_len(pkt);
pkt               131 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	notif_v1 = (void *)pkt->data;
pkt               145 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 				    struct iwl_rx_packet *pkt, void *data)
pkt               152 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	ret = iwl_mvm_temp_notif_parse(mvm, pkt);
pkt               163 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               165 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	int len = iwl_rx_packet_payload_len(pkt);
pkt               173 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	temp = iwl_mvm_temp_notif_parse(mvm, pkt);
pkt               186 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	notif_v2 = (void *)pkt->data;
pkt               213 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               215 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	int len = iwl_rx_packet_payload_len(pkt);
pkt               222 drivers/net/wireless/intel/iwlwifi/mvm/tt.c 	notif = (struct ct_kill_notif *)pkt->data;
pkt              1442 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 				     struct iwl_rx_packet *pkt)
pkt              1445 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
pkt              1448 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
pkt              1694 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 				      struct iwl_rx_packet *pkt)
pkt              1696 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
pkt              1715 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 				      struct iwl_rx_packet *pkt)
pkt              1720 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 				  struct iwl_rx_packet *pkt)
pkt              1722 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
pkt              1725 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
pkt              1734 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
pkt              1762 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1763 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
pkt              1766 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
pkt              1768 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
pkt              1896 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1907 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			(void *)pkt->data;
pkt              1958 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	ba_notif = (void *)pkt->data;
pkt               129 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 	struct iwl_rx_packet *pkt;
pkt               161 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 	pkt = cmd->resp_pkt;
pkt               163 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 	resp_len = iwl_rx_packet_payload_len(pkt);
pkt               169 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 	resp = (void *)pkt->data;
pkt               249 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt               250 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 	struct iwl_error_resp *err_resp = (void *)pkt->data;
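The iwlwifi mvm handlers indexed above share one receive pattern: the handler takes the rx buffer, obtains the framed packet with rxb_addr(), and then treats pkt->data as the specific notification body, usually after checking the payload length against the expected struct size. Below is a minimal sketch of that pattern as ordinary standalone C; rx_packet, scan_complete_notif and rx_payload_len are simplified stand-ins invented for the example, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rx_packet {                      /* mock of a framed RX packet */
	uint32_t len_n_flags;           /* low bits: byte count of hdr + payload */
	struct {
		uint8_t  cmd;
		uint8_t  group_id;
		uint16_t sequence;
	} hdr;
	uint8_t data[];                 /* notification payload starts here */
};

struct scan_complete_notif {            /* mock notification layout */
	uint32_t status;
	uint32_t scanned_channels;
};

static size_t rx_payload_len(const struct rx_packet *pkt)
{
	/* in this sketch, len_n_flags carries sizeof(hdr) + payload bytes */
	return (pkt->len_n_flags & 0x3fff) - sizeof(pkt->hdr);
}

static void handle_scan_complete(const struct rx_packet *pkt)
{
	const struct scan_complete_notif *notif;

	if (rx_payload_len(pkt) < sizeof(*notif)) {
		fprintf(stderr, "short scan-complete notification\n");
		return;
	}
	notif = (const void *)pkt->data;   /* same cast style as the driver code */
	printf("scan done: status=%u, channels=%u\n",
	       (unsigned)notif->status, (unsigned)notif->scanned_channels);
}

int main(void)
{
	struct scan_complete_notif body = { .status = 0, .scanned_channels = 11 };
	struct rx_packet *pkt = malloc(sizeof(*pkt) + sizeof(body));

	if (!pkt)
		return 1;
	/* build a fake packet the way the DMA ring would present one */
	pkt->len_n_flags = (uint32_t)(sizeof(pkt->hdr) + sizeof(body));
	pkt->hdr.cmd = 0x6d;               /* arbitrary opcode for the demo */
	memcpy(pkt->data, &body, sizeof(body));
	handle_scan_complete(pkt);
	free(pkt);
	return 0;
}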
pkt              1255 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		struct iwl_rx_packet *pkt;
pkt              1267 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		pkt = rxb_addr(&rxcb);
pkt              1269 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
pkt              1276 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
pkt              1280 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
pkt              1287 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 						iwl_cmd_id(pkt->hdr.cmd,
pkt              1288 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 							   pkt->hdr.group_id,
pkt              1290 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			     pkt->hdr.group_id, pkt->hdr.cmd,
pkt              1291 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			     le16_to_cpu(pkt->hdr.sequence));
pkt              1293 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		len = iwl_rx_packet_len(pkt);
pkt              1295 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
pkt              1296 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
pkt              1304 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
pkt              1305 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (reclaim && !pkt->hdr.group_id) {
pkt              1310 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 							pkt->hdr.cmd) {
pkt              1317 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		sequence = le16_to_cpu(pkt->hdr.sequence);
pkt              1803 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
pkt              1804 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
pkt              1822 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		iwl_print_hex_error(trans, pkt, 32);
pkt              1840 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		meta->source->resp_pkt = pkt;
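The pcie/rx.c and pcie/tx.c entries above show the other half of that flow: a bit in pkt->hdr.sequence marks unsolicited frames, and only packets without it are treated as command responses whose ring slot can be reclaimed. A small sketch of that decision follows; the exact bit positions used here (flag in bit 15, queue in bits 8..12, index in the low byte) are assumptions made for the illustration, not a statement about the hardware layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout for the sketch only: bit 15 flags an unsolicited RX frame,
 * bits 8..12 carry the TX queue id, bits 0..7 the ring index of the command.
 */
#define SEQ_RX_FRAME_FLAG  0x8000u

static bool sequence_is_cmd_response(uint16_t sequence)
{
	/* only packets *without* the RX-frame flag answer a host command */
	return !(sequence & SEQ_RX_FRAME_FLAG);
}

static unsigned seq_to_queue(uint16_t sequence) { return (sequence >> 8) & 0x1f; }
static unsigned seq_to_index(uint16_t sequence) { return sequence & 0xff; }

int main(void)
{
	uint16_t seq = 0x0407;   /* queue 4, index 7, flag clear */

	if (sequence_is_cmd_response(seq))
		printf("reclaim cmd slot: queue=%u index=%u\n",
		       seq_to_queue(seq), seq_to_index(seq));
	return 0;
}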
pkt               661 drivers/net/wireless/marvell/libertas/if_usb.c 	__le32 *pkt = (__le32 *)(skb->data + IPFIELD_ALIGN_OFFSET);
pkt               673 drivers/net/wireless/marvell/libertas/if_usb.c 		recvtype = le32_to_cpu(pkt[0]);
pkt               693 drivers/net/wireless/marvell/libertas/if_usb.c 		event = le32_to_cpu(pkt[1]);
pkt               632 drivers/net/wireless/marvell/libertas_tf/if_usb.c 	__le32 *pkt = (__le32 *) skb->data;
pkt               645 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		recvtype = le32_to_cpu(pkt[0]);
pkt               667 drivers/net/wireless/marvell/libertas_tf/if_usb.c 		u32 event_cause = le32_to_cpu(pkt[1]);
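The libertas entries read the receive type and event code as the first two little-endian 32-bit words of the URB payload (le32_to_cpu(pkt[0]), le32_to_cpu(pkt[1])). A portable equivalent of that decode, with get_le32 as a helper invented for the sketch:

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 32-bit read; equivalent in effect to le32_to_cpu()
 * applied to an aligned __le32 load.
 */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* fake URB payload: word 0 = message type, word 1 = event code */
	const uint8_t rx[8] = { 0x02, 0x00, 0x00, 0x00,   /* type 2 */
	                        0x30, 0x00, 0x00, 0x00 }; /* event 0x30 */
	uint32_t recvtype = get_le32(rx);
	uint32_t event    = get_le32(rx + 4);

	printf("recvtype=%u event=0x%x\n", (unsigned)recvtype, (unsigned)event);
	return 0;
}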
pkt              1110 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		u8 pkt[512];
pkt              1133 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
pkt              1135 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
pkt               767 drivers/net/wireless/rsi/rsi_91x_sdio.c 					u8 *pkt,
pkt               777 drivers/net/wireless/rsi/rsi_91x_sdio.c 	queueno = ((pkt[1] >> 4) & 0xf);
pkt               791 drivers/net/wireless/rsi/rsi_91x_sdio.c 						  (u8 *)pkt,
pkt               810 drivers/net/wireless/rsi/rsi_91x_sdio.c 				u8 *pkt,
pkt               822 drivers/net/wireless/rsi/rsi_91x_sdio.c 						 (u8 *)pkt,
pkt               450 drivers/net/wireless/rsi/rsi_91x_usb.c 				       u8 *pkt,
pkt               453 drivers/net/wireless/rsi/rsi_91x_usb.c 	u32 queueno = ((pkt[1] >> 4) & 0x7);
pkt               461 drivers/net/wireless/rsi/rsi_91x_usb.c 				  (u8 *)pkt,
pkt               384 drivers/net/wireless/rsi/rsi_main.h 	int (*read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
pkt               385 drivers/net/wireless/rsi/rsi_main.h 	int (*write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
pkt               138 drivers/net/wireless/rsi/rsi_sdio.h int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter, u8 *pkt, u32 length);
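The rsi entries derive the target queue from the high nibble of the frame descriptor's second byte, e.g. (pkt[1] >> 4) & 0xf. A tiny standalone version of that extraction (desc_queue_no is a name made up for the sketch):

#include <stdint.h>
#include <stdio.h>

/* In this sketch the descriptor's second byte carries the queue number in
 * its high nibble, mirroring the expression used in the driver lines above.
 */
static unsigned desc_queue_no(const uint8_t *frame)
{
	return (frame[1] >> 4) & 0xf;
}

int main(void)
{
	uint8_t frame[4] = { 0x00, 0x52, 0x00, 0x00 }; /* high nibble 5 = queue 5 */

	printf("queue %u\n", desc_queue_no(frame));
	return 0;
}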
pkt               227 drivers/net/xen-netback/rx.c 			       struct xenvif_pkt_state *pkt)
pkt               238 drivers/net/xen-netback/rx.c 	memset(pkt, 0, sizeof(struct xenvif_pkt_state));
pkt               240 drivers/net/xen-netback/rx.c 	pkt->skb = skb;
pkt               241 drivers/net/xen-netback/rx.c 	pkt->frag_iter = skb;
pkt               242 drivers/net/xen-netback/rx.c 	pkt->remaining_len = skb->len;
pkt               243 drivers/net/xen-netback/rx.c 	pkt->frag = -1;
pkt               249 drivers/net/xen-netback/rx.c 		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
pkt               258 drivers/net/xen-netback/rx.c 		pkt->extra_count++;
pkt               264 drivers/net/xen-netback/rx.c 		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
pkt               285 drivers/net/xen-netback/rx.c 		pkt->extra_count++;
pkt               290 drivers/net/xen-netback/rx.c 			       struct xenvif_pkt_state *pkt)
pkt               295 drivers/net/xen-netback/rx.c 	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
pkt               298 drivers/net/xen-netback/rx.c static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
pkt               300 drivers/net/xen-netback/rx.c 	struct sk_buff *frag_iter = pkt->frag_iter;
pkt               303 drivers/net/xen-netback/rx.c 	pkt->frag++;
pkt               304 drivers/net/xen-netback/rx.c 	pkt->frag_offset = 0;
pkt               306 drivers/net/xen-netback/rx.c 	if (pkt->frag >= nr_frags) {
pkt               307 drivers/net/xen-netback/rx.c 		if (frag_iter == pkt->skb)
pkt               308 drivers/net/xen-netback/rx.c 			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
pkt               310 drivers/net/xen-netback/rx.c 			pkt->frag_iter = frag_iter->next;
pkt               312 drivers/net/xen-netback/rx.c 		pkt->frag = -1;
pkt               317 drivers/net/xen-netback/rx.c 				 struct xenvif_pkt_state *pkt,
pkt               321 drivers/net/xen-netback/rx.c 	struct sk_buff *frag_iter = pkt->frag_iter;
pkt               327 drivers/net/xen-netback/rx.c 	if (pkt->frag == -1) {
pkt               331 drivers/net/xen-netback/rx.c 		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];
pkt               337 drivers/net/xen-netback/rx.c 	frag_data += pkt->frag_offset;
pkt               338 drivers/net/xen-netback/rx.c 	frag_len -= pkt->frag_offset;
pkt               344 drivers/net/xen-netback/rx.c 	pkt->frag_offset += chunk_len;
pkt               348 drivers/net/xen-netback/rx.c 		xenvif_rx_next_frag(pkt);
pkt               355 drivers/net/xen-netback/rx.c 				struct xenvif_pkt_state *pkt,
pkt               366 drivers/net/xen-netback/rx.c 		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
pkt               370 drivers/net/xen-netback/rx.c 		pkt->remaining_len -= len;
pkt               372 drivers/net/xen-netback/rx.c 	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);
pkt               374 drivers/net/xen-netback/rx.c 	if (pkt->remaining_len > 0)
pkt               379 drivers/net/xen-netback/rx.c 	if (pkt->slot == 0) {
pkt               380 drivers/net/xen-netback/rx.c 		struct sk_buff *skb = pkt->skb;
pkt               388 drivers/net/xen-netback/rx.c 		if (pkt->extra_count != 0)
pkt               399 drivers/net/xen-netback/rx.c 				 struct xenvif_pkt_state *pkt,
pkt               406 drivers/net/xen-netback/rx.c 	pkt->extra_count--;
pkt               408 drivers/net/xen-netback/rx.c 	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
pkt               409 drivers/net/xen-netback/rx.c 		if (pkt->extras[i].type) {
pkt               410 drivers/net/xen-netback/rx.c 			*extra = pkt->extras[i];
pkt               412 drivers/net/xen-netback/rx.c 			if (pkt->extra_count != 0)
pkt               415 drivers/net/xen-netback/rx.c 			pkt->extras[i].type = 0;
pkt               424 drivers/net/xen-netback/rx.c 	struct xenvif_pkt_state pkt;
pkt               426 drivers/net/xen-netback/rx.c 	xenvif_rx_next_skb(queue, &pkt);
pkt               438 drivers/net/xen-netback/rx.c 		if (pkt.slot != 0 && pkt.extra_count != 0)
pkt               439 drivers/net/xen-netback/rx.c 			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
pkt               441 drivers/net/xen-netback/rx.c 			xenvif_rx_data_slot(queue, &pkt, req, rsp);
pkt               444 drivers/net/xen-netback/rx.c 		pkt.slot++;
pkt               445 drivers/net/xen-netback/rx.c 	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);
pkt               447 drivers/net/xen-netback/rx.c 	xenvif_rx_complete(queue, &pkt);
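The xen-netback rx.c entries implement a per-packet iterator: xenvif_pkt_state remembers the current fragment, the offset inside it, and the bytes still to copy, and the code hands out chunk after chunk until a page-sized slot is full or the packet is exhausted. A much-simplified sketch of that chunking loop over a flat buffer follows; pkt_state, next_chunk and SLOT_SIZE are illustrative names, and the real code additionally walks skb fragments and a frag_list.

#include <stddef.h>
#include <stdio.h>

#define SLOT_SIZE 16   /* stand-in for XEN_PAGE_SIZE, kept tiny for the demo */

struct pkt_state {             /* simplified analogue of a packet iterator */
	const char *data;
	size_t offset;         /* how far into the packet we are */
	size_t remaining;      /* bytes still to hand out */
};

/* Hand out the next chunk, never exceeding the free space in the slot. */
static size_t next_chunk(struct pkt_state *pkt, size_t slot_space,
			 const char **chunk)
{
	size_t len = pkt->remaining < slot_space ? pkt->remaining : slot_space;

	*chunk = pkt->data + pkt->offset;
	pkt->offset += len;
	pkt->remaining -= len;
	return len;
}

int main(void)
{
	const char payload[] = "a packet split across fixed-size ring slots";
	struct pkt_state pkt = { payload, 0, sizeof(payload) - 1 };
	int slot = 0;

	while (pkt.remaining > 0) {
		size_t used = 0;

		/* fill one slot from as many chunks as needed */
		while (used < SLOT_SIZE && pkt.remaining > 0) {
			const char *chunk;
			size_t len = next_chunk(&pkt, SLOT_SIZE - used, &chunk);

			printf("slot %d: copy %zu bytes \"%.*s\"\n",
			       slot, len, (int)len, chunk);
			used += len;
		}
		slot++;
	}
	return 0;
}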
pkt               940 drivers/pci/controller/pci-hyperv.c 		struct pci_packet pkt;
pkt               942 drivers/pci/controller/pci-hyperv.c 	} pkt;
pkt               954 drivers/pci/controller/pci-hyperv.c 	memset(&pkt, 0, sizeof(pkt));
pkt               955 drivers/pci/controller/pci-hyperv.c 	pkt.pkt.completion_func = hv_pci_read_config_compl;
pkt               956 drivers/pci/controller/pci-hyperv.c 	pkt.pkt.compl_ctxt = &comp_pkt;
pkt               957 drivers/pci/controller/pci-hyperv.c 	read_blk = (struct pci_read_block *)&pkt.pkt.message;
pkt               964 drivers/pci/controller/pci-hyperv.c 			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
pkt              1020 drivers/pci/controller/pci-hyperv.c 		struct pci_packet pkt;
pkt              1023 drivers/pci/controller/pci-hyperv.c 	} pkt;
pkt              1034 drivers/pci/controller/pci-hyperv.c 	memset(&pkt, 0, sizeof(pkt));
pkt              1035 drivers/pci/controller/pci-hyperv.c 	pkt.pkt.completion_func = hv_pci_write_config_compl;
pkt              1036 drivers/pci/controller/pci-hyperv.c 	pkt.pkt.compl_ctxt = &comp_pkt;
pkt              1037 drivers/pci/controller/pci-hyperv.c 	write_blk = (struct pci_write_block *)&pkt.pkt.message;
pkt              1051 drivers/pci/controller/pci-hyperv.c 	pkt_size += sizeof(pkt.reserved);
pkt              1054 drivers/pci/controller/pci-hyperv.c 			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
pkt              1109 drivers/pci/controller/pci-hyperv.c 		struct pci_packet pkt;
pkt              1114 drivers/pci/controller/pci-hyperv.c 	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
pkt              1120 drivers/pci/controller/pci-hyperv.c 			 (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
pkt              1873 drivers/pci/controller/pci-hyperv.c 	} pkt;
pkt              1883 drivers/pci/controller/pci-hyperv.c 	memset(&pkt, 0, sizeof(pkt));
pkt              1886 drivers/pci/controller/pci-hyperv.c 	pkt.init_packet.compl_ctxt = &comp_pkt;
pkt              1887 drivers/pci/controller/pci-hyperv.c 	pkt.init_packet.completion_func = q_resource_requirements;
pkt              1888 drivers/pci/controller/pci-hyperv.c 	res_req = (struct pci_child_message *)&pkt.init_packet.message;
pkt              1894 drivers/pci/controller/pci-hyperv.c 			       (unsigned long)&pkt.init_packet,
pkt              2167 drivers/pci/controller/pci-hyperv.c 		struct pci_packet pkt;
pkt              2199 drivers/pci/controller/pci-hyperv.c 	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
pkt              2203 drivers/pci/controller/pci-hyperv.c 			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
pkt              2386 drivers/pci/controller/pci-hyperv.c 	struct pci_packet *pkt;
pkt              2396 drivers/pci/controller/pci-hyperv.c 	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
pkt              2397 drivers/pci/controller/pci-hyperv.c 	if (!pkt)
pkt              2401 drivers/pci/controller/pci-hyperv.c 	pkt->completion_func = hv_pci_generic_compl;
pkt              2402 drivers/pci/controller/pci-hyperv.c 	pkt->compl_ctxt = &comp_pkt;
pkt              2403 drivers/pci/controller/pci-hyperv.c 	version_req = (struct pci_version_request *)&pkt->message;
pkt              2410 drivers/pci/controller/pci-hyperv.c 				(unsigned long)pkt, VM_PKT_DATA_INBAND,
pkt              2446 drivers/pci/controller/pci-hyperv.c 	kfree(pkt);
pkt              2607 drivers/pci/controller/pci-hyperv.c 	struct pci_packet *pkt;
pkt              2616 drivers/pci/controller/pci-hyperv.c 	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
pkt              2617 drivers/pci/controller/pci-hyperv.c 	if (!pkt)
pkt              2621 drivers/pci/controller/pci-hyperv.c 	pkt->completion_func = hv_pci_generic_compl;
pkt              2622 drivers/pci/controller/pci-hyperv.c 	pkt->compl_ctxt = &comp_pkt;
pkt              2623 drivers/pci/controller/pci-hyperv.c 	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
pkt              2628 drivers/pci/controller/pci-hyperv.c 			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
pkt              2647 drivers/pci/controller/pci-hyperv.c 	kfree(pkt);
pkt              2705 drivers/pci/controller/pci-hyperv.c 	struct pci_packet *pkt;
pkt              2713 drivers/pci/controller/pci-hyperv.c 	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
pkt              2714 drivers/pci/controller/pci-hyperv.c 	if (!pkt)
pkt              2724 drivers/pci/controller/pci-hyperv.c 		memset(pkt, 0, sizeof(*pkt) + size_res);
pkt              2726 drivers/pci/controller/pci-hyperv.c 		pkt->completion_func = hv_pci_generic_compl;
pkt              2727 drivers/pci/controller/pci-hyperv.c 		pkt->compl_ctxt = &comp_pkt;
pkt              2731 drivers/pci/controller/pci-hyperv.c 				(struct pci_resources_assigned *)&pkt->message;
pkt              2737 drivers/pci/controller/pci-hyperv.c 				(struct pci_resources_assigned2 *)&pkt->message;
pkt              2744 drivers/pci/controller/pci-hyperv.c 		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
pkt              2745 drivers/pci/controller/pci-hyperv.c 				size_res, (unsigned long)pkt,
pkt              2762 drivers/pci/controller/pci-hyperv.c 	kfree(pkt);
pkt              2776 drivers/pci/controller/pci-hyperv.c 	struct pci_child_message pkt;
pkt              2786 drivers/pci/controller/pci-hyperv.c 		memset(&pkt, 0, sizeof(pkt));
pkt              2787 drivers/pci/controller/pci-hyperv.c 		pkt.message_type.type = PCI_RESOURCES_RELEASED;
pkt              2788 drivers/pci/controller/pci-hyperv.c 		pkt.wslot.slot = hpdev->desc.win_slot.slot;
pkt              2792 drivers/pci/controller/pci-hyperv.c 		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
pkt              3020 drivers/pci/controller/pci-hyperv.c 	} pkt;
pkt              3041 drivers/pci/controller/pci-hyperv.c 	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
pkt              3043 drivers/pci/controller/pci-hyperv.c 	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt              3044 drivers/pci/controller/pci-hyperv.c 	pkt.teardown_packet.compl_ctxt = &comp_pkt;
pkt              3045 drivers/pci/controller/pci-hyperv.c 	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
pkt              3047 drivers/pci/controller/pci-hyperv.c 	ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
pkt              3049 drivers/pci/controller/pci-hyperv.c 			       (unsigned long)&pkt.teardown_packet,
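The pci-hyperv entries repeat one request pattern: a generic packet header carrying a completion callback and its context is allocated together with the protocol message, the message body is reached by casting &pkt->message, and the packet's address is handed to the send call as the cookie the completion path later uses to find the request. A self-contained sketch of that shape with mock types (packet, version_req_msg, channel_send) rather than the real Hyper-V structures:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Mock framing types; the real Hyper-V structures differ. */
struct packet {
	void (*completion_func)(void *ctxt, int status);
	void *compl_ctxt;
	uint8_t message[];              /* protocol message follows the header */
};

struct version_req_msg {                /* mock message body */
	uint32_t type;
	uint32_t version;
};

struct completion_ctx {
	int done;
	int status;
};

static void generic_compl(void *ctxt, int status)
{
	struct completion_ctx *comp = ctxt;

	comp->status = status;
	comp->done = 1;
}

/* Fake transport: a real ring-buffer send would complete asynchronously,
 * using the cookie to locate the waiting request.
 */
static void channel_send(const void *msg, size_t len, struct packet *cookie)
{
	(void)msg; (void)len;
	cookie->completion_func(cookie->compl_ctxt, 0);
}

int main(void)
{
	struct completion_ctx comp = { 0, -1 };
	struct version_req_msg *msg;
	struct packet *pkt;

	/* header and message body live in one allocation */
	pkt = calloc(1, sizeof(*pkt) + sizeof(*msg));
	if (!pkt)
		return 1;
	pkt->completion_func = generic_compl;
	pkt->compl_ctxt = &comp;
	msg = (struct version_req_msg *)&pkt->message;
	msg->type = 1;
	msg->version = 0x10001;

	channel_send(msg, sizeof(*msg), pkt);
	printf("completed=%d status=%d\n", comp.done, comp.status);
	free(pkt);
	return 0;
}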
pkt                55 drivers/scsi/libfc/fc_libfc.h #define FC_FCP_DBG(pkt, fmt, args...)					\
pkt                58 drivers/scsi/libfc/fc_libfc.h 		if ((pkt)->seq_ptr) {					\
pkt                60 drivers/scsi/libfc/fc_libfc.h 			_ep = fc_seq_exch((pkt)->seq_ptr);		\
pkt                63 drivers/scsi/libfc/fc_libfc.h 				(pkt)->lp->host->host_no,		\
pkt                64 drivers/scsi/libfc/fc_libfc.h 				(pkt)->rport->port_id,			\
pkt                68 drivers/scsi/libfc/fc_libfc.h 				(pkt)->lp->host->host_no,		\
pkt                69 drivers/scsi/libfc/fc_libfc.h 				(pkt)->rport->port_id, ##args);		\
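The fc_libfc.h entry is a debug macro built around a do { ... } while (0) body that prints one of two prefixes depending on whether the packet already has an exchange attached, forwarding the caller's format arguments with ##. A compact macro of the same shape, using invented names (FCP_DBG, struct fcp_pkt) and the common ##__VA_ARGS__ spelling:

#include <stdio.h>

struct fcp_pkt {                 /* mock of the packet the macro inspects */
	int host_no;
	unsigned port_id;
	void *seq_ptr;           /* non-NULL once an exchange is attached */
};

/* do { ... } while (0) keeps the macro safe inside if/else without braces;
 * ## before __VA_ARGS__ swallows the comma when no extra args are given.
 */
#define FCP_DBG(pkt, fmt, ...)                                          \
	do {                                                            \
		if ((pkt)->seq_ptr)                                     \
			printf("host%d: fcp: %6.6x: xchg: " fmt,        \
			       (pkt)->host_no, (pkt)->port_id,          \
			       ##__VA_ARGS__);                          \
		else                                                    \
			printf("host%d: fcp: %6.6x: " fmt,              \
			       (pkt)->host_no, (pkt)->port_id,          \
			       ##__VA_ARGS__);                          \
	} while (0)

int main(void)
{
	struct fcp_pkt pkt = { .host_no = 0, .port_id = 0x0100a0, .seq_ptr = NULL };

	FCP_DBG(&pkt, "command queued, len %d\n", 512);
	return 0;
}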
pkt                41 drivers/scsi/ps3rom.c 	u8	pkt[32];	/* packet command block           */
pkt                96 drivers/scsi/ps3rom.c 	memcpy(&atapi_cmnd.pkt, cmd->cmnd, 12);
pkt               738 drivers/scsi/qedi/qedi_main.c 	void *pkt;
pkt               753 drivers/scsi/qedi/qedi_main.c 	pkt = udev->rx_pkt + (prod * qedi_ll2_buf_size);
pkt               755 drivers/scsi/qedi/qedi_main.c 	memcpy(pkt, skb->data, len);
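The qedi entry copies a received frame into the producer slot of one flat, preallocated receive area, addressing the slot as base + producer * per-buffer size. A minimal version of that slot arithmetic (NUM_SLOTS, SLOT_SIZE and push_frame are invented for the sketch):

#include <stdio.h>
#include <string.h>

#define NUM_SLOTS 4
#define SLOT_SIZE 32            /* stand-in for the per-buffer size */

static char rx_area[NUM_SLOTS * SLOT_SIZE];  /* one flat allocation */
static unsigned prod;                        /* producer index */

/* Copy one frame into the next producer slot; the destination is simply
 * base + prod * slot_size, like the memcpy target in the entry above.
 */
static void push_frame(const void *data, size_t len)
{
	char *slot = rx_area + (prod % NUM_SLOTS) * SLOT_SIZE;

	if (len > SLOT_SIZE)
		len = SLOT_SIZE;             /* a real driver would drop or trim */
	memcpy(slot, data, len);
	prod++;
}

int main(void)
{
	push_frame("frame-0", 8);
	push_frame("frame-1", 8);
	printf("slot0=%s slot1=%s prod=%u\n", rx_area, rx_area + SLOT_SIZE, prod);
	return 0;
}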
pkt              2704 drivers/scsi/qla1280.c 	struct mrk_entry *pkt;
pkt              2709 drivers/scsi/qla1280.c 	if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
pkt              2710 drivers/scsi/qla1280.c 		pkt->entry_type = MARKER_TYPE;
pkt              2711 drivers/scsi/qla1280.c 		pkt->lun = (uint8_t) lun;
pkt              2712 drivers/scsi/qla1280.c 		pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
pkt              2713 drivers/scsi/qla1280.c 		pkt->modifier = type;
pkt              2714 drivers/scsi/qla1280.c 		pkt->entry_status = 0;
pkt              2742 drivers/scsi/qla1280.c 	cmd_a64_entry_t *pkt;
pkt              2813 drivers/scsi/qla1280.c 	pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
pkt              2815 drivers/scsi/qla1280.c 	pkt->entry_type = COMMAND_A64_TYPE;
pkt              2816 drivers/scsi/qla1280.c 	pkt->entry_count = (uint8_t) req_cnt;
pkt              2817 drivers/scsi/qla1280.c 	pkt->sys_define = (uint8_t) ha->req_ring_index;
pkt              2818 drivers/scsi/qla1280.c 	pkt->entry_status = 0;
pkt              2819 drivers/scsi/qla1280.c 	pkt->handle = cpu_to_le32(cnt);
pkt              2822 drivers/scsi/qla1280.c 	memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
pkt              2825 drivers/scsi/qla1280.c 	pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
pkt              2828 drivers/scsi/qla1280.c 	pkt->lun = SCSI_LUN_32(cmd);
pkt              2829 drivers/scsi/qla1280.c 	pkt->target = SCSI_BUS_32(cmd) ?
pkt              2834 drivers/scsi/qla1280.c 		pkt->control_flags |= cpu_to_le16(BIT_3);
pkt              2837 drivers/scsi/qla1280.c 	pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
pkt              2838 drivers/scsi/qla1280.c 	memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
pkt              2843 drivers/scsi/qla1280.c 	pkt->control_flags |= cpu_to_le16(dir);
pkt              2846 drivers/scsi/qla1280.c 	pkt->dseg_count = cpu_to_le16(seg_cnt);
pkt              2858 drivers/scsi/qla1280.c 		dword_ptr = (u32 *)&pkt->dseg_0_address;
pkt              2881 drivers/scsi/qla1280.c 		qla1280_dump_buffer(5, (char *)pkt,
pkt              2902 drivers/scsi/qla1280.c 			pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
pkt              2905 drivers/scsi/qla1280.c 			memset(pkt, 0, REQUEST_ENTRY_SIZE);
pkt              2908 drivers/scsi/qla1280.c 			((struct cont_a64_entry *) pkt)->entry_type =
pkt              2910 drivers/scsi/qla1280.c 			((struct cont_a64_entry *) pkt)->entry_count = 1;
pkt              2911 drivers/scsi/qla1280.c 			((struct cont_a64_entry *) pkt)->sys_define =
pkt              2915 drivers/scsi/qla1280.c 				(u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
pkt              2938 drivers/scsi/qla1280.c 			qla1280_dump_buffer(5, (char *)pkt,
pkt              2945 drivers/scsi/qla1280.c 		qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
pkt              2996 drivers/scsi/qla1280.c 	struct cmd_entry *pkt;
pkt              3068 drivers/scsi/qla1280.c 	pkt = (struct cmd_entry *) ha->request_ring_ptr;
pkt              3070 drivers/scsi/qla1280.c 	pkt->entry_type = COMMAND_TYPE;
pkt              3071 drivers/scsi/qla1280.c 	pkt->entry_count = (uint8_t) req_cnt;
pkt              3072 drivers/scsi/qla1280.c 	pkt->sys_define = (uint8_t) ha->req_ring_index;
pkt              3073 drivers/scsi/qla1280.c 	pkt->entry_status = 0;
pkt              3074 drivers/scsi/qla1280.c 	pkt->handle = cpu_to_le32(cnt);
pkt              3077 drivers/scsi/qla1280.c 	memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
pkt              3080 drivers/scsi/qla1280.c 	pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
pkt              3083 drivers/scsi/qla1280.c 	pkt->lun = SCSI_LUN_32(cmd);
pkt              3084 drivers/scsi/qla1280.c 	pkt->target = SCSI_BUS_32(cmd) ?
pkt              3089 drivers/scsi/qla1280.c 		pkt->control_flags |= cpu_to_le16(BIT_3);
pkt              3092 drivers/scsi/qla1280.c 	pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
pkt              3093 drivers/scsi/qla1280.c 	memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
pkt              3098 drivers/scsi/qla1280.c 	pkt->control_flags |= cpu_to_le16(dir);
pkt              3101 drivers/scsi/qla1280.c 	pkt->dseg_count = cpu_to_le16(seg_cnt);
pkt              3113 drivers/scsi/qla1280.c 		dword_ptr = &pkt->dseg_0_address;
pkt              3147 drivers/scsi/qla1280.c 			pkt = (struct cmd_entry *)ha->request_ring_ptr;
pkt              3150 drivers/scsi/qla1280.c 			memset(pkt, 0, REQUEST_ENTRY_SIZE);
pkt              3153 drivers/scsi/qla1280.c 			((struct cont_entry *) pkt)->
pkt              3155 drivers/scsi/qla1280.c 			((struct cont_entry *) pkt)->entry_count = 1;
pkt              3157 drivers/scsi/qla1280.c 			((struct cont_entry *) pkt)->sys_define =
pkt              3162 drivers/scsi/qla1280.c 				&((struct cont_entry *) pkt)->dseg_0_address;
pkt              3183 drivers/scsi/qla1280.c 			qla1280_dump_buffer(5, (char *)pkt,
pkt              3189 drivers/scsi/qla1280.c 		qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
pkt              3235 drivers/scsi/qla1280.c 	request_t *pkt = NULL;
pkt              3259 drivers/scsi/qla1280.c 			pkt = ha->request_ring_ptr;
pkt              3262 drivers/scsi/qla1280.c 			memset(pkt, 0, REQUEST_ENTRY_SIZE);
pkt              3269 drivers/scsi/qla1280.c 			pkt->sys_define = (uint8_t) ha->req_ring_index;
pkt              3272 drivers/scsi/qla1280.c 			pkt->entry_count = 1;
pkt              3283 drivers/scsi/qla1280.c 	if (!pkt)
pkt              3288 drivers/scsi/qla1280.c 	return pkt;
pkt              3342 drivers/scsi/qla1280.c 	struct response *pkt;
pkt              3502 drivers/scsi/qla1280.c 		pkt = ha->response_ring_ptr;
pkt              3507 drivers/scsi/qla1280.c 		qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
pkt              3509 drivers/scsi/qla1280.c 		if (pkt->entry_type == STATUS_TYPE) {
pkt              3510 drivers/scsi/qla1280.c 			if ((le16_to_cpu(pkt->scsi_status) & 0xff)
pkt              3511 drivers/scsi/qla1280.c 			    || pkt->comp_status || pkt->entry_status) {
pkt              3516 drivers/scsi/qla1280.c 					le16_to_cpu(pkt->comp_status),
pkt              3517 drivers/scsi/qla1280.c 					le16_to_cpu(pkt->scsi_status));
pkt              3524 drivers/scsi/qla1280.c 			qla1280_dump_buffer(2, (char *)pkt,
pkt              3528 drivers/scsi/qla1280.c 		if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
pkt              3530 drivers/scsi/qla1280.c 				ha->outstanding_cmds[pkt->handle]->cmd,
pkt              3531 drivers/scsi/qla1280.c 				pkt->handle);
pkt              3532 drivers/scsi/qla1280.c 			if (pkt->entry_type == STATUS_TYPE)
pkt              3533 drivers/scsi/qla1280.c 				qla1280_status_entry(ha, pkt, done_q);
pkt              3535 drivers/scsi/qla1280.c 				qla1280_error_entry(ha, pkt, done_q);
pkt              3596 drivers/scsi/qla1280.c qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
pkt              3603 drivers/scsi/qla1280.c 	uint32_t handle = le32_to_cpu(pkt->handle);
pkt              3604 drivers/scsi/qla1280.c 	uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
pkt              3605 drivers/scsi/qla1280.c 	uint16_t comp_status = le16_to_cpu(pkt->comp_status);
pkt              3643 drivers/scsi/qla1280.c 		CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
pkt              3648 drivers/scsi/qla1280.c 					le16_to_cpu(pkt->req_sense_length);
pkt              3660 drivers/scsi/qla1280.c 				       &pkt->req_sense_data, sense_sz);
pkt              3694 drivers/scsi/qla1280.c qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
pkt              3698 drivers/scsi/qla1280.c 	uint32_t handle = le32_to_cpu(pkt->handle);
pkt              3702 drivers/scsi/qla1280.c 	if (pkt->entry_status & BIT_3)
pkt              3704 drivers/scsi/qla1280.c 	else if (pkt->entry_status & BIT_2)
pkt              3706 drivers/scsi/qla1280.c 	else if (pkt->entry_status & BIT_1)
pkt              3722 drivers/scsi/qla1280.c 		if (pkt->entry_status & (BIT_3 + BIT_2)) {
pkt              3726 drivers/scsi/qla1280.c 		} else if (pkt->entry_status & BIT_1) {	/* FULL flag */
pkt              3739 drivers/scsi/qla1280.c 	else if (pkt->entry_type == COMMAND_A64_TYPE) {
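The qla1280 entries build request-ring IOCBs in place: take the next ring slot, fill the first header bytes (entry type, count, handle), zero the rest of the fixed-size entry, copy the CDB, and record the number of data segments before the scatter/gather addresses are written. A standalone sketch of that fill sequence with a mock cmd_entry layout; the field offsets and type code here are illustrative, not the adapter's real IOCB format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE 64
#define MAX_CDB    16

struct cmd_entry {                       /* mock request-ring IOCB */
	uint8_t  entry_type;
	uint8_t  entry_count;
	uint8_t  sys_define;
	uint8_t  entry_status;
	uint32_t handle;
	uint16_t lun;
	uint16_t target;
	uint16_t cdb_len;
	uint8_t  cdb[MAX_CDB];
	uint16_t dseg_count;
	uint8_t  pad[ENTRY_SIZE - 32];   /* keep the slot a fixed size */
};

static void build_cmd_entry(struct cmd_entry *pkt, uint32_t handle,
			    const uint8_t *cdb, uint16_t cdb_len,
			    uint16_t nseg)
{
	/* header first, then wipe the rest of the slot as the driver does */
	pkt->entry_type  = 0x11;         /* arbitrary "command" type code */
	pkt->entry_count = 1;
	pkt->entry_status = 0;
	pkt->handle = handle;
	memset((uint8_t *)pkt + 8, 0, ENTRY_SIZE - 8);

	pkt->cdb_len = cdb_len;
	memcpy(pkt->cdb, cdb, cdb_len);
	pkt->dseg_count = nseg;
}

int main(void)
{
	static const uint8_t inquiry[6] = { 0x12, 0, 0, 0, 36, 0 };
	struct cmd_entry entry;

	build_cmd_entry(&entry, 7, inquiry, sizeof(inquiry), 1);
	printf("handle=%u cdb[0]=0x%02x dsegs=%u\n",
	       (unsigned)entry.handle, (unsigned)entry.cdb[0],
	       (unsigned)entry.dseg_count);
	return 0;
}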
pkt              3755 drivers/scsi/qla2xxx/qla_init.c 	response_t *pkt;
pkt              3760 drivers/scsi/qla2xxx/qla_init.c 	pkt = rsp->ring_ptr;
pkt              3762 drivers/scsi/qla2xxx/qla_init.c 		pkt->signature = RESPONSE_PROCESSED;
pkt              3763 drivers/scsi/qla2xxx/qla_init.c 		pkt++;
pkt               769 drivers/scsi/qla2xxx/qla_iocb.c qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
pkt               780 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag = cpu_to_le32((uint32_t)
pkt               786 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[0] = 0xff;
pkt               787 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[1] = 0xff;
pkt               788 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[2] = 0xff;
pkt               789 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[3] = 0xff;
pkt               797 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->app_tag = cpu_to_le16(0);
pkt               798 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->app_tag_mask[0] = 0x0;
pkt               799 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->app_tag_mask[1] = 0x0;
pkt               801 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag = cpu_to_le32((uint32_t)
pkt               808 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[0] = 0xff;
pkt               809 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[1] = 0xff;
pkt               810 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[2] = 0xff;
pkt               811 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[3] = 0xff;
pkt               816 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
pkt               817 drivers/scsi/qla2xxx/qla_iocb.c 			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
pkt               826 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag = cpu_to_le32((uint32_t)
pkt               828 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->app_tag = cpu_to_le16(0);
pkt               829 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->app_tag_mask[0] = 0x0;
pkt               830 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->app_tag_mask[1] = 0x0;
pkt               836 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[0] = 0xff;
pkt               837 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[1] = 0xff;
pkt               838 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[2] = 0xff;
pkt               839 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->ref_tag_mask[3] = 0xff;
pkt              2251 drivers/scsi/qla2xxx/qla_iocb.c 	request_t *pkt;
pkt              2254 drivers/scsi/qla2xxx/qla_iocb.c 	pkt = NULL;
pkt              2305 drivers/scsi/qla2xxx/qla_iocb.c 	pkt = req->ring_ptr;
pkt              2306 drivers/scsi/qla2xxx/qla_iocb.c 	memset(pkt, 0, REQUEST_ENTRY_SIZE);
pkt              2308 drivers/scsi/qla2xxx/qla_iocb.c 		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
pkt              2309 drivers/scsi/qla2xxx/qla_iocb.c 		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
pkt              2311 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->entry_count = req_cnt;
pkt              2312 drivers/scsi/qla2xxx/qla_iocb.c 		pkt->handle = handle;
pkt              2315 drivers/scsi/qla2xxx/qla_iocb.c 	return pkt;
pkt              2319 drivers/scsi/qla2xxx/qla_iocb.c 	return pkt;
pkt              3546 drivers/scsi/qla2xxx/qla_iocb.c 	void *pkt;
pkt              3550 drivers/scsi/qla2xxx/qla_iocb.c 	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
pkt              3551 drivers/scsi/qla2xxx/qla_iocb.c 	if (!pkt) {
pkt              3561 drivers/scsi/qla2xxx/qla_iocb.c 		    qla24xx_login_iocb(sp, pkt) :
pkt              3562 drivers/scsi/qla2xxx/qla_iocb.c 		    qla2x00_login_iocb(sp, pkt);
pkt              3565 drivers/scsi/qla2xxx/qla_iocb.c 		qla24xx_prli_iocb(sp, pkt);
pkt              3569 drivers/scsi/qla2xxx/qla_iocb.c 		    qla24xx_logout_iocb(sp, pkt) :
pkt              3570 drivers/scsi/qla2xxx/qla_iocb.c 		    qla2x00_logout_iocb(sp, pkt);
pkt              3574 drivers/scsi/qla2xxx/qla_iocb.c 		qla24xx_els_iocb(sp, pkt);
pkt              3578 drivers/scsi/qla2xxx/qla_iocb.c 		    qla24xx_ct_iocb(sp, pkt) :
pkt              3579 drivers/scsi/qla2xxx/qla_iocb.c 		    qla2x00_ct_iocb(sp, pkt);
pkt              3583 drivers/scsi/qla2xxx/qla_iocb.c 		    qla24xx_adisc_iocb(sp, pkt) :
pkt              3584 drivers/scsi/qla2xxx/qla_iocb.c 		    qla2x00_adisc_iocb(sp, pkt);
pkt              3588 drivers/scsi/qla2xxx/qla_iocb.c 		    qlafx00_tm_iocb(sp, pkt) :
pkt              3589 drivers/scsi/qla2xxx/qla_iocb.c 		    qla24xx_tm_iocb(sp, pkt);
pkt              3593 drivers/scsi/qla2xxx/qla_iocb.c 		qlafx00_fxdisc_iocb(sp, pkt);
pkt              3596 drivers/scsi/qla2xxx/qla_iocb.c 		qla_nvme_ls(sp, pkt);
pkt              3600 drivers/scsi/qla2xxx/qla_iocb.c 			qlafx00_abort_iocb(sp, pkt) :
pkt              3601 drivers/scsi/qla2xxx/qla_iocb.c 			qla24xx_abort_iocb(sp, pkt);
pkt              3604 drivers/scsi/qla2xxx/qla_iocb.c 		qla24xx_els_logo_iocb(sp, pkt);
pkt              3607 drivers/scsi/qla2xxx/qla_iocb.c 		qla2x00_ctpthru_cmd_iocb(sp, pkt);
pkt              3610 drivers/scsi/qla2xxx/qla_iocb.c 		qla2x00_mb_iocb(sp, pkt);
pkt              3615 drivers/scsi/qla2xxx/qla_iocb.c 		qla2x00_send_notify_ack_iocb(sp, pkt);
pkt              3618 drivers/scsi/qla2xxx/qla_iocb.c 		qla25xx_ctrlvp_iocb(sp, pkt);
pkt              3621 drivers/scsi/qla2xxx/qla_iocb.c 		qla24xx_prlo_iocb(sp, pkt);
pkt              1313 drivers/scsi/qla2xxx/qla_isr.c 	sts_entry_t *pkt = iocb;
pkt              1317 drivers/scsi/qla2xxx/qla_isr.c 	index = LSW(pkt->handle);
pkt              1434 drivers/scsi/qla2xxx/qla_isr.c     struct mbx_24xx_entry *pkt)
pkt              1442 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              1447 drivers/scsi/qla2xxx/qla_isr.c 	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
pkt              1450 drivers/scsi/qla2xxx/qla_isr.c 		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
pkt              1459 drivers/scsi/qla2xxx/qla_isr.c     struct nack_to_isp *pkt)
pkt              1465 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              1469 drivers/scsi/qla2xxx/qla_isr.c 	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
pkt              1477 drivers/scsi/qla2xxx/qla_isr.c     sts_entry_t *pkt, int iocb_type)
pkt              1487 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              1498 drivers/scsi/qla2xxx/qla_isr.c 	    comp_status = le16_to_cpu(pkt->comp_status);
pkt              1511 drivers/scsi/qla2xxx/qla_isr.c 				le16_to_cpu(pkt->rsp_info_len);
pkt              1525 drivers/scsi/qla2xxx/qla_isr.c 			pkt, sizeof(*pkt));
pkt              1538 drivers/scsi/qla2xxx/qla_isr.c 	     res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
pkt              1549 drivers/scsi/qla2xxx/qla_isr.c     struct sts_entry_24xx *pkt, int iocb_type)
pkt              1561 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              1588 drivers/scsi/qla2xxx/qla_isr.c 		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
pkt              1599 drivers/scsi/qla2xxx/qla_isr.c 	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
pkt              1600 drivers/scsi/qla2xxx/qla_isr.c 	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
pkt              1601 drivers/scsi/qla2xxx/qla_isr.c 	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
pkt              1616 drivers/scsi/qla2xxx/qla_isr.c 					pkt)->total_byte_count);
pkt              1626 drivers/scsi/qla2xxx/qla_isr.c 			pkt)->total_byte_count));
pkt              1642 drivers/scsi/qla2xxx/qla_isr.c 			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
pkt              1649 drivers/scsi/qla2xxx/qla_isr.c 				pkt)->total_byte_count));
pkt              1656 drivers/scsi/qla2xxx/qla_isr.c 				pkt)->error_subcode_1),
pkt              1658 drivers/scsi/qla2xxx/qla_isr.c 				    pkt)->error_subcode_2));
pkt              1665 drivers/scsi/qla2xxx/qla_isr.c 		    pkt, sizeof(*pkt));
pkt              1993 drivers/scsi/qla2xxx/qla_isr.c 					   sts_entry_t *pkt)
pkt              2000 drivers/scsi/qla2xxx/qla_isr.c 	switch (pkt->entry_type) {
pkt              2002 drivers/scsi/qla2xxx/qla_isr.c 		qla2x00_status_entry(vha, rsp, pkt);
pkt              2005 drivers/scsi/qla2xxx/qla_isr.c 		sts21_entry = (sts21_entry_t *)pkt;
pkt              2012 drivers/scsi/qla2xxx/qla_isr.c 		sts22_entry = (sts22_entry_t *)pkt;
pkt              2019 drivers/scsi/qla2xxx/qla_isr.c 		qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
pkt              2022 drivers/scsi/qla2xxx/qla_isr.c 		qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
pkt              2025 drivers/scsi/qla2xxx/qla_isr.c 		qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
pkt              2031 drivers/scsi/qla2xxx/qla_isr.c 		       pkt->entry_type, pkt->entry_status);
pkt              2046 drivers/scsi/qla2xxx/qla_isr.c 	sts_entry_t	*pkt;
pkt              2054 drivers/scsi/qla2xxx/qla_isr.c 		pkt = (sts_entry_t *)rsp->ring_ptr;
pkt              2064 drivers/scsi/qla2xxx/qla_isr.c 		if (pkt->entry_status != 0) {
pkt              2065 drivers/scsi/qla2xxx/qla_isr.c 			qla2x00_error_entry(vha, rsp, pkt);
pkt              2066 drivers/scsi/qla2xxx/qla_isr.c 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
pkt              2071 drivers/scsi/qla2xxx/qla_isr.c 		qla2x00_process_response_entry(vha, rsp, pkt);
pkt              2072 drivers/scsi/qla2xxx/qla_isr.c 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
pkt              2254 drivers/scsi/qla2xxx/qla_isr.c qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
pkt              2266 drivers/scsi/qla2xxx/qla_isr.c 	sts_entry_t *sts = pkt;
pkt              2267 drivers/scsi/qla2xxx/qla_isr.c 	struct sts_entry_24xx *sts24 = pkt;
pkt              2408 drivers/scsi/qla2xxx/qla_isr.c qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
pkt              2413 drivers/scsi/qla2xxx/qla_isr.c 	sts_entry_t *sts = pkt;
pkt              2414 drivers/scsi/qla2xxx/qla_isr.c 	struct sts_entry_24xx *sts24 = pkt;
pkt              2493 drivers/scsi/qla2xxx/qla_isr.c 		qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
pkt              2498 drivers/scsi/qla2xxx/qla_isr.c 		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
pkt              2504 drivers/scsi/qla2xxx/qla_isr.c 		qla24xx_tm_iocb_entry(vha, req, pkt);
pkt              2774 drivers/scsi/qla2xxx/qla_isr.c 		    pkt, sizeof(*sts24));
pkt              2806 drivers/scsi/qla2xxx/qla_isr.c qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
pkt              2831 drivers/scsi/qla2xxx/qla_isr.c 	if (sense_len > sizeof(pkt->data))
pkt              2832 drivers/scsi/qla2xxx/qla_isr.c 		sense_sz = sizeof(pkt->data);
pkt              2838 drivers/scsi/qla2xxx/qla_isr.c 		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
pkt              2839 drivers/scsi/qla2xxx/qla_isr.c 	memcpy(sense_ptr, pkt->data, sense_sz);
pkt              2864 drivers/scsi/qla2xxx/qla_isr.c qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
pkt              2869 drivers/scsi/qla2xxx/qla_isr.c 	uint16_t que = MSW(pkt->handle);
pkt              2875 drivers/scsi/qla2xxx/qla_isr.c 	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
pkt              2882 drivers/scsi/qla2xxx/qla_isr.c 	if (pkt->entry_status & RF_BUSY)
pkt              2885 drivers/scsi/qla2xxx/qla_isr.c 	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
pkt              2888 drivers/scsi/qla2xxx/qla_isr.c 	switch (pkt->entry_type) {
pkt              2898 drivers/scsi/qla2xxx/qla_isr.c 		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              2955 drivers/scsi/qla2xxx/qla_isr.c 	struct abort_entry_24xx *pkt)
pkt              2961 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              2966 drivers/scsi/qla2xxx/qla_isr.c 	abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
pkt              2971 drivers/scsi/qla2xxx/qla_isr.c     struct pt_ls4_request *pkt, struct req_que *req)
pkt              2977 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              2981 drivers/scsi/qla2xxx/qla_isr.c 	comp_status = le16_to_cpu(pkt->status);
pkt              2993 drivers/scsi/qla2xxx/qla_isr.c 	struct sts_entry_24xx *pkt;
pkt              3003 drivers/scsi/qla2xxx/qla_isr.c 		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
pkt              3013 drivers/scsi/qla2xxx/qla_isr.c 		if (pkt->entry_status != 0) {
pkt              3014 drivers/scsi/qla2xxx/qla_isr.c 			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
pkt              3017 drivers/scsi/qla2xxx/qla_isr.c 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
pkt              3023 drivers/scsi/qla2xxx/qla_isr.c 		switch (pkt->entry_type) {
pkt              3025 drivers/scsi/qla2xxx/qla_isr.c 			qla2x00_status_entry(vha, rsp, pkt);
pkt              3028 drivers/scsi/qla2xxx/qla_isr.c 			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
pkt              3032 drivers/scsi/qla2xxx/qla_isr.c 			    (struct vp_rpt_id_entry_24xx *)pkt);
pkt              3036 drivers/scsi/qla2xxx/qla_isr.c 			    (struct logio_entry_24xx *)pkt);
pkt              3039 drivers/scsi/qla2xxx/qla_isr.c 			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
pkt              3042 drivers/scsi/qla2xxx/qla_isr.c 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
pkt              3049 drivers/scsi/qla2xxx/qla_isr.c 				    (response_t *)pkt);
pkt              3058 drivers/scsi/qla2xxx/qla_isr.c 			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
pkt              3061 drivers/scsi/qla2xxx/qla_isr.c 			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
pkt              3065 drivers/scsi/qla2xxx/qla_isr.c 			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
pkt              3067 drivers/scsi/qla2xxx/qla_isr.c 				    (response_t *)pkt);
pkt              3070 drivers/scsi/qla2xxx/qla_isr.c 					(struct nack_to_isp *)pkt);
pkt              3079 drivers/scsi/qla2xxx/qla_isr.c 			    (struct abort_entry_24xx *)pkt);
pkt              3083 drivers/scsi/qla2xxx/qla_isr.c 			    (struct mbx_24xx_entry *)pkt);
pkt              3087 drivers/scsi/qla2xxx/qla_isr.c 			    (struct vp_ctrl_entry_24xx *)pkt);
pkt              3094 drivers/scsi/qla2xxx/qla_isr.c 			    pkt->entry_type, pkt->entry_status);
pkt              3097 drivers/scsi/qla2xxx/qla_isr.c 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
pkt              1424 drivers/scsi/qla2xxx/qla_mr.c 	response_t *pkt;
pkt              1429 drivers/scsi/qla2xxx/qla_mr.c 	pkt = rsp->ring_ptr;
pkt              1431 drivers/scsi/qla2xxx/qla_mr.c 		pkt->signature = RESPONSE_PROCESSED;
pkt              1432 drivers/scsi/qla2xxx/qla_mr.c 		WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
pkt              1434 drivers/scsi/qla2xxx/qla_mr.c 		pkt++;
pkt              2177 drivers/scsi/qla2xxx/qla_mr.c 		      struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
pkt              2192 drivers/scsi/qla2xxx/qla_mr.c 			 struct abort_iocb_entry_fx00 *pkt)
pkt              2198 drivers/scsi/qla2xxx/qla_mr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              2203 drivers/scsi/qla2xxx/qla_mr.c 	abt->u.abt.comp_status = pkt->tgt_id_sts;
pkt              2209 drivers/scsi/qla2xxx/qla_mr.c 			 struct ioctl_iocb_entry_fx00 *pkt)
pkt              2220 drivers/scsi/qla2xxx/qla_mr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              2226 drivers/scsi/qla2xxx/qla_mr.c 		iocb_job->u.fxiocb.seq_number = pkt->seq_no;
pkt              2227 drivers/scsi/qla2xxx/qla_mr.c 		iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
pkt              2228 drivers/scsi/qla2xxx/qla_mr.c 		iocb_job->u.fxiocb.result = pkt->status;
pkt              2231 drivers/scsi/qla2xxx/qla_mr.c 			    pkt->dataword_r;
pkt              2238 drivers/scsi/qla2xxx/qla_mr.c 		fstatus.reserved_1 = pkt->reserved_0;
pkt              2239 drivers/scsi/qla2xxx/qla_mr.c 		fstatus.func_type = pkt->comp_func_num;
pkt              2240 drivers/scsi/qla2xxx/qla_mr.c 		fstatus.ioctl_flags = pkt->fw_iotcl_flags;
pkt              2241 drivers/scsi/qla2xxx/qla_mr.c 		fstatus.ioctl_data = pkt->dataword_r;
pkt              2242 drivers/scsi/qla2xxx/qla_mr.c 		fstatus.adapid = pkt->adapid;
pkt              2243 drivers/scsi/qla2xxx/qla_mr.c 		fstatus.reserved_2 = pkt->dataword_r_extra;
pkt              2244 drivers/scsi/qla2xxx/qla_mr.c 		fstatus.res_count = pkt->residuallen;
pkt              2245 drivers/scsi/qla2xxx/qla_mr.c 		fstatus.status = pkt->status;
pkt              2246 drivers/scsi/qla2xxx/qla_mr.c 		fstatus.seq_number = pkt->seq_no;
pkt              2248 drivers/scsi/qla2xxx/qla_mr.c 		    pkt->reserved_2, 20 * sizeof(uint8_t));
pkt              2257 drivers/scsi/qla2xxx/qla_mr.c 		    sp->vha, 0x5080, pkt, sizeof(*pkt));
pkt              2277 drivers/scsi/qla2xxx/qla_mr.c qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
pkt              2297 drivers/scsi/qla2xxx/qla_mr.c 	sts = (struct sts_entry_fx00 *) pkt;
pkt              2324 drivers/scsi/qla2xxx/qla_mr.c 		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
pkt              2554 drivers/scsi/qla2xxx/qla_mr.c qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
pkt              2594 drivers/scsi/qla2xxx/qla_mr.c 		if (sense_len > sizeof(pkt->data))
pkt              2595 drivers/scsi/qla2xxx/qla_mr.c 			sense_sz = sizeof(pkt->data);
pkt              2601 drivers/scsi/qla2xxx/qla_mr.c 		    pkt, sizeof(*pkt));
pkt              2602 drivers/scsi/qla2xxx/qla_mr.c 		memcpy(sense_ptr, pkt->data, sense_sz);
pkt              2613 drivers/scsi/qla2xxx/qla_mr.c 	sense_len = (sense_len > sizeof(pkt->data)) ?
pkt              2614 drivers/scsi/qla2xxx/qla_mr.c 	    (sense_len - sizeof(pkt->data)) : 0;
pkt              2634 drivers/scsi/qla2xxx/qla_mr.c 	struct rsp_que *rsp, void *pkt)
pkt              2644 drivers/scsi/qla2xxx/qla_mr.c 	stsmfx = (struct multi_sts_entry_fx00 *) pkt;
pkt              2690 drivers/scsi/qla2xxx/qla_mr.c 		    struct sts_entry_fx00 *pkt)
pkt              2701 drivers/scsi/qla2xxx/qla_mr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
pkt              2720 drivers/scsi/qla2xxx/qla_mr.c 	struct sts_entry_fx00 *pkt;
pkt              2732 drivers/scsi/qla2xxx/qla_mr.c 		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
pkt              2744 drivers/scsi/qla2xxx/qla_mr.c 		if (pkt->entry_status != 0 &&
pkt              2745 drivers/scsi/qla2xxx/qla_mr.c 		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
pkt              2748 drivers/scsi/qla2xxx/qla_mr.c 			       pkt->entry_status);
pkt              2750 drivers/scsi/qla2xxx/qla_mr.c 					    (struct sts_entry_fx00 *)pkt);
pkt              2754 drivers/scsi/qla2xxx/qla_mr.c 		switch (pkt->entry_type) {
pkt              2756 drivers/scsi/qla2xxx/qla_mr.c 			qlafx00_status_entry(vha, rsp, pkt);
pkt              2760 drivers/scsi/qla2xxx/qla_mr.c 			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
pkt              2764 drivers/scsi/qla2xxx/qla_mr.c 			qlafx00_multistatus_entry(vha, rsp, pkt);
pkt              2769 drivers/scsi/qla2xxx/qla_mr.c 			   (struct abort_iocb_entry_fx00 *)pkt);
pkt              2774 drivers/scsi/qla2xxx/qla_mr.c 			    (struct ioctl_iocb_entry_fx00 *)pkt);
pkt              2781 drivers/scsi/qla2xxx/qla_mr.c 			    pkt->entry_type, pkt->entry_status);
pkt               111 drivers/scsi/qla2xxx/qla_target.c 	struct atio_from_isp *pkt, uint8_t);
pkt               113 drivers/scsi/qla2xxx/qla_target.c 	response_t *pkt);
pkt               440 drivers/scsi/qla2xxx/qla_target.c 	struct rsp_que *rsp, response_t *pkt)
pkt               442 drivers/scsi/qla2xxx/qla_target.c 	switch (pkt->entry_type) {
pkt               450 drivers/scsi/qla2xxx/qla_target.c 		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
pkt               460 drivers/scsi/qla2xxx/qla_target.c 		qlt_response_pkt(host, rsp, pkt);
pkt               468 drivers/scsi/qla2xxx/qla_target.c 		    (struct imm_ntfy_from_isp *)pkt;
pkt               478 drivers/scsi/qla2xxx/qla_target.c 		qlt_response_pkt(host, rsp, pkt);
pkt               485 drivers/scsi/qla2xxx/qla_target.c 		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
pkt               500 drivers/scsi/qla2xxx/qla_target.c 		qlt_response_pkt(host, rsp, pkt);
pkt               507 drivers/scsi/qla2xxx/qla_target.c 		    (struct abts_recv_from_24xx *)pkt;
pkt               517 drivers/scsi/qla2xxx/qla_target.c 		qlt_response_pkt(host, rsp, pkt);
pkt               524 drivers/scsi/qla2xxx/qla_target.c 		    (struct abts_resp_to_24xx *)pkt;
pkt               534 drivers/scsi/qla2xxx/qla_target.c 		qlt_response_pkt(host, rsp, pkt);
pkt               538 drivers/scsi/qla2xxx/qla_target.c 		qlt_response_pkt(vha, rsp, pkt);
pkt              1663 drivers/scsi/qla2xxx/qla_target.c 	request_t *pkt;
pkt              1671 drivers/scsi/qla2xxx/qla_target.c 	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
pkt              1672 drivers/scsi/qla2xxx/qla_target.c 	if (!pkt) {
pkt              1682 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_type = NOTIFY_ACK_TYPE;
pkt              1683 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_count = 1;
pkt              1685 drivers/scsi/qla2xxx/qla_target.c 	nack = (struct nack_to_isp *)pkt;
pkt              1876 drivers/scsi/qla2xxx/qla_target.c     struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
pkt              1895 drivers/scsi/qla2xxx/qla_target.c 		entry = (struct abts_recv_from_24xx *)pkt;
pkt              2552 drivers/scsi/qla2xxx/qla_target.c 	struct ctio7_to_24xx *pkt;
pkt              2556 drivers/scsi/qla2xxx/qla_target.c 	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
pkt              2557 drivers/scsi/qla2xxx/qla_target.c 	prm->pkt = pkt;
pkt              2558 drivers/scsi/qla2xxx/qla_target.c 	memset(pkt, 0, sizeof(*pkt));
pkt              2560 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_type = CTIO_TYPE7;
pkt              2561 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_count = (uint8_t)prm->req_cnt;
pkt              2562 drivers/scsi/qla2xxx/qla_target.c 	pkt->vp_index = prm->cmd->vp_idx;
pkt              2575 drivers/scsi/qla2xxx/qla_target.c 	pkt->handle = MAKE_HANDLE(qpair->req->id, h);
pkt              2576 drivers/scsi/qla2xxx/qla_target.c 	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt              2577 drivers/scsi/qla2xxx/qla_target.c 	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt              2578 drivers/scsi/qla2xxx/qla_target.c 	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt              2579 drivers/scsi/qla2xxx/qla_target.c 	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
pkt              2580 drivers/scsi/qla2xxx/qla_target.c 	pkt->exchange_addr = atio->u.isp24.exchange_addr;
pkt              2582 drivers/scsi/qla2xxx/qla_target.c 	pkt->u.status0.flags |= cpu_to_le16(temp);
pkt              2584 drivers/scsi/qla2xxx/qla_target.c 	pkt->u.status0.ox_id = cpu_to_le16(temp);
pkt              2585 drivers/scsi/qla2xxx/qla_target.c 	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
pkt              2638 drivers/scsi/qla2xxx/qla_target.c 	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
pkt              2730 drivers/scsi/qla2xxx/qla_target.c 	prm->pkt = NULL;
pkt              2993 drivers/scsi/qla2xxx/qla_target.c 	struct ctio_crc2_to_fw	*pkt;
pkt              3006 drivers/scsi/qla2xxx/qla_target.c 	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
pkt              3007 drivers/scsi/qla2xxx/qla_target.c 	prm->pkt = pkt;
pkt              3008 drivers/scsi/qla2xxx/qla_target.c 	memset(pkt, 0, sizeof(*pkt));
pkt              3073 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_type  = CTIO_CRC2;
pkt              3074 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_count = 1;
pkt              3075 drivers/scsi/qla2xxx/qla_target.c 	pkt->vp_index = cmd->vp_idx;
pkt              3088 drivers/scsi/qla2xxx/qla_target.c 	pkt->handle  = MAKE_HANDLE(qpair->req->id, h);
pkt              3089 drivers/scsi/qla2xxx/qla_target.c 	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt              3090 drivers/scsi/qla2xxx/qla_target.c 	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt              3091 drivers/scsi/qla2xxx/qla_target.c 	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt              3092 drivers/scsi/qla2xxx/qla_target.c 	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
pkt              3093 drivers/scsi/qla2xxx/qla_target.c 	pkt->exchange_addr   = atio->u.isp24.exchange_addr;
pkt              3097 drivers/scsi/qla2xxx/qla_target.c 	pkt->ox_id  = cpu_to_le16(t16);
pkt              3100 drivers/scsi/qla2xxx/qla_target.c 	pkt->flags |= cpu_to_le16(t16);
pkt              3101 drivers/scsi/qla2xxx/qla_target.c 	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
pkt              3105 drivers/scsi/qla2xxx/qla_target.c 		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
pkt              3107 drivers/scsi/qla2xxx/qla_target.c 		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
pkt              3109 drivers/scsi/qla2xxx/qla_target.c 	pkt->dseg_count = prm->tot_dsds;
pkt              3111 drivers/scsi/qla2xxx/qla_target.c 	pkt->transfer_length = cpu_to_le32(transfer_length);
pkt              3126 drivers/scsi/qla2xxx/qla_target.c 	crc_ctx_pkt->handle = pkt->handle;
pkt              3130 drivers/scsi/qla2xxx/qla_target.c 	put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
pkt              3131 drivers/scsi/qla2xxx/qla_target.c 	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
pkt              3163 drivers/scsi/qla2xxx/qla_target.c 	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
pkt              3175 drivers/scsi/qla2xxx/qla_target.c 		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
pkt              3200 drivers/scsi/qla2xxx/qla_target.c 	struct ctio7_to_24xx *pkt;
pkt              3260 drivers/scsi/qla2xxx/qla_target.c 	pkt = (struct ctio7_to_24xx *)prm.pkt;
pkt              3263 drivers/scsi/qla2xxx/qla_target.c 		pkt->u.status0.flags |=
pkt              3272 drivers/scsi/qla2xxx/qla_target.c 				pkt->u.status0.scsi_status =
pkt              3274 drivers/scsi/qla2xxx/qla_target.c 				pkt->u.status0.residual =
pkt              3276 drivers/scsi/qla2xxx/qla_target.c 				pkt->u.status0.flags |= cpu_to_le16(
pkt              3279 drivers/scsi/qla2xxx/qla_target.c 					pkt->u.status0.flags |=
pkt              3304 drivers/scsi/qla2xxx/qla_target.c 			memcpy(ctio, pkt, sizeof(*ctio));
pkt              3313 drivers/scsi/qla2xxx/qla_target.c 			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
pkt              3314 drivers/scsi/qla2xxx/qla_target.c 			pkt->u.status0.flags |= cpu_to_le16(
pkt              3325 drivers/scsi/qla2xxx/qla_target.c 		qlt_24xx_init_ctio_to_isp(pkt, &prm);
pkt              3330 drivers/scsi/qla2xxx/qla_target.c 	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
pkt              3354 drivers/scsi/qla2xxx/qla_target.c 	struct ctio7_to_24xx *pkt;
pkt              3404 drivers/scsi/qla2xxx/qla_target.c 	pkt = (struct ctio7_to_24xx *)prm.pkt;
pkt              3405 drivers/scsi/qla2xxx/qla_target.c 	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
pkt              3413 drivers/scsi/qla2xxx/qla_target.c 	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
pkt              3544 drivers/scsi/qla2xxx/qla_target.c 	request_t *pkt;
pkt              3550 drivers/scsi/qla2xxx/qla_target.c 	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
pkt              3551 drivers/scsi/qla2xxx/qla_target.c 	if (pkt == NULL) {
pkt              3558 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_type = NOTIFY_ACK_TYPE;
pkt              3559 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_count = 1;
pkt              3560 drivers/scsi/qla2xxx/qla_target.c 	pkt->handle = QLA_TGT_SKIP_HANDLE;
pkt              3562 drivers/scsi/qla2xxx/qla_target.c 	nack = (struct nack_to_isp *)pkt;
pkt              3609 drivers/scsi/qla2xxx/qla_target.c 	request_t *pkt;
pkt              3618 drivers/scsi/qla2xxx/qla_target.c 	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
pkt              3619 drivers/scsi/qla2xxx/qla_target.c 	if (pkt == NULL) {
pkt              3637 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_count = 1;
pkt              3638 drivers/scsi/qla2xxx/qla_target.c 	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
pkt              3640 drivers/scsi/qla2xxx/qla_target.c 	ctio24 = (struct ctio7_to_24xx *)pkt;
pkt              5272 drivers/scsi/qla2xxx/qla_target.c 	request_t *pkt;
pkt              5289 drivers/scsi/qla2xxx/qla_target.c 	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
pkt              5290 drivers/scsi/qla2xxx/qla_target.c 	if (!pkt) {
pkt              5298 drivers/scsi/qla2xxx/qla_target.c 	pkt->entry_count = 1;
pkt              5299 drivers/scsi/qla2xxx/qla_target.c 	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
pkt              5301 drivers/scsi/qla2xxx/qla_target.c 	ctio24 = (struct ctio7_to_24xx *)pkt;
pkt              5697 drivers/scsi/qla2xxx/qla_target.c 	struct rsp_que *rsp, response_t *pkt)
pkt              5700 drivers/scsi/qla2xxx/qla_target.c 		(struct abts_resp_from_24xx_fw *)pkt;
pkt              5701 drivers/scsi/qla2xxx/qla_target.c 	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
pkt              5705 drivers/scsi/qla2xxx/qla_target.c 	mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
pkt              5729 drivers/scsi/qla2xxx/qla_target.c 			    pkt, mcmd);
pkt              5746 drivers/scsi/qla2xxx/qla_target.c 	struct rsp_que *rsp, response_t *pkt)
pkt              5753 drivers/scsi/qla2xxx/qla_target.c 		    vha->vp_idx, pkt->entry_type, vha->hw);
pkt              5762 drivers/scsi/qla2xxx/qla_target.c 	switch (pkt->entry_type) {
pkt              5766 drivers/scsi/qla2xxx/qla_target.c 		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
pkt              5769 drivers/scsi/qla2xxx/qla_target.c 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
pkt              5776 drivers/scsi/qla2xxx/qla_target.c 		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
pkt              5826 drivers/scsi/qla2xxx/qla_target.c 		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
pkt              5829 drivers/scsi/qla2xxx/qla_target.c 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
pkt              5836 drivers/scsi/qla2xxx/qla_target.c 		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
pkt              5839 drivers/scsi/qla2xxx/qla_target.c 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
pkt              5846 drivers/scsi/qla2xxx/qla_target.c 		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
pkt              5851 drivers/scsi/qla2xxx/qla_target.c 			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
pkt              5875 drivers/scsi/qla2xxx/qla_target.c 		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
pkt              5880 drivers/scsi/qla2xxx/qla_target.c 			qlt_handle_abts_completion(vha, rsp, pkt);
pkt              5891 drivers/scsi/qla2xxx/qla_target.c 		    "type %x\n", vha->vp_idx, pkt->entry_type);
pkt              6732 drivers/scsi/qla2xxx/qla_target.c 	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
pkt              6738 drivers/scsi/qla2xxx/qla_target.c 		pkt->u.raw.signature = ATIO_PROCESSED;
pkt              6739 drivers/scsi/qla2xxx/qla_target.c 		pkt++;
pkt              6752 drivers/scsi/qla2xxx/qla_target.c 	struct atio_from_isp *pkt;
pkt              6760 drivers/scsi/qla2xxx/qla_target.c 		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
pkt              6761 drivers/scsi/qla2xxx/qla_target.c 		cnt = pkt->u.raw.entry_count;
pkt              6771 drivers/scsi/qla2xxx/qla_target.c 			    &pkt->u.isp24.fcp_hdr.s_id,
pkt              6772 drivers/scsi/qla2xxx/qla_target.c 			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
pkt              6773 drivers/scsi/qla2xxx/qla_target.c 			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
pkt              6775 drivers/scsi/qla2xxx/qla_target.c 			adjust_corrupted_atio(pkt);
pkt              6776 drivers/scsi/qla2xxx/qla_target.c 			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
pkt              6780 drivers/scsi/qla2xxx/qla_target.c 			    (struct atio_from_isp *)pkt, ha_locked);
pkt              6791 drivers/scsi/qla2xxx/qla_target.c 			pkt->u.raw.signature = ATIO_PROCESSED;
pkt              6792 drivers/scsi/qla2xxx/qla_target.c 			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
pkt              7153 drivers/scsi/qla2xxx/qla_target.c     response_t *pkt)
pkt              7163 drivers/scsi/qla2xxx/qla_target.c 		qlt_response_pkt_all_vps(vha, rsp, pkt);
pkt              7167 drivers/scsi/qla2xxx/qla_target.c 	memcpy(&op->atio, pkt, sizeof(*pkt));
pkt               969 drivers/scsi/qla2xxx/qla_target.h 	void *pkt;
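The qla_target.c hits above repeat one IOCB-construction pattern several times: pull a free request entry off the ring, stamp the generic request_t header (entry_type, entry_count, handle), then cast it to the concrete CTIO/NACK layout and fill the type-specific fields. A condensed sketch of that pattern follows; it uses only the driver-internal names visible in the index (request_t, qla2x00_alloc_iocbs(), NOTIFY_ACK_TYPE, QLA_TGT_SKIP_HANDLE) and is therefore meaningful only inside qla_target.c, not as standalone code.

/*
 * Shape sketch of the ring-IOCB pattern seen above; request_t,
 * struct nack_to_isp and the constants are qla2xxx-internal.
 */
static struct nack_to_isp *demo_start_nack_iocb(struct scsi_qla_host *vha)
{
	request_t *pkt;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt)
		return NULL;			/* request ring full; caller retries later */

	pkt->entry_type = NOTIFY_ACK_TYPE;	/* generic header first ... */
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE;	/* completion status will be ignored */

	return (struct nack_to_isp *)pkt;	/* ... then fill the NACK-specific fields */
}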
pkt                69 drivers/scsi/qla4xxx/ql4_init.c 	struct response *pkt;
pkt                71 drivers/scsi/qla4xxx/ql4_init.c 	pkt = (struct response *)ha->response_ptr;
pkt                73 drivers/scsi/qla4xxx/ql4_init.c 		pkt->signature = RESPONSE_PROCESSED;
pkt                74 drivers/scsi/qla4xxx/ql4_init.c 		pkt++;
pkt               210 drivers/slimbus/qcom-ctrl.c 	u32 *rx_buf, pkt[10];
pkt               214 drivers/slimbus/qcom-ctrl.c 	pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
pkt               215 drivers/slimbus/qcom-ctrl.c 	mt = SLIM_HEADER_GET_MT(pkt[0]);
pkt               216 drivers/slimbus/qcom-ctrl.c 	len = SLIM_HEADER_GET_RL(pkt[0]);
pkt               217 drivers/slimbus/qcom-ctrl.c 	mc = SLIM_HEADER_GET_MC(pkt[0]>>8);
pkt               227 drivers/slimbus/qcom-ctrl.c 					pkt[0]);
pkt               230 drivers/slimbus/qcom-ctrl.c 		rx_buf[0] = pkt[0];
pkt               233 drivers/slimbus/qcom-ctrl.c 		rx_buf = pkt;
pkt                72 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_pkt *pkt;
pkt                76 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
pkt                77 drivers/soc/mediatek/mtk-cmdq-helper.c 	if (!pkt)
pkt                79 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->va_base = kzalloc(size, GFP_KERNEL);
pkt                80 drivers/soc/mediatek/mtk-cmdq-helper.c 	if (!pkt->va_base) {
pkt                81 drivers/soc/mediatek/mtk-cmdq-helper.c 		kfree(pkt);
pkt                84 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->buf_size = size;
pkt                85 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->cl = (void *)client;
pkt                88 drivers/soc/mediatek/mtk-cmdq-helper.c 	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
pkt                92 drivers/soc/mediatek/mtk-cmdq-helper.c 		kfree(pkt->va_base);
pkt                93 drivers/soc/mediatek/mtk-cmdq-helper.c 		kfree(pkt);
pkt                97 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->pa_base = dma_addr;
pkt                99 drivers/soc/mediatek/mtk-cmdq-helper.c 	return pkt;
pkt               103 drivers/soc/mediatek/mtk-cmdq-helper.c void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
pkt               105 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
pkt               107 drivers/soc/mediatek/mtk-cmdq-helper.c 	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
pkt               109 drivers/soc/mediatek/mtk-cmdq-helper.c 	kfree(pkt->va_base);
pkt               110 drivers/soc/mediatek/mtk-cmdq-helper.c 	kfree(pkt);
pkt               114 drivers/soc/mediatek/mtk-cmdq-helper.c static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
pkt               119 drivers/soc/mediatek/mtk-cmdq-helper.c 	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
pkt               128 drivers/soc/mediatek/mtk-cmdq-helper.c 		pkt->cmd_buf_size += CMDQ_INST_SIZE;
pkt               130 drivers/soc/mediatek/mtk-cmdq-helper.c 			__func__, (u32)pkt->buf_size);
pkt               133 drivers/soc/mediatek/mtk-cmdq-helper.c 	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
pkt               135 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->cmd_buf_size += CMDQ_INST_SIZE;
pkt               140 drivers/soc/mediatek/mtk-cmdq-helper.c int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
pkt               145 drivers/soc/mediatek/mtk-cmdq-helper.c 	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
pkt               149 drivers/soc/mediatek/mtk-cmdq-helper.c int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
pkt               156 drivers/soc/mediatek/mtk-cmdq-helper.c 		err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
pkt               159 drivers/soc/mediatek/mtk-cmdq-helper.c 	err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);
pkt               165 drivers/soc/mediatek/mtk-cmdq-helper.c int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
pkt               181 drivers/soc/mediatek/mtk-cmdq-helper.c 	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
pkt               185 drivers/soc/mediatek/mtk-cmdq-helper.c int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
pkt               190 drivers/soc/mediatek/mtk-cmdq-helper.c 	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
pkt               195 drivers/soc/mediatek/mtk-cmdq-helper.c static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
pkt               200 drivers/soc/mediatek/mtk-cmdq-helper.c 	err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);
pkt               203 drivers/soc/mediatek/mtk-cmdq-helper.c 	err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);
pkt               210 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
pkt               211 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_task_cb *cb = &pkt->cb;
pkt               212 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
pkt               226 drivers/soc/mediatek/mtk-cmdq-helper.c 	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
pkt               227 drivers/soc/mediatek/mtk-cmdq-helper.c 				pkt->cmd_buf_size, DMA_TO_DEVICE);
pkt               234 drivers/soc/mediatek/mtk-cmdq-helper.c int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
pkt               239 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
pkt               241 drivers/soc/mediatek/mtk-cmdq-helper.c 	err = cmdq_pkt_finalize(pkt);
pkt               245 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->cb.cb = cb;
pkt               246 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->cb.data = data;
pkt               247 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
pkt               248 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->async_cb.data = pkt;
pkt               250 drivers/soc/mediatek/mtk-cmdq-helper.c 	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
pkt               251 drivers/soc/mediatek/mtk-cmdq-helper.c 				   pkt->cmd_buf_size, DMA_TO_DEVICE);
pkt               261 drivers/soc/mediatek/mtk-cmdq-helper.c 	err = mbox_send_message(client->chan, pkt);
pkt               288 drivers/soc/mediatek/mtk-cmdq-helper.c int cmdq_pkt_flush(struct cmdq_pkt *pkt)
pkt               294 drivers/soc/mediatek/mtk-cmdq-helper.c 	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
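The mtk-cmdq-helper.c hits above cover the whole client-side packet lifecycle: buffer allocation and DMA mapping, cmdq_pkt_write()/cmdq_pkt_write_mask() for register writes, cmdq_pkt_wfe()/cmdq_pkt_clear_event() for hardware events, and cmdq_pkt_flush()/cmdq_pkt_flush_async() plus cmdq_pkt_destroy() for submission and teardown. A minimal usage sketch follows. It assumes the mainline allocation entry point cmdq_pkt_create(client, size) with ERR_PTR-style returns, takes the cmdq_pkt_write_mask() parameter order from the signatures visible above, and uses purely hypothetical subsys/offset/event values.

#include <linux/err.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

/* Hypothetical hardware parameters for illustration only. */
#define DEMO_SUBSYS	0x14		/* GCE subsys id of the target block */
#define DEMO_REG_OFS	0x0c00		/* register offset within that subsys */
#define DEMO_EVENT	10		/* hardware event the GCE thread waits on */
#define DEMO_PKT_SIZE	4096

static int demo_cmdq_flush_one(struct cmdq_client *client)
{
	struct cmdq_pkt *pkt;
	int err;

	/* Allocates pkt->va_base and maps it for DMA, as in the helper above. */
	pkt = cmdq_pkt_create(client, DEMO_PKT_SIZE);
	if (IS_ERR(pkt))
		return PTR_ERR(pkt);

	cmdq_pkt_clear_event(pkt, DEMO_EVENT);			/* arm the event */
	cmdq_pkt_write_mask(pkt, DEMO_SUBSYS, DEMO_REG_OFS, 0x1, 0x1);
	cmdq_pkt_wfe(pkt, DEMO_EVENT);				/* wait-for-event instruction */

	err = cmdq_pkt_flush(pkt);	/* finalize (EOC + jump) and wait for completion */
	cmdq_pkt_destroy(pkt);		/* unmap the command buffer and free the packet */
	return err;
}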
pkt                43 drivers/soc/qcom/apr.c int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt)
pkt                52 drivers/soc/qcom/apr.c 	hdr = &pkt->hdr;
pkt                58 drivers/soc/qcom/apr.c 	ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size);
pkt               133 drivers/soc/qcom/qmi_interface.c 	const struct qrtr_ctrl_pkt *pkt = buf;
pkt               140 drivers/soc/qcom/qmi_interface.c 	switch (le32_to_cpu(pkt->cmd)) {
pkt               142 drivers/soc/qcom/qmi_interface.c 		qmi_recv_bye(qmi, le32_to_cpu(pkt->client.node));
pkt               146 drivers/soc/qcom/qmi_interface.c 				    le32_to_cpu(pkt->server.service),
pkt               147 drivers/soc/qcom/qmi_interface.c 				    le32_to_cpu(pkt->server.instance),
pkt               148 drivers/soc/qcom/qmi_interface.c 				    le32_to_cpu(pkt->server.node),
pkt               149 drivers/soc/qcom/qmi_interface.c 				    le32_to_cpu(pkt->server.port));
pkt               153 drivers/soc/qcom/qmi_interface.c 				    le32_to_cpu(pkt->server.node),
pkt               154 drivers/soc/qcom/qmi_interface.c 				    le32_to_cpu(pkt->server.port));
pkt               158 drivers/soc/qcom/qmi_interface.c 				    le32_to_cpu(pkt->client.node),
pkt               159 drivers/soc/qcom/qmi_interface.c 				    le32_to_cpu(pkt->client.port));
pkt               166 drivers/soc/qcom/qmi_interface.c 	struct qrtr_ctrl_pkt pkt;
pkt               169 drivers/soc/qcom/qmi_interface.c 	struct kvec iv = { &pkt, sizeof(pkt) };
pkt               172 drivers/soc/qcom/qmi_interface.c 	memset(&pkt, 0, sizeof(pkt));
pkt               173 drivers/soc/qcom/qmi_interface.c 	pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP);
pkt               174 drivers/soc/qcom/qmi_interface.c 	pkt.server.service = cpu_to_le32(svc->service);
pkt               175 drivers/soc/qcom/qmi_interface.c 	pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
pkt               186 drivers/soc/qcom/qmi_interface.c 		ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
pkt               229 drivers/soc/qcom/qmi_interface.c 	struct qrtr_ctrl_pkt pkt;
pkt               232 drivers/soc/qcom/qmi_interface.c 	struct kvec iv = { &pkt, sizeof(pkt) };
pkt               235 drivers/soc/qcom/qmi_interface.c 	memset(&pkt, 0, sizeof(pkt));
pkt               236 drivers/soc/qcom/qmi_interface.c 	pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
pkt               237 drivers/soc/qcom/qmi_interface.c 	pkt.server.service = cpu_to_le32(svc->service);
pkt               238 drivers/soc/qcom/qmi_interface.c 	pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
pkt               239 drivers/soc/qcom/qmi_interface.c 	pkt.server.node = cpu_to_le32(qmi->sq.sq_node);
pkt               240 drivers/soc/qcom/qmi_interface.c 	pkt.server.port = cpu_to_le32(qmi->sq.sq_port);
pkt               251 drivers/soc/qcom/qmi_interface.c 		ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
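The qmi_interface.c hits show the qrtr_ctrl_pkt idiom end to end: incoming control messages are decoded with le32_to_cpu() on pkt->cmd and the server/client fields, and outgoing ones are built by zeroing the packet, filling it in little-endian, and handing it to kernel_sendmsg() through a single kvec. A condensed sketch of the send side follows; it relies only on uapi definitions, and the function name and the way the control sockaddr is derived are illustrative rather than taken from the driver.

#include <linux/net.h>
#include <linux/qrtr.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uio.h>

/*
 * Illustrative sketch: announce a service the way qmi_send_new_server()
 * above does. 'sock' is an already-bound AF_QIPCRTR kernel socket and
 * 'self' is its own node/port address.
 */
static int demo_qrtr_new_server(struct socket *sock, struct sockaddr_qrtr *self,
				u32 service, u32 version, u32 instance)
{
	struct qrtr_ctrl_pkt pkt;
	struct kvec iv = { &pkt, sizeof(pkt) };
	struct sockaddr_qrtr ctrl = {
		.sq_family = AF_QIPCRTR,
		.sq_node = self->sq_node,
		.sq_port = QRTR_PORT_CTRL,	/* control service on the local node */
	};
	struct msghdr msg = { .msg_name = &ctrl, .msg_namelen = sizeof(ctrl) };

	memset(&pkt, 0, sizeof(pkt));
	pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
	pkt.server.service = cpu_to_le32(service);
	pkt.server.instance = cpu_to_le32(version | instance << 8);
	pkt.server.node = cpu_to_le32(self->sq_node);
	pkt.server.port = cpu_to_le32(self->sq_port);

	/* One datagram; kernel_sendmsg() wires the kvec into msg_iter itself. */
	return kernel_sendmsg(sock, &msg, &iv, 1, sizeof(pkt));
}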
pkt               105 drivers/soc/qcom/smd-rpm.c 	} *pkt;
pkt               106 drivers/soc/qcom/smd-rpm.c 	size_t size = sizeof(*pkt) + count;
pkt               112 drivers/soc/qcom/smd-rpm.c 	pkt = kmalloc(size, GFP_KERNEL);
pkt               113 drivers/soc/qcom/smd-rpm.c 	if (!pkt)
pkt               118 drivers/soc/qcom/smd-rpm.c 	pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
pkt               119 drivers/soc/qcom/smd-rpm.c 	pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
pkt               121 drivers/soc/qcom/smd-rpm.c 	pkt->req.msg_id = cpu_to_le32(msg_id++);
pkt               122 drivers/soc/qcom/smd-rpm.c 	pkt->req.flags = cpu_to_le32(state);
pkt               123 drivers/soc/qcom/smd-rpm.c 	pkt->req.type = cpu_to_le32(type);
pkt               124 drivers/soc/qcom/smd-rpm.c 	pkt->req.id = cpu_to_le32(id);
pkt               125 drivers/soc/qcom/smd-rpm.c 	pkt->req.data_len = cpu_to_le32(count);
pkt               126 drivers/soc/qcom/smd-rpm.c 	memcpy(pkt->payload, buf, count);
pkt               128 drivers/soc/qcom/smd-rpm.c 	ret = rpmsg_send(rpm->rpm_channel, pkt, size);
pkt               139 drivers/soc/qcom/smd-rpm.c 	kfree(pkt);
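smd-rpm.c builds each request as a flexible-array packet: one kmalloc() covering header, request and payload, little-endian field fills, a memcpy() of the caller's buffer, rpmsg_send() on the RPM channel, then kfree(). A sketch of that shape follows; the demo_* structures are stand-ins reconstructed from the assignments visible above, so their field ordering and any constants are illustrative only and do not describe the RPM wire format.

#include <linux/rpmsg.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Stand-in layouts: field names mirror the assignments above, ordering is illustrative. */
struct demo_rpm_hdr {
	__le32 service_type;
	__le32 length;
};

struct demo_rpm_req {
	__le32 msg_id;
	__le32 flags;
	__le32 type;
	__le32 id;
	__le32 data_len;
};

static int demo_rpm_write(struct rpmsg_endpoint *chan, u32 service_type,
			  u32 state, u32 type, u32 id, const void *buf, size_t count)
{
	struct {
		struct demo_rpm_hdr hdr;
		struct demo_rpm_req req;
		u8 payload[];
	} *pkt;
	size_t size = sizeof(*pkt) + count;
	static u32 msg_id = 1;		/* the driver keeps a monotonically increasing id */
	int ret;

	pkt = kmalloc(size, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.service_type = cpu_to_le32(service_type);
	pkt->hdr.length = cpu_to_le32(sizeof(pkt->req) + count);
	pkt->req.msg_id = cpu_to_le32(msg_id++);
	pkt->req.flags = cpu_to_le32(state);
	pkt->req.type = cpu_to_le32(type);
	pkt->req.id = cpu_to_le32(id);
	pkt->req.data_len = cpu_to_le32(count);
	memcpy(pkt->payload, buf, count);

	ret = rpmsg_send(chan, pkt, size);	/* blocks until the message is queued */

	kfree(pkt);
	return ret;
}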
pkt              1555 drivers/staging/fwserial/fwserial.c 	static const struct fwserial_mgmt_pkt pkt;
pkt              1559 drivers/staging/fwserial/fwserial.c 		return sizeof(pkt.hdr) + sizeof(pkt.plug_req);
pkt              1562 drivers/staging/fwserial/fwserial.c 		return sizeof(pkt.hdr) + sizeof(pkt.plug_rsp);
pkt              1568 drivers/staging/fwserial/fwserial.c 		return sizeof(pkt.hdr);
pkt              1589 drivers/staging/fwserial/fwserial.c static inline void fill_plug_req(struct fwserial_mgmt_pkt *pkt,
pkt              1592 drivers/staging/fwserial/fwserial.c 	pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG);
pkt              1593 drivers/staging/fwserial/fwserial.c 	pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
pkt              1594 drivers/staging/fwserial/fwserial.c 	fill_plug_params(&pkt->plug_req, port);
pkt              1597 drivers/staging/fwserial/fwserial.c static inline void fill_plug_rsp_ok(struct fwserial_mgmt_pkt *pkt,
pkt              1600 drivers/staging/fwserial/fwserial.c 	pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP);
pkt              1601 drivers/staging/fwserial/fwserial.c 	pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
pkt              1602 drivers/staging/fwserial/fwserial.c 	fill_plug_params(&pkt->plug_rsp, port);
pkt              1605 drivers/staging/fwserial/fwserial.c static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
pkt              1607 drivers/staging/fwserial/fwserial.c 	pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK);
pkt              1608 drivers/staging/fwserial/fwserial.c 	pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
pkt              1611 drivers/staging/fwserial/fwserial.c static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
pkt              1613 drivers/staging/fwserial/fwserial.c 	pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK);
pkt              1614 drivers/staging/fwserial/fwserial.c 	pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
pkt              1617 drivers/staging/fwserial/fwserial.c static inline void fill_unplug_rsp_ok(struct fwserial_mgmt_pkt *pkt)
pkt              1619 drivers/staging/fwserial/fwserial.c 	pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP);
pkt              1620 drivers/staging/fwserial/fwserial.c 	pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
pkt              1647 drivers/staging/fwserial/fwserial.c 					  struct fwserial_mgmt_pkt *pkt)
pkt              1661 drivers/staging/fwserial/fwserial.c 					   pkt, be16_to_cpu(pkt->hdr.len));
pkt              1777 drivers/staging/fwserial/fwserial.c 	struct fwserial_mgmt_pkt *pkt;
pkt              1780 drivers/staging/fwserial/fwserial.c 	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
pkt              1781 drivers/staging/fwserial/fwserial.c 	if (!pkt)
pkt              1802 drivers/staging/fwserial/fwserial.c 	fill_plug_req(pkt, peer->port);
pkt              1807 drivers/staging/fwserial/fwserial.c 	rcode = fwserial_send_mgmt_sync(peer, pkt);
pkt              1819 drivers/staging/fwserial/fwserial.c 	kfree(pkt);
pkt              1829 drivers/staging/fwserial/fwserial.c 	kfree(pkt);
pkt              2484 drivers/staging/fwserial/fwserial.c 	struct fwserial_mgmt_pkt *pkt;
pkt              2487 drivers/staging/fwserial/fwserial.c 	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
pkt              2488 drivers/staging/fwserial/fwserial.c 	if (!pkt)
pkt              2499 drivers/staging/fwserial/fwserial.c 			fill_plug_rsp_nack(pkt);
pkt              2502 drivers/staging/fwserial/fwserial.c 			fill_plug_rsp_ok(pkt, peer->port);
pkt              2515 drivers/staging/fwserial/fwserial.c 		fill_plug_rsp_ok(pkt, peer->port);
pkt              2520 drivers/staging/fwserial/fwserial.c 		fill_plug_rsp_nack(pkt);
pkt              2527 drivers/staging/fwserial/fwserial.c 	rcode = fwserial_send_mgmt_sync(peer, pkt);
pkt              2548 drivers/staging/fwserial/fwserial.c 	kfree(pkt);
pkt              2555 drivers/staging/fwserial/fwserial.c 	struct fwserial_mgmt_pkt *pkt;
pkt              2558 drivers/staging/fwserial/fwserial.c 	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
pkt              2559 drivers/staging/fwserial/fwserial.c 	if (!pkt)
pkt              2566 drivers/staging/fwserial/fwserial.c 		fill_unplug_rsp_ok(pkt);
pkt              2576 drivers/staging/fwserial/fwserial.c 		fill_unplug_rsp_ok(pkt);
pkt              2581 drivers/staging/fwserial/fwserial.c 		fill_unplug_rsp_nack(pkt);
pkt              2586 drivers/staging/fwserial/fwserial.c 	rcode = fwserial_send_mgmt_sync(peer, pkt);
pkt              2599 drivers/staging/fwserial/fwserial.c 	kfree(pkt);
pkt              2603 drivers/staging/fwserial/fwserial.c 				     struct fwserial_mgmt_pkt *pkt,
pkt              2611 drivers/staging/fwserial/fwserial.c 	if (addr != fwserial_mgmt_addr_handler.offset || len < sizeof(pkt->hdr))
pkt              2614 drivers/staging/fwserial/fwserial.c 	if (len != be16_to_cpu(pkt->hdr.len) ||
pkt              2615 drivers/staging/fwserial/fwserial.c 	    len != mgmt_pkt_expected_len(pkt->hdr.code))
pkt              2633 drivers/staging/fwserial/fwserial.c 	fwtty_dbg(&peer->unit, "mgmt: hdr.code: %04hx\n", pkt->hdr.code);
pkt              2635 drivers/staging/fwserial/fwserial.c 	switch (be16_to_cpu(pkt->hdr.code) & FWSC_CODE_MASK) {
pkt              2642 drivers/staging/fwserial/fwserial.c 			peer->work_params.plug_req = pkt->plug_req;
pkt              2652 drivers/staging/fwserial/fwserial.c 		} else if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK) {
pkt              2659 drivers/staging/fwserial/fwserial.c 			fwserial_virt_plug_complete(peer, &pkt->plug_rsp);
pkt              2681 drivers/staging/fwserial/fwserial.c 			if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK)
pkt              2690 drivers/staging/fwserial/fwserial.c 			  be16_to_cpu(pkt->hdr.code));
pkt              2716 drivers/staging/fwserial/fwserial.c 	struct fwserial_mgmt_pkt *pkt = data;
pkt              2730 drivers/staging/fwserial/fwserial.c 			rcode = fwserial_parse_mgmt_write(peer, pkt, addr, len);
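The fwserial hits trace a management-packet round trip: mgmt_pkt_expected_len() sizes the payload from the code, the fill_plug_*/fill_unplug_* helpers stamp hdr.code and hdr.len in big-endian, fwserial_send_mgmt_sync() ships the packet, and fwserial_parse_mgmt_write() checks the received length against both the header and the expected length before dispatching on the code. A compressed sketch of the sender side follows; it reuses only the file-local helpers named above (so it only makes sense inside fwserial.c) and omits the peer-state handling of the real connect path.

/*
 * Sketch of the plug-request path, modelled on fwserial_connect_peer()
 * above; fill_plug_req() and fwserial_send_mgmt_sync() are the file-local
 * helpers listed in the index.
 */
static int demo_send_plug_req(struct fwtty_peer *peer)
{
	struct fwserial_mgmt_pkt *pkt;
	int rcode;

	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	fill_plug_req(pkt, peer->port);		/* sets hdr.code and hdr.len in big-endian */
	rcode = fwserial_send_mgmt_sync(peer, pkt);

	kfree(pkt);
	return rcode;
}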
pkt              2017 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              2018 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u32 packet_len = precv_frame->pkt->len;
pkt              2514 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              2515 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	uint len = precv_frame->pkt->len;
pkt              2562 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              2563 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	uint len = precv_frame->pkt->len;
pkt              2659 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              2660 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	uint len = precv_frame->pkt->len;
pkt              2822 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              2823 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	uint pkt_len = precv_frame->pkt->len;
pkt              2910 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              2911 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	uint ie_len, pkt_len = precv_frame->pkt->len;
pkt              3335 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              3336 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	uint pkt_len = precv_frame->pkt->len;
pkt              3420 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              3474 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              3533 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              3589 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              3663 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *frame = recv_frame->pkt->data;
pkt              3693 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              3709 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              3722 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              3741 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              3812 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              4069 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt              4088 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *pframe = precv_frame->pkt->data;
pkt                72 drivers/staging/rtl8188eu/core/rtw_recv.c 		precvframe->pkt = NULL;
pkt               130 drivers/staging/rtl8188eu/core/rtw_recv.c 	if (precvframe->pkt) {
pkt               131 drivers/staging/rtl8188eu/core/rtw_recv.c 		dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
pkt               132 drivers/staging/rtl8188eu/core/rtw_recv.c 		precvframe->pkt = NULL;
pkt               255 drivers/staging/rtl8188eu/core/rtw_recv.c 			datalen = precvframe->pkt->len-prxattrib->hdrlen -
pkt               257 drivers/staging/rtl8188eu/core/rtw_recv.c 			pframe = precvframe->pkt->data;
pkt               293 drivers/staging/rtl8188eu/core/rtw_recv.c 						 precvframe->pkt->len));
pkt               294 drivers/staging/rtl8188eu/core/rtw_recv.c 					for (i = 0; i < precvframe->pkt->len; i += 8) {
pkt               298 drivers/staging/rtl8188eu/core/rtw_recv.c 							 *(precvframe->pkt->data+i),
pkt               299 drivers/staging/rtl8188eu/core/rtw_recv.c 							 *(precvframe->pkt->data+i+1),
pkt               300 drivers/staging/rtl8188eu/core/rtw_recv.c 							 *(precvframe->pkt->data+i+2),
pkt               301 drivers/staging/rtl8188eu/core/rtw_recv.c 							 *(precvframe->pkt->data+i+3),
pkt               302 drivers/staging/rtl8188eu/core/rtw_recv.c 							 *(precvframe->pkt->data+i+4),
pkt               303 drivers/staging/rtl8188eu/core/rtw_recv.c 							 *(precvframe->pkt->data+i+5),
pkt               304 drivers/staging/rtl8188eu/core/rtw_recv.c 							 *(precvframe->pkt->data+i+6),
pkt               305 drivers/staging/rtl8188eu/core/rtw_recv.c 							 *(precvframe->pkt->data+i+7)));
pkt               310 drivers/staging/rtl8188eu/core/rtw_recv.c 						 precvframe->pkt->len));
pkt               348 drivers/staging/rtl8188eu/core/rtw_recv.c 		skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
pkt               368 drivers/staging/rtl8188eu/core/rtw_recv.c 		u8 *iv = precv_frame->pkt->data + prxattrib->hdrlen;
pkt               437 drivers/staging/rtl8188eu/core/rtw_recv.c 	ptr = precv_frame->pkt->data;
pkt               526 drivers/staging/rtl8188eu/core/rtw_recv.c 	u8 *ptr = precv_frame->pkt->data;
pkt               614 drivers/staging/rtl8188eu/core/rtw_recv.c 	sz = prframe->pkt->len;
pkt               718 drivers/staging/rtl8188eu/core/rtw_recv.c 	u8 *ptr = precv_frame->pkt->data;
pkt               811 drivers/staging/rtl8188eu/core/rtw_recv.c 	u8 *ptr = precv_frame->pkt->data;
pkt               868 drivers/staging/rtl8188eu/core/rtw_recv.c 	u8 *pframe = precv_frame->pkt->data;
pkt              1006 drivers/staging/rtl8188eu/core/rtw_recv.c 			       GetAddr2Ptr(precv_frame->pkt->data));
pkt              1009 drivers/staging/rtl8188eu/core/rtw_recv.c 		if (GetFrameSubType(precv_frame->pkt->data) == WIFI_BEACON) {
pkt              1011 drivers/staging/rtl8188eu/core/rtw_recv.c 		} else if (GetFrameSubType(precv_frame->pkt->data) == WIFI_PROBEREQ) {
pkt              1013 drivers/staging/rtl8188eu/core/rtw_recv.c 		} else if (GetFrameSubType(precv_frame->pkt->data) == WIFI_PROBERSP) {
pkt              1015 drivers/staging/rtl8188eu/core/rtw_recv.c 				    GetAddr1Ptr(precv_frame->pkt->data), ETH_ALEN))
pkt              1017 drivers/staging/rtl8188eu/core/rtw_recv.c 			else if (is_multicast_ether_addr(GetAddr1Ptr(precv_frame->pkt->data)))
pkt              1035 drivers/staging/rtl8188eu/core/rtw_recv.c 	u8 *ptr = precv_frame->pkt->data;
pkt              1157 drivers/staging/rtl8188eu/core/rtw_recv.c 	u8 *ptr = precv_frame->pkt->data;
pkt              1271 drivers/staging/rtl8188eu/core/rtw_recv.c 	u8 *ptr = precvframe->pkt->data;
pkt              1275 drivers/staging/rtl8188eu/core/rtw_recv.c 		skb_trim(precvframe->pkt, precvframe->pkt->len - pattrib->icv_len);
pkt              1292 drivers/staging/rtl8188eu/core/rtw_recv.c 	len = precvframe->pkt->len - rmv_len;
pkt              1301 drivers/staging/rtl8188eu/core/rtw_recv.c 	ptr = skb_pull(precvframe->pkt, rmv_len - sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0));
pkt              1371 drivers/staging/rtl8188eu/core/rtw_recv.c 		skb_pull(pnextrframe->pkt, wlanhdr_offset);
pkt              1374 drivers/staging/rtl8188eu/core/rtw_recv.c 		skb_trim(prframe->pkt, prframe->pkt->len - prframe->attrib.icv_len);
pkt              1376 drivers/staging/rtl8188eu/core/rtw_recv.c 		skb_put_data(prframe->pkt, pnfhdr->pkt->data, pnfhdr->pkt->len);
pkt              1417 drivers/staging/rtl8188eu/core/rtw_recv.c 		u8 type = GetFrameType(pfhdr->pkt->data);
pkt              1504 drivers/staging/rtl8188eu/core/rtw_recv.c 	skb_pull(prframe->pkt, prframe->attrib.hdrlen);
pkt              1507 drivers/staging/rtl8188eu/core/rtw_recv.c 		skb_pull(prframe->pkt, prframe->attrib.iv_len);
pkt              1509 drivers/staging/rtl8188eu/core/rtw_recv.c 	a_len = prframe->pkt->len;
pkt              1511 drivers/staging/rtl8188eu/core/rtw_recv.c 	pdata = prframe->pkt->data;
pkt              1532 drivers/staging/rtl8188eu/core/rtw_recv.c 			sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
pkt               207 drivers/staging/rtl8188eu/core/rtw_security.c 		struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
pkt               674 drivers/staging/rtl8188eu/core/rtw_security.c 	pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
pkt               694 drivers/staging/rtl8188eu/core/rtw_security.c 			length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
pkt              1290 drivers/staging/rtl8188eu/core/rtw_security.c 			struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
pkt                89 drivers/staging/rtl8188eu/core/rtw_xmit.c 		pxframe->pkt = NULL;
pkt               393 drivers/staging/rtl8188eu/core/rtw_xmit.c static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib)
pkt               405 drivers/staging/rtl8188eu/core/rtw_xmit.c 	skb_copy_bits(pkt, 0, &etherhdr, ETH_HLEN);
pkt               426 drivers/staging/rtl8188eu/core/rtw_xmit.c 	pattrib->pktlen = pkt->len - ETH_HLEN;
pkt               435 drivers/staging/rtl8188eu/core/rtw_xmit.c 		skb_copy_bits(pkt, ETH_HLEN, tmp, 24);
pkt               438 drivers/staging/rtl8188eu/core/rtw_xmit.c 		if (pkt->len > ETH_HLEN + 24 + 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
pkt               499 drivers/staging/rtl8188eu/core/rtw_xmit.c 			set_qos(pkt, pattrib);
pkt               502 drivers/staging/rtl8188eu/core/rtw_xmit.c 			set_qos(pkt, pattrib);
pkt               907 drivers/staging/rtl8188eu/core/rtw_xmit.c s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
pkt               919 drivers/staging/rtl8188eu/core/rtw_xmit.c 	size_t remainder = pkt->len - ETH_HLEN;
pkt              1004 drivers/staging/rtl8188eu/core/rtw_xmit.c 		skb_copy_bits(pkt, pkt->len - remainder, pframe, mem_sz);
pkt              1282 drivers/staging/rtl8188eu/core/rtw_xmit.c 		pxframe->pkt = NULL;
pkt              1308 drivers/staging/rtl8188eu/core/rtw_xmit.c 	if (pxmitframe->pkt) {
pkt              1309 drivers/staging/rtl8188eu/core/rtw_xmit.c 		pndis_pkt = pxmitframe->pkt;
pkt              1310 drivers/staging/rtl8188eu/core/rtw_xmit.c 		pxmitframe->pkt = NULL;
pkt              1611 drivers/staging/rtl8188eu/core/rtw_xmit.c 	pxmitframe->pkt = *ppkt;
pkt               145 drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c 	wlanhdr = precvframe->pkt->data;
pkt               463 drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c 	rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
pkt               534 drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c 		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
pkt               627 drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c 	res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
pkt               222 drivers/staging/rtl8188eu/include/rtw_recv.h 	struct sk_buff	 *pkt;
pkt               210 drivers/staging/rtl8188eu/include/rtw_xmit.h 	struct sk_buff *pkt;
pkt               329 drivers/staging/rtl8188eu/include/rtw_xmit.h s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt,
pkt               341 drivers/staging/rtl8188eu/include/rtw_xmit.h s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
pkt                21 drivers/staging/rtl8188eu/include/xmit_osdep.h int rtw_xmit_entry(struct sk_buff *pkt, struct  net_device *pnetdev);
pkt                29 drivers/staging/rtl8188eu/include/xmit_osdep.h void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt);
pkt                87 drivers/staging/rtl8188eu/os_dep/mon.c 	data = frame->pkt->data;
pkt                88 drivers/staging/rtl8188eu/os_dep/mon.c 	data_len = frame->pkt->len;
pkt                75 drivers/staging/rtl8188eu/os_dep/recv_linux.c 	skb = precv_frame->pkt;
pkt               124 drivers/staging/rtl8188eu/os_dep/recv_linux.c 	precv_frame->pkt = NULL;
pkt               122 drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c 			precvframe->pkt = pkt_copy;
pkt               155 drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c 			handle_txrpt_ccx_88e(adapt, precvframe->pkt->data);
pkt               159 drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c 						  precvframe->pkt->data,
pkt               165 drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c 			interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->pkt->data);
pkt                51 drivers/staging/rtl8188eu/os_dep/xmit_linux.c void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
pkt                56 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	queue = skb_get_queue_mapping(pkt);
pkt                66 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	dev_kfree_skb_any(pkt);
pkt                71 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	if (pxframe->pkt)
pkt                72 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 		rtw_os_pkt_complete(padapter, pxframe->pkt);
pkt                73 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	pxframe->pkt = NULL;
pkt                94 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 				    struct sk_buff *pkt)
pkt                99 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	queue = skb_get_queue_mapping(pkt);
pkt               167 drivers/staging/rtl8188eu/os_dep/xmit_linux.c int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
pkt               182 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	rtw_check_xmit_resource(padapter, pkt);
pkt               185 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	    (IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data)) &&
pkt               188 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 			res = rtw_mlcst2unicst(padapter, pkt);
pkt               194 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	res = rtw_xmit(padapter, &pkt);
pkt               205 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	dev_kfree_skb_any(pkt);
pkt                36 drivers/staging/rtl8712/recv_linux.c 	precvframe->u.hdr.pkt = NULL;
pkt               102 drivers/staging/rtl8712/recv_linux.c 	skb = recvframe->u.hdr.pkt;
pkt               115 drivers/staging/rtl8712/recv_linux.c 	recvframe->u.hdr.pkt = NULL; /* pointers to NULL before
pkt               276 drivers/staging/rtl8712/rtl8712_efuse.c 	struct PGPKT_STRUCT pkt;
pkt               282 drivers/staging/rtl8712/rtl8712_efuse.c 	pkt.offset = GET_EFUSE_OFFSET(header);
pkt               283 drivers/staging/rtl8712/rtl8712_efuse.c 	pkt.word_en = GET_EFUSE_WORD_EN(header);
pkt               284 drivers/staging/rtl8712/rtl8712_efuse.c 	addr = header_addr + 1 + calculate_word_cnts(pkt.word_en) * 2;
pkt               296 drivers/staging/rtl8712/rtl8712_efuse.c 		if (pkt.offset != offset) {
pkt               302 drivers/staging/rtl8712/rtl8712_efuse.c 				if (BIT(i) & pkt.word_en) {
pkt               306 drivers/staging/rtl8712/rtl8712_efuse.c 						pkt.data[i * 2] = value;
pkt               313 drivers/staging/rtl8712/rtl8712_efuse.c 						pkt.data[i * 2 + 1] =
pkt               327 drivers/staging/rtl8712/rtl8712_efuse.c 		if (BIT(i) & pkt.word_en) {
pkt               328 drivers/staging/rtl8712/rtl8712_efuse.c 			efuse_one_byte_write(adapter, addr, pkt.data[i * 2]);
pkt               330 drivers/staging/rtl8712/rtl8712_efuse.c 					     pkt.data[i * 2 + 1]);
pkt               334 drivers/staging/rtl8712/rtl8712_efuse.c 			} else if (pkt.data[i * 2] != value) {
pkt               338 drivers/staging/rtl8712/rtl8712_efuse.c 							     pkt.data[i * 2]);
pkt               342 drivers/staging/rtl8712/rtl8712_efuse.c 			} else if (pkt.data[i * 2 + 1] != value) {
pkt               346 drivers/staging/rtl8712/rtl8712_efuse.c 							     pkt.data[i * 2 +
pkt               128 drivers/staging/rtl8712/rtl8712_recv.c 	if (precvframe->u.hdr.pkt) {
pkt               129 drivers/staging/rtl8712/rtl8712_recv.c 		dev_kfree_skb_any(precvframe->u.hdr.pkt);/*free skb by driver*/
pkt               130 drivers/staging/rtl8712/rtl8712_recv.c 		precvframe->u.hdr.pkt = NULL;
pkt              1050 drivers/staging/rtl8712/rtl8712_recv.c 			precvframe->u.hdr.pkt = pkt_copy;
pkt              1059 drivers/staging/rtl8712/rtl8712_recv.c 			precvframe->u.hdr.pkt = skb_clone(pskb, GFP_ATOMIC);
pkt              1060 drivers/staging/rtl8712/rtl8712_recv.c 			if (!precvframe->u.hdr.pkt)
pkt               116 drivers/staging/rtl8712/rtl8712_recv.h 	_pkt	*pkt;
pkt               290 drivers/staging/rtl8712/rtl8712_xmit.c 	r8712_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe);
pkt               667 drivers/staging/rtl8712/rtl8712_xmit.c 					pxmitframe->pkt, pxmitframe);
pkt               733 drivers/staging/rtl8712/rtl8712_xmit.c 	res = r8712_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe);
pkt               734 drivers/staging/rtl8712/rtl8712_xmit.c 	pxmitframe->pkt = NULL;
pkt               742 drivers/staging/rtl8712/rtl8712_xmit.c 		pxmitframe->pkt = NULL;
pkt                58 drivers/staging/rtl8712/rtl871x_mp.c 		pmp_xmitframe->pkt = NULL;
pkt                53 drivers/staging/rtl8712/rtl871x_mp.h 	_pkt *pkt;
pkt                91 drivers/staging/rtl8712/rtl871x_xmit.c 		pxframe->pkt = NULL;
pkt               176 drivers/staging/rtl8712/rtl871x_xmit.c int r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
pkt               191 drivers/staging/rtl8712/rtl871x_xmit.c 	_r8712_open_pktfile(pkt, &pktfile);
pkt               582 drivers/staging/rtl8712/rtl871x_xmit.c sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
pkt               610 drivers/staging/rtl8712/rtl871x_xmit.c 	_r8712_open_pktfile(pkt, &pktfile);
pkt               801 drivers/staging/rtl8712/rtl871x_xmit.c 		pxframe->pkt = NULL;
pkt               818 drivers/staging/rtl8712/rtl871x_xmit.c 	if (pxmitframe->pkt)
pkt               819 drivers/staging/rtl8712/rtl871x_xmit.c 		pxmitframe->pkt = NULL;
pkt               148 drivers/staging/rtl8712/rtl871x_xmit.h 	_pkt *pkt;
pkt               263 drivers/staging/rtl8712/rtl871x_xmit.h sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
pkt               267 drivers/staging/rtl8712/rtl871x_xmit.h int r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
pkt                40 drivers/staging/rtl8712/xmit_linux.c 	pfile->pkt = pktptr;
pkt                53 drivers/staging/rtl8712/xmit_linux.c 		skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len,
pkt                72 drivers/staging/rtl8712/xmit_linux.c 	_r8712_open_pktfile(ppktfile->pkt, ppktfile);
pkt               145 drivers/staging/rtl8712/xmit_linux.c 	if (pxframe->pkt)
pkt               146 drivers/staging/rtl8712/xmit_linux.c 		dev_kfree_skb_any(pxframe->pkt);
pkt               147 drivers/staging/rtl8712/xmit_linux.c 	pxframe->pkt = NULL;
pkt               150 drivers/staging/rtl8712/xmit_linux.c int r8712_xmit_entry(_pkt *pkt, struct  net_device *netdev)
pkt               163 drivers/staging/rtl8712/xmit_linux.c 	if (r8712_update_attrib(adapter, pkt, &xmitframe->attrib))
pkt               167 drivers/staging/rtl8712/xmit_linux.c 	xmitframe->pkt = pkt;
pkt               170 drivers/staging/rtl8712/xmit_linux.c 		dev_kfree_skb_any(pkt);
pkt               171 drivers/staging/rtl8712/xmit_linux.c 		xmitframe->pkt = NULL;
pkt               180 drivers/staging/rtl8712/xmit_linux.c 	dev_kfree_skb_any(pkt);
pkt                21 drivers/staging/rtl8712/xmit_osdep.h 	_pkt	*pkt;
pkt                37 drivers/staging/rtl8712/xmit_osdep.h int r8712_xmit_entry(_pkt *pkt, struct  net_device *pnetdev);
pkt                97 drivers/staging/rtl8723bs/core/rtw_xmit.c 		pxframe->pkt = NULL;
pkt               184 drivers/staging/rtl8723bs/core/rtw_xmit.c 		pxframe->pkt = NULL;
pkt               662 drivers/staging/rtl8723bs/core/rtw_xmit.c 	_rtw_open_pktfile(ppktfile->pkt, ppktfile);
pkt               676 drivers/staging/rtl8723bs/core/rtw_xmit.c static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib *pattrib)
pkt               691 drivers/staging/rtl8723bs/core/rtw_xmit.c 	_rtw_open_pktfile(pkt, &pktfile);
pkt              1195 drivers/staging/rtl8723bs/core/rtw_xmit.c s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe)
pkt              1259 drivers/staging/rtl8723bs/core/rtw_xmit.c 	_rtw_open_pktfile(pkt, &pktfile);
pkt              1355 drivers/staging/rtl8723bs/core/rtw_xmit.c s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe)
pkt              1937 drivers/staging/rtl8723bs/core/rtw_xmit.c 	pxframe->pkt = NULL;
pkt              1961 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (pxmitframe->pkt) {
pkt              1962 drivers/staging/rtl8723bs/core/rtw_xmit.c 		pndis_pkt = pxmitframe->pkt;
pkt              1963 drivers/staging/rtl8723bs/core/rtw_xmit.c 		pxmitframe->pkt = NULL;
pkt              2328 drivers/staging/rtl8723bs/core/rtw_xmit.c 	pxmitframe->pkt = *ppkt;
pkt               241 drivers/staging/rtl8723bs/hal/hal_intf.c 		rtw_mgmt_xmitframe_coalesce(padapter, pmgntframe->pkt, pmgntframe);
pkt               319 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c 					precvframe->u.hdr.pkt = pkt_copy;
pkt               333 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c 					precvframe->u.hdr.pkt = rtw_skb_clone(precvbuf->pskb);
pkt               334 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c 					if (precvframe->u.hdr.pkt) {
pkt               335 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c 						_pkt *pkt_clone = precvframe->u.hdr.pkt;
pkt               347 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c 				ret = rtw_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe);
pkt                35 drivers/staging/rtl8723bs/include/recv_osdep.h void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt_attrib *pattrib);
pkt                19 drivers/staging/rtl8723bs/include/rtw_mp.h 	_pkt *pkt;
pkt               340 drivers/staging/rtl8723bs/include/rtw_recv.h 	struct sk_buff	 *pkt;
pkt               343 drivers/staging/rtl8723bs/include/rtw_recv.h 	_pkt	*pkt;
pkt               290 drivers/staging/rtl8723bs/include/rtw_xmit.h 	_pkt *pkt;
pkt               476 drivers/staging/rtl8723bs/include/rtw_xmit.h extern s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe);
pkt               477 drivers/staging/rtl8723bs/include/rtw_xmit.h extern s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe);
pkt               494 drivers/staging/rtl8723bs/include/rtw_xmit.h s32 rtw_xmit(struct adapter *padapter, _pkt **pkt);
pkt                12 drivers/staging/rtl8723bs/include/xmit_osdep.h 	_pkt *pkt;
pkt                28 drivers/staging/rtl8723bs/include/xmit_osdep.h extern int _rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev);
pkt                29 drivers/staging/rtl8723bs/include/xmit_osdep.h extern int rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev);
pkt                37 drivers/staging/rtl8723bs/include/xmit_osdep.h extern void _rtw_open_pktfile(_pkt *pkt, struct pkt_file *pfile);
pkt                41 drivers/staging/rtl8723bs/include/xmit_osdep.h extern void rtw_os_pkt_complete(struct adapter *padapter, _pkt *pkt);
pkt                15 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	if (precvframe->u.hdr.pkt) {
pkt                16 drivers/staging/rtl8723bs/os_dep/recv_linux.c 		dev_kfree_skb_any(precvframe->u.hdr.pkt);/* free skb by driver */
pkt                18 drivers/staging/rtl8723bs/os_dep/recv_linux.c 		precvframe->u.hdr.pkt = NULL;
pkt                25 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	precvframe->u.hdr.pkt_newalloc = precvframe->u.hdr.pkt = NULL;
pkt                37 drivers/staging/rtl8723bs/os_dep/recv_linux.c 		if (precvframe->u.hdr.pkt) {
pkt                38 drivers/staging/rtl8723bs/os_dep/recv_linux.c 			dev_kfree_skb_any(precvframe->u.hdr.pkt);/* free skb by driver */
pkt                39 drivers/staging/rtl8723bs/os_dep/recv_linux.c 			precvframe->u.hdr.pkt = NULL;
pkt                66 drivers/staging/rtl8723bs/os_dep/recv_linux.c 		sub_skb = rtw_skb_clone(prframe->u.hdr.pkt);
pkt                99 drivers/staging/rtl8723bs/os_dep/recv_linux.c void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt_attrib *pattrib)
pkt               105 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	if (pkt) {
pkt               119 drivers/staging/rtl8723bs/os_dep/recv_linux.c 					pskb2 = rtw_skb_clone(pkt);
pkt               130 drivers/staging/rtl8723bs/os_dep/recv_linux.c 					pkt->dev = pnetdev;
pkt               131 drivers/staging/rtl8723bs/os_dep/recv_linux.c 					skb_set_queue_mapping(pkt, rtw_recv_select_queue(pkt));
pkt               133 drivers/staging/rtl8723bs/os_dep/recv_linux.c 					_rtw_xmit_entry(pkt, pnetdev);
pkt               136 drivers/staging/rtl8723bs/os_dep/recv_linux.c 						pkt = pskb2;
pkt               150 drivers/staging/rtl8723bs/os_dep/recv_linux.c 		pkt->protocol = eth_type_trans(pkt, padapter->pnetdev);
pkt               151 drivers/staging/rtl8723bs/os_dep/recv_linux.c 		pkt->dev = padapter->pnetdev;
pkt               155 drivers/staging/rtl8723bs/os_dep/recv_linux.c 			pkt->ip_summed = CHECKSUM_UNNECESSARY;
pkt               157 drivers/staging/rtl8723bs/os_dep/recv_linux.c 			pkt->ip_summed = CHECKSUM_NONE;
pkt               160 drivers/staging/rtl8723bs/os_dep/recv_linux.c 		pkt->ip_summed = CHECKSUM_NONE;
pkt               163 drivers/staging/rtl8723bs/os_dep/recv_linux.c 		ret = rtw_netif_rx(padapter->pnetdev, pkt);
pkt               220 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	_pkt *skb = precv_frame->u.hdr.pkt;
pkt               266 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	skb = precv_frame->u.hdr.pkt;
pkt               294 drivers/staging/rtl8723bs/os_dep/recv_linux.c 	precv_frame->u.hdr.pkt = NULL; /*  pointers to NULL before rtw_free_recvframe() */
pkt                20 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	pfile->pkt = pktptr;
pkt                35 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, len);
pkt                70 drivers/staging/rtl8723bs/os_dep/xmit_linux.c void rtw_os_pkt_complete(struct adapter *padapter, _pkt *pkt)
pkt                75 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	queue = skb_get_queue_mapping(pkt);
pkt                85 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	dev_kfree_skb_any(pkt);
pkt                90 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	if (pxframe->pkt)
pkt                91 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		rtw_os_pkt_complete(padapter, pxframe->pkt);
pkt                93 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	pxframe->pkt = NULL;
pkt               107 drivers/staging/rtl8723bs/os_dep/xmit_linux.c static void rtw_check_xmit_resource(struct adapter *padapter, _pkt *pkt)
pkt               112 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	queue = skb_get_queue_mapping(pkt);
pkt               202 drivers/staging/rtl8723bs/os_dep/xmit_linux.c int _rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
pkt               221 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	rtw_check_xmit_resource(padapter, pkt);
pkt               225 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		&& (IP_MCAST_MAC(pkt->data)
pkt               226 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 			|| ICMPV6_MCAST_MAC(pkt->data)
pkt               228 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 			|| is_broadcast_mac_addr(pkt->data)
pkt               233 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 			res = rtw_mlcst2unicst(padapter, pkt);
pkt               243 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	res = rtw_xmit(padapter, &pkt);
pkt               256 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	dev_kfree_skb_any(pkt);
pkt               263 drivers/staging/rtl8723bs/os_dep/xmit_linux.c int rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
pkt               267 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	if (pkt) {
pkt               268 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, pkt->truesize);
pkt               269 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		ret = _rtw_xmit_entry(pkt, pnetdev);
pkt                70 drivers/tty/hvc/hvsi_lib.c 	struct hvsi_control *pkt = (struct hvsi_control *)pv->inbuf;
pkt                72 drivers/tty/hvc/hvsi_lib.c 	switch (be16_to_cpu(pkt->verb)) {
pkt                79 drivers/tty/hvc/hvsi_lib.c 		hvsi_cd_change(pv, be32_to_cpu(pkt->word) & HVSI_TSCD);
pkt                86 drivers/tty/hvc/hvsi_lib.c 	struct hvsi_query *pkt = (struct hvsi_query *)pv->inbuf;
pkt                90 drivers/tty/hvc/hvsi_lib.c 	if (be16_to_cpu(pkt->verb) != VSV_SEND_VERSION_NUMBER)
pkt               101 drivers/tty/hvc/hvsi_lib.c 	r.query_seqno = pkt->hdr.seqno;
pkt               464 drivers/tty/ipwireless/hardware.c 	union nl_packet pkt;
pkt               478 drivers/tty/ipwireless/hardware.c 	pkt.hdr_first.protocol = packet->protocol;
pkt               479 drivers/tty/ipwireless/hardware.c 	pkt.hdr_first.address = packet->dest_addr;
pkt               480 drivers/tty/ipwireless/hardware.c 	pkt.hdr_first.packet_rank = 0;
pkt               484 drivers/tty/ipwireless/hardware.c 		pkt.hdr_first.packet_rank |= NL_FIRST_PACKET;
pkt               485 drivers/tty/ipwireless/hardware.c 		pkt.hdr_first.length_lsb = (unsigned char) packet->length;
pkt               486 drivers/tty/ipwireless/hardware.c 		pkt.hdr_first.length_msb =
pkt               490 drivers/tty/ipwireless/hardware.c 	memcpy(pkt.rawpkt + header_size,
pkt               498 drivers/tty/ipwireless/hardware.c 		pkt.hdr_first.packet_rank |= NL_LAST_PACKET;
pkt               499 drivers/tty/ipwireless/hardware.c 	do_send_fragment(hw, pkt.rawpkt, header_size + fragment_data_len);
pkt               858 drivers/tty/ipwireless/hardware.c 	unsigned char pkt[LL_MTU_MAX];
pkt               875 drivers/tty/ipwireless/hardware.c 			pkt[i] = (unsigned char) data;
pkt               876 drivers/tty/ipwireless/hardware.c 			pkt[i + 1] = (unsigned char) (data >> 8);
pkt               892 drivers/tty/ipwireless/hardware.c 			pkt[i] = (unsigned char) data;
pkt               893 drivers/tty/ipwireless/hardware.c 			pkt[i + 1] = (unsigned char) (data >> 8);
pkt               904 drivers/tty/ipwireless/hardware.c 	swap_packet_bitfield_from_le(pkt);
pkt               907 drivers/tty/ipwireless/hardware.c 		dump_data_bytes("recv", pkt, len);
pkt               909 drivers/tty/ipwireless/hardware.c 	handle_received_packet(hw, (union nl_packet *) pkt, len);
pkt                93 drivers/tty/vcc.c #define vccdbgp(pkt)						\
pkt                97 drivers/tty/vcc.c 			for (i = 0; i < pkt.tag.stype; i++)	\
pkt                98 drivers/tty/vcc.c 				pr_info("[%c]", pkt.data[i]);	\
pkt               318 drivers/tty/vcc.c 	struct vio_vcc pkt;
pkt               337 drivers/tty/vcc.c 		rv = ldc_read(vio->lp, &pkt, sizeof(pkt));
pkt               343 drivers/tty/vcc.c 		       pkt.tag.type, pkt.tag.stype,
pkt               344 drivers/tty/vcc.c 		       pkt.tag.stype_env, pkt.tag.sid);
pkt               346 drivers/tty/vcc.c 		if (pkt.tag.type == VIO_TYPE_DATA) {
pkt               347 drivers/tty/vcc.c 			vccdbgp(pkt);
pkt               349 drivers/tty/vcc.c 			vcc_rx(tty, pkt.data, pkt.tag.stype);
pkt               352 drivers/tty/vcc.c 			       pkt.tag.type, pkt.tag.stype,
pkt               353 drivers/tty/vcc.c 			       pkt.tag.stype_env, pkt.tag.sid);
pkt               394 drivers/tty/vcc.c 	struct vio_vcc *pkt;
pkt               409 drivers/tty/vcc.c 	pkt = &port->buffer;
pkt               410 drivers/tty/vcc.c 	pkt->tag.type = VIO_TYPE_DATA;
pkt               411 drivers/tty/vcc.c 	pkt->tag.stype = tosend;
pkt               414 drivers/tty/vcc.c 	rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend));
pkt               503 drivers/tty/vcc.c 	struct vio_vcc pkt;
pkt               506 drivers/tty/vcc.c 	pkt.tag.type = VIO_TYPE_CTRL;
pkt               507 drivers/tty/vcc.c 	pkt.tag.sid = ctl;
pkt               508 drivers/tty/vcc.c 	pkt.tag.stype = 0;
pkt               510 drivers/tty/vcc.c 	rv = ldc_write(port->vio.lp, &pkt, sizeof(pkt.tag));
pkt               512 drivers/tty/vcc.c 	vccdbg("VCC: ldc_write(%ld)=%d\n", sizeof(pkt.tag), rv);
pkt               841 drivers/tty/vcc.c 	struct vio_vcc *pkt;
pkt               860 drivers/tty/vcc.c 	pkt = &port->buffer;
pkt               861 drivers/tty/vcc.c 	pkt->tag.type = VIO_TYPE_DATA;
pkt               870 drivers/tty/vcc.c 		memcpy(&pkt->data[port->chars_in_buffer], &buf[total_sent],
pkt               873 drivers/tty/vcc.c 		pkt->tag.stype = tosend;
pkt               875 drivers/tty/vcc.c 		vccdbg("TAG [%02x:%02x:%04x:%08x]\n", pkt->tag.type,
pkt               876 drivers/tty/vcc.c 		       pkt->tag.stype, pkt->tag.stype_env, pkt->tag.sid);
pkt               877 drivers/tty/vcc.c 		vccdbg("DATA [%s]\n", pkt->data);
pkt               884 drivers/tty/vcc.c 		rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend));
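The vcc.c hits show both directions of the LDC data path: on receive, ldc_read() fills a struct vio_vcc and tag.stype carries the byte count handed to vcc_rx(); on transmit, tag.type/tag.stype are set and ldc_write() is given VIO_TAG_SIZE plus the payload length. A small transmit sketch follows, assuming an already-established LDC channel; struct demo_vcc_pkt is a stand-in for the driver's struct vio_vcc (a vio_msg_tag followed by a short data[] payload), and its 128-byte size is illustrative.

#include <linux/string.h>
#include <asm/ldc.h>
#include <asm/vio.h>

/* Stand-in for struct vio_vcc used above: tag header plus a small payload. */
struct demo_vcc_pkt {
	struct vio_msg_tag tag;
	char data[128];
};

static int demo_vcc_send(struct ldc_channel *lp, const char *buf, size_t len)
{
	struct demo_vcc_pkt pkt;

	if (len > sizeof(pkt.data))
		len = sizeof(pkt.data);	/* one write's worth per call */

	pkt.tag.type = VIO_TYPE_DATA;
	pkt.tag.stype = len;		/* for VCC data, stype carries the byte count */
	memcpy(pkt.data, buf, len);

	/*
	 * Only the tag plus the used part of the payload goes on the wire,
	 * matching the VIO_TAG_SIZE + tosend length seen above.
	 */
	return ldc_write(lp, &pkt, sizeof(pkt.tag) + len);
}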
pkt              1051 drivers/usb/gadget/udc/at91_udc.c 	union setup	pkt;
pkt              1058 drivers/usb/gadget/udc/at91_udc.c 			pkt.raw[i++] = __raw_readb(dreg);
pkt              1059 drivers/usb/gadget/udc/at91_udc.c 		if (pkt.r.bRequestType & USB_DIR_IN) {
pkt              1080 drivers/usb/gadget/udc/at91_udc.c #define w_index		le16_to_cpu(pkt.r.wIndex)
pkt              1081 drivers/usb/gadget/udc/at91_udc.c #define w_value		le16_to_cpu(pkt.r.wValue)
pkt              1082 drivers/usb/gadget/udc/at91_udc.c #define w_length	le16_to_cpu(pkt.r.wLength)
pkt              1085 drivers/usb/gadget/udc/at91_udc.c 			pkt.r.bRequestType, pkt.r.bRequest,
pkt              1096 drivers/usb/gadget/udc/at91_udc.c 	switch ((pkt.r.bRequestType << 8) | pkt.r.bRequest) {
pkt              1110 drivers/usb/gadget/udc/at91_udc.c 		if (pkt.r.wValue)
pkt              1247 drivers/usb/gadget/udc/at91_udc.c 		status = udc->driver->setup(&udc->gadget, &pkt.r);
pkt              1255 drivers/usb/gadget/udc/at91_udc.c 				pkt.r.bRequestType, pkt.r.bRequest, status);
pkt                28 drivers/usb/host/fhci-sched.c static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
pkt                30 drivers/usb/host/fhci-sched.c 	pkt->data = NULL;
pkt                31 drivers/usb/host/fhci-sched.c 	pkt->len = 0;
pkt                32 drivers/usb/host/fhci-sched.c 	pkt->status = USB_TD_OK;
pkt                33 drivers/usb/host/fhci-sched.c 	pkt->info = 0;
pkt                34 drivers/usb/host/fhci-sched.c 	pkt->priv_data = NULL;
pkt                36 drivers/usb/host/fhci-sched.c 	cq_put(&usb->ep0->empty_frame_Q, pkt);
pkt                40 drivers/usb/host/fhci-sched.c void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
pkt                49 drivers/usb/host/fhci-sched.c 	td_pkt = td->pkt;
pkt                50 drivers/usb/host/fhci-sched.c 	trans_len = pkt->len;
pkt                51 drivers/usb/host/fhci-sched.c 	td->status = pkt->status;
pkt                54 drivers/usb/host/fhci-sched.c 			memcpy(td->data + td->actual_len, pkt->data,
pkt                56 drivers/usb/host/fhci-sched.c 		cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
pkt                59 drivers/usb/host/fhci-sched.c 	recycle_frame(usb, pkt);
pkt               137 drivers/usb/host/fhci-sched.c 		struct packet *pkt = td->pkt;
pkt               139 drivers/usb/host/fhci-sched.c 		pkt->status = USB_TD_TX_ER_TIMEOUT;
pkt               140 drivers/usb/host/fhci-sched.c 		fhci_transaction_confirm(usb, pkt);
pkt               158 drivers/usb/host/fhci-sched.c 	struct packet *pkt;
pkt               212 drivers/usb/host/fhci-sched.c 	pkt = cq_get(&usb->ep0->empty_frame_Q);
pkt               213 drivers/usb/host/fhci-sched.c 	if (!pkt) {
pkt               217 drivers/usb/host/fhci-sched.c 	td->pkt = pkt;
pkt               219 drivers/usb/host/fhci-sched.c 	pkt->info = 0;
pkt               223 drivers/usb/host/fhci-sched.c 		pkt->info = PKT_DUMMY_PACKET;
pkt               225 drivers/usb/host/fhci-sched.c 	pkt->data = data;
pkt               226 drivers/usb/host/fhci-sched.c 	pkt->len = len;
pkt               227 drivers/usb/host/fhci-sched.c 	pkt->status = USB_TD_OK;
pkt               236 drivers/usb/host/fhci-sched.c 		pkt->status = USB_TD_TX_ER_TIMEOUT;
pkt               237 drivers/usb/host/fhci-sched.c 		pkt->len = 0;
pkt               238 drivers/usb/host/fhci-sched.c 		fhci_transaction_confirm(usb, pkt);
pkt               239 drivers/usb/host/fhci-sched.c 	} else if (fhci_host_transaction(usb, pkt, td->type, ed->dev_addr,
pkt               244 drivers/usb/host/fhci-sched.c 		if (pkt->info & PKT_DUMMY_PACKET)
pkt               245 drivers/usb/host/fhci-sched.c 			cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
pkt               246 drivers/usb/host/fhci-sched.c 		recycle_frame(usb, pkt);
pkt               108 drivers/usb/host/fhci-tds.c 				struct packet *pkt = cq_get(&ep->conf_frame_Q);
pkt               110 drivers/usb/host/fhci-tds.c 				kfree(pkt);
pkt               118 drivers/usb/host/fhci-tds.c 				struct packet *pkt = cq_get(&ep->empty_frame_Q);
pkt               120 drivers/usb/host/fhci-tds.c 				kfree(pkt);
pkt               183 drivers/usb/host/fhci-tds.c 		struct packet *pkt;
pkt               186 drivers/usb/host/fhci-tds.c 		pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
pkt               187 drivers/usb/host/fhci-tds.c 		if (!pkt) {
pkt               194 drivers/usb/host/fhci-tds.c 			kfree(pkt);
pkt               198 drivers/usb/host/fhci-tds.c 		cq_put(&ep->empty_frame_Q, pkt);
pkt               280 drivers/usb/host/fhci-tds.c 	struct packet *pkt;
pkt               318 drivers/usb/host/fhci-tds.c 		pkt = cq_get(&ep->conf_frame_Q);
pkt               319 drivers/usb/host/fhci-tds.c 		if (!pkt)
pkt               325 drivers/usb/host/fhci-tds.c 					pkt->status = USB_TD_RX_ER_CRC;
pkt               327 drivers/usb/host/fhci-tds.c 					pkt->status = USB_TD_RX_ER_BITSTUFF;
pkt               329 drivers/usb/host/fhci-tds.c 					pkt->status = USB_TD_RX_ER_OVERUN;
pkt               331 drivers/usb/host/fhci-tds.c 					pkt->status = USB_TD_RX_DATA_OVERUN;
pkt               333 drivers/usb/host/fhci-tds.c 					pkt->status = USB_TD_RX_ER_NONOCT;
pkt               338 drivers/usb/host/fhci-tds.c 				pkt->status = USB_TD_TX_ER_NAK;
pkt               340 drivers/usb/host/fhci-tds.c 				pkt->status = USB_TD_TX_ER_TIMEOUT;
pkt               342 drivers/usb/host/fhci-tds.c 				pkt->status = USB_TD_TX_ER_UNDERUN;
pkt               344 drivers/usb/host/fhci-tds.c 				pkt->status = USB_TD_TX_ER_STALL;
pkt               348 drivers/usb/host/fhci-tds.c 				pkt->len > td_length - CRC_SIZE) {
pkt               349 drivers/usb/host/fhci-tds.c 			pkt->status = USB_TD_RX_DATA_UNDERUN;
pkt               353 drivers/usb/host/fhci-tds.c 			pkt->len = td_length - CRC_SIZE;
pkt               354 drivers/usb/host/fhci-tds.c 		else if (pkt->info & PKT_ZLP)
pkt               355 drivers/usb/host/fhci-tds.c 			pkt->len = 0;
pkt               357 drivers/usb/host/fhci-tds.c 			pkt->len = td_length;
pkt               359 drivers/usb/host/fhci-tds.c 		fhci_transaction_confirm(usb, pkt);
pkt               378 drivers/usb/host/fhci-tds.c 			  struct packet *pkt,
pkt               404 drivers/usb/host/fhci-tds.c 	pkt->priv_data = td;
pkt               405 drivers/usb/host/fhci-tds.c 	out_be32(&td->buf_ptr, virt_to_phys(pkt->data));
pkt               425 drivers/usb/host/fhci-tds.c 	if (!(pkt->info & PKT_NO_CRC))
pkt               431 drivers/usb/host/fhci-tds.c 			pkt->info |= PKT_PID_DATA1;
pkt               433 drivers/usb/host/fhci-tds.c 			pkt->info |= PKT_PID_DATA0;
pkt               438 drivers/usb/host/fhci-tds.c 			pkt->info |= PKT_PID_DATA1;
pkt               441 drivers/usb/host/fhci-tds.c 			pkt->info |= PKT_PID_DATA0;
pkt               454 drivers/usb/host/fhci-tds.c 		out_be16(&td->length, pkt->len + CRC_SIZE);
pkt               456 drivers/usb/host/fhci-tds.c 		out_be16(&td->length, pkt->len);
pkt               459 drivers/usb/host/fhci-tds.c 	cq_put(&ep->conf_frame_Q, pkt);
pkt               355 drivers/usb/host/fhci.h 	struct packet *pkt;
pkt               537 drivers/usb/host/fhci.h u32 fhci_host_transaction(struct fhci_usb *usb, struct packet *pkt,
pkt               553 drivers/usb/host/fhci.h void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt);
pkt                22 drivers/usb/renesas_usbhs/fifo.c void usbhs_pkt_init(struct usbhs_pkt *pkt)
pkt                24 drivers/usb/renesas_usbhs/fifo.c 	INIT_LIST_HEAD(&pkt->node);
pkt                30 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
pkt                32 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
pkt                45 drivers/usb/renesas_usbhs/fifo.c void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
pkt                47 drivers/usb/renesas_usbhs/fifo.c 				 struct usbhs_pkt *pkt),
pkt                67 drivers/usb/renesas_usbhs/fifo.c 	list_move_tail(&pkt->node, &pipe->list);
pkt                74 drivers/usb/renesas_usbhs/fifo.c 	pkt->pipe	= pipe;
pkt                75 drivers/usb/renesas_usbhs/fifo.c 	pkt->buf	= buf;
pkt                76 drivers/usb/renesas_usbhs/fifo.c 	pkt->handler	= pipe->handler;
pkt                77 drivers/usb/renesas_usbhs/fifo.c 	pkt->length	= len;
pkt                78 drivers/usb/renesas_usbhs/fifo.c 	pkt->zero	= zero;
pkt                79 drivers/usb/renesas_usbhs/fifo.c 	pkt->actual	= 0;
pkt                80 drivers/usb/renesas_usbhs/fifo.c 	pkt->done	= done;
pkt                81 drivers/usb/renesas_usbhs/fifo.c 	pkt->sequence	= sequence;
pkt                87 drivers/usb/renesas_usbhs/fifo.c static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
pkt                89 drivers/usb/renesas_usbhs/fifo.c 	list_del_init(&pkt->node);
pkt               100 drivers/usb/renesas_usbhs/fifo.c 					    struct usbhs_pkt *pkt);
pkt               103 drivers/usb/renesas_usbhs/fifo.c static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
pkt               104 drivers/usb/renesas_usbhs/fifo.c struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
pkt               115 drivers/usb/renesas_usbhs/fifo.c 	if (!pkt)
pkt               116 drivers/usb/renesas_usbhs/fifo.c 		pkt = __usbhsf_pkt_get(pipe);
pkt               118 drivers/usb/renesas_usbhs/fifo.c 	if (pkt) {
pkt               122 drivers/usb/renesas_usbhs/fifo.c 			chan = usbhsf_dma_chan_get(fifo, pkt);
pkt               125 drivers/usb/renesas_usbhs/fifo.c 			usbhsf_dma_unmap(pkt);
pkt               130 drivers/usb/renesas_usbhs/fifo.c 		__usbhsf_pkt_del(pkt);
pkt               139 drivers/usb/renesas_usbhs/fifo.c 	return pkt;
pkt               151 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pkt *pkt;
pkt               153 drivers/usb/renesas_usbhs/fifo.c 	int (*func)(struct usbhs_pkt *pkt, int *is_done);
pkt               161 drivers/usb/renesas_usbhs/fifo.c 	pkt = __usbhsf_pkt_get(pipe);
pkt               162 drivers/usb/renesas_usbhs/fifo.c 	if (!pkt)
pkt               167 drivers/usb/renesas_usbhs/fifo.c 		func = pkt->handler->prepare;
pkt               170 drivers/usb/renesas_usbhs/fifo.c 		func = pkt->handler->try_run;
pkt               173 drivers/usb/renesas_usbhs/fifo.c 		func = pkt->handler->dma_done;
pkt               181 drivers/usb/renesas_usbhs/fifo.c 		ret = func(pkt, &is_done);
pkt               184 drivers/usb/renesas_usbhs/fifo.c 		__usbhsf_pkt_del(pkt);
pkt               191 drivers/usb/renesas_usbhs/fifo.c 		pkt->done(priv, pkt);
pkt               347 drivers/usb/renesas_usbhs/fifo.c static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
pkt               349 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               376 drivers/usb/renesas_usbhs/fifo.c static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
pkt               378 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               404 drivers/usb/renesas_usbhs/fifo.c static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
pkt               406 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               408 drivers/usb/renesas_usbhs/fifo.c 	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
pkt               413 drivers/usb/renesas_usbhs/fifo.c 	pkt->actual = pkt->length;
pkt               432 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
pkt               434 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               441 drivers/usb/renesas_usbhs/fifo.c 	pkt->handler = &usbhs_fifo_pio_push_handler;
pkt               443 drivers/usb/renesas_usbhs/fifo.c 	return pkt->handler->prepare(pkt, is_done);
pkt               453 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
pkt               456 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               480 drivers/usb/renesas_usbhs/fifo.c 	pkt->handler = &usbhs_fifo_pio_pop_handler;
pkt               482 drivers/usb/renesas_usbhs/fifo.c 	return pkt->handler->prepare(pkt, is_done);
pkt               492 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
pkt               494 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               505 drivers/usb/renesas_usbhs/fifo.c 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
pkt               506 drivers/usb/renesas_usbhs/fifo.c 	pkt->sequence = -1; /* -1 sequence will be ignored */
pkt               508 drivers/usb/renesas_usbhs/fifo.c 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
pkt               525 drivers/usb/renesas_usbhs/fifo.c 	buf		= pkt->buf    + pkt->actual;
pkt               526 drivers/usb/renesas_usbhs/fifo.c 	len		= pkt->length - pkt->actual;
pkt               554 drivers/usb/renesas_usbhs/fifo.c 	pkt->actual += total_len;
pkt               556 drivers/usb/renesas_usbhs/fifo.c 	if (pkt->actual < pkt->length)
pkt               561 drivers/usb/renesas_usbhs/fifo.c 		*is_done = !pkt->zero;	/* send zero packet ? */
pkt               575 drivers/usb/renesas_usbhs/fifo.c 		pkt->length, pkt->actual, *is_done, pkt->zero);
pkt               594 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
pkt               596 drivers/usb/renesas_usbhs/fifo.c 	if (usbhs_pipe_is_running(pkt->pipe))
pkt               599 drivers/usb/renesas_usbhs/fifo.c 	return usbhsf_pio_try_push(pkt, is_done);
pkt               610 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
pkt               612 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               625 drivers/usb/renesas_usbhs/fifo.c 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
pkt               626 drivers/usb/renesas_usbhs/fifo.c 	pkt->sequence = -1; /* -1 sequence will be ignored */
pkt               631 drivers/usb/renesas_usbhs/fifo.c 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
pkt               639 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
pkt               641 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               663 drivers/usb/renesas_usbhs/fifo.c 	buf		= pkt->buf    + pkt->actual;
pkt               664 drivers/usb/renesas_usbhs/fifo.c 	len		= pkt->length - pkt->actual;
pkt               673 drivers/usb/renesas_usbhs/fifo.c 	pkt->actual += total_len;
pkt               675 drivers/usb/renesas_usbhs/fifo.c 	if ((pkt->actual == pkt->length) ||	/* receive all data */
pkt               697 drivers/usb/renesas_usbhs/fifo.c 		pkt->zero = 1;
pkt               724 drivers/usb/renesas_usbhs/fifo.c 		pkt->length, pkt->actual, *is_done, pkt->zero);
pkt               740 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
pkt               742 drivers/usb/renesas_usbhs/fifo.c 	usbhs_dcp_control_transfer_done(pkt->pipe);
pkt               758 drivers/usb/renesas_usbhs/fifo.c 					    struct usbhs_pkt *pkt)
pkt               760 drivers/usb/renesas_usbhs/fifo.c 	if (&usbhs_fifo_dma_push_handler == pkt->handler)
pkt               763 drivers/usb/renesas_usbhs/fifo.c 	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
pkt               770 drivers/usb/renesas_usbhs/fifo.c 					      struct usbhs_pkt *pkt)
pkt               776 drivers/usb/renesas_usbhs/fifo.c 		if (usbhsf_dma_chan_get(fifo, pkt) &&
pkt               795 drivers/usb/renesas_usbhs/fifo.c static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
pkt               797 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               801 drivers/usb/renesas_usbhs/fifo.c 	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
pkt               803 drivers/usb/renesas_usbhs/fifo.c 	return info->dma_map_ctrl(chan->device->dev, pkt, map);
pkt               807 drivers/usb/renesas_usbhs/fifo.c static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
pkt               809 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               821 drivers/usb/renesas_usbhs/fifo.c 	chan = usbhsf_dma_chan_get(fifo, pkt);
pkt               824 drivers/usb/renesas_usbhs/fifo.c 	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
pkt               825 drivers/usb/renesas_usbhs/fifo.c 					pkt->trans, dir,
pkt               833 drivers/usb/renesas_usbhs/fifo.c 	pkt->cookie = dmaengine_submit(desc);
pkt               834 drivers/usb/renesas_usbhs/fifo.c 	if (pkt->cookie < 0) {
pkt               840 drivers/usb/renesas_usbhs/fifo.c 		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
pkt               843 drivers/usb/renesas_usbhs/fifo.c 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
pkt               851 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
pkt               852 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               857 drivers/usb/renesas_usbhs/fifo.c 	usbhsf_dma_xfer_preparing(pkt);
pkt               864 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
pkt               866 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               869 drivers/usb/renesas_usbhs/fifo.c 	int len = pkt->length - pkt->actual;
pkt               888 drivers/usb/renesas_usbhs/fifo.c 	if ((uintptr_t)(pkt->buf + pkt->actual) & align_mask)
pkt               896 drivers/usb/renesas_usbhs/fifo.c 	fifo = usbhsf_get_dma_fifo(priv, pkt);
pkt               904 drivers/usb/renesas_usbhs/fifo.c 	if (usbhsf_dma_map(pkt) < 0)
pkt               907 drivers/usb/renesas_usbhs/fifo.c 	pkt->trans = len;
pkt               912 drivers/usb/renesas_usbhs/fifo.c 		usbhsf_dma_xfer_preparing(pkt);
pkt               914 drivers/usb/renesas_usbhs/fifo.c 		INIT_WORK(&pkt->work, xfer_work);
pkt               915 drivers/usb/renesas_usbhs/fifo.c 		schedule_work(&pkt->work);
pkt               926 drivers/usb/renesas_usbhs/fifo.c 	pkt->handler = &usbhs_fifo_pio_push_handler;
pkt               928 drivers/usb/renesas_usbhs/fifo.c 	return pkt->handler->prepare(pkt, is_done);
pkt               931 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
pkt               933 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               934 drivers/usb/renesas_usbhs/fifo.c 	int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);
pkt               936 drivers/usb/renesas_usbhs/fifo.c 	pkt->actual += pkt->trans;
pkt               938 drivers/usb/renesas_usbhs/fifo.c 	if (pkt->actual < pkt->length)
pkt               943 drivers/usb/renesas_usbhs/fifo.c 		*is_done = !pkt->zero;	/* send zero packet? */
pkt               948 drivers/usb/renesas_usbhs/fifo.c 	usbhsf_dma_unmap(pkt);
pkt               953 drivers/usb/renesas_usbhs/fifo.c 		pkt->handler = &usbhs_fifo_pio_push_handler;
pkt               954 drivers/usb/renesas_usbhs/fifo.c 		return pkt->handler->try_run(pkt, is_done);
pkt               969 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_prepare_pop_with_rx_irq(struct usbhs_pkt *pkt,
pkt               972 drivers/usb/renesas_usbhs/fifo.c 	return usbhsf_prepare_pop(pkt, is_done);
pkt               975 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
pkt               978 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               987 drivers/usb/renesas_usbhs/fifo.c 	if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
pkt               991 drivers/usb/renesas_usbhs/fifo.c 	fifo = usbhsf_get_dma_fifo(priv, pkt);
pkt               995 drivers/usb/renesas_usbhs/fifo.c 	if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
pkt              1008 drivers/usb/renesas_usbhs/fifo.c 	if (usbhsf_dma_map(pkt) < 0)
pkt              1020 drivers/usb/renesas_usbhs/fifo.c 	pkt->trans = pkt->length;
pkt              1022 drivers/usb/renesas_usbhs/fifo.c 	usbhsf_dma_xfer_preparing(pkt);
pkt              1033 drivers/usb/renesas_usbhs/fifo.c 	pkt->handler = &usbhs_fifo_pio_pop_handler;
pkt              1036 drivers/usb/renesas_usbhs/fifo.c 	return pkt->handler->prepare(pkt, is_done);
pkt              1039 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
pkt              1041 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
pkt              1044 drivers/usb/renesas_usbhs/fifo.c 		return usbhsf_dma_prepare_pop_with_usb_dmac(pkt, is_done);
pkt              1046 drivers/usb/renesas_usbhs/fifo.c 		return usbhsf_dma_prepare_pop_with_rx_irq(pkt, is_done);
pkt              1049 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_try_pop_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
pkt              1051 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt              1063 drivers/usb/renesas_usbhs/fifo.c 	fifo = usbhsf_get_dma_fifo(priv, pkt);
pkt              1067 drivers/usb/renesas_usbhs/fifo.c 	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
pkt              1076 drivers/usb/renesas_usbhs/fifo.c 	len = min(pkt->length - pkt->actual, len);
pkt              1087 drivers/usb/renesas_usbhs/fifo.c 	if (usbhsf_dma_map(pkt) < 0)
pkt              1099 drivers/usb/renesas_usbhs/fifo.c 	pkt->trans = len;
pkt              1101 drivers/usb/renesas_usbhs/fifo.c 	INIT_WORK(&pkt->work, xfer_work);
pkt              1102 drivers/usb/renesas_usbhs/fifo.c 	schedule_work(&pkt->work);
pkt              1113 drivers/usb/renesas_usbhs/fifo.c 	pkt->handler = &usbhs_fifo_pio_pop_handler;
pkt              1115 drivers/usb/renesas_usbhs/fifo.c 	return pkt->handler->try_run(pkt, is_done);
pkt              1118 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
pkt              1120 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
pkt              1124 drivers/usb/renesas_usbhs/fifo.c 	return usbhsf_dma_try_pop_with_rx_irq(pkt, is_done);
pkt              1127 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_pop_done_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
pkt              1129 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt              1133 drivers/usb/renesas_usbhs/fifo.c 	usbhsf_dma_unmap(pkt);
pkt              1136 drivers/usb/renesas_usbhs/fifo.c 	pkt->actual += pkt->trans;
pkt              1138 drivers/usb/renesas_usbhs/fifo.c 	if ((pkt->actual == pkt->length) ||	/* receive all data */
pkt              1139 drivers/usb/renesas_usbhs/fifo.c 	    (pkt->trans < maxp)) {		/* short packet */
pkt              1145 drivers/usb/renesas_usbhs/fifo.c 		usbhsf_prepare_pop(pkt, is_done);
pkt              1151 drivers/usb/renesas_usbhs/fifo.c static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
pkt              1154 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt              1159 drivers/usb/renesas_usbhs/fifo.c 	dmaengine_tx_status(chan, pkt->cookie, &state);
pkt              1160 drivers/usb/renesas_usbhs/fifo.c 	received_size = pkt->length - state.residue;
pkt              1171 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
pkt              1174 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt              1177 drivers/usb/renesas_usbhs/fifo.c 	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
pkt              1189 drivers/usb/renesas_usbhs/fifo.c 	pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
pkt              1193 drivers/usb/renesas_usbhs/fifo.c 	usbhsf_dma_unmap(pkt);
pkt              1202 drivers/usb/renesas_usbhs/fifo.c static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
pkt              1204 drivers/usb/renesas_usbhs/fifo.c 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
pkt              1207 drivers/usb/renesas_usbhs/fifo.c 		return usbhsf_dma_pop_done_with_usb_dmac(pkt, is_done);
pkt              1209 drivers/usb/renesas_usbhs/fifo.c 		return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
pkt                50 drivers/usb/renesas_usbhs/fifo.h 		     struct usbhs_pkt *pkt);
pkt                63 drivers/usb/renesas_usbhs/fifo.h 	int (*prepare)(struct usbhs_pkt *pkt, int *is_done);
pkt                64 drivers/usb/renesas_usbhs/fifo.h 	int (*try_run)(struct usbhs_pkt *pkt, int *is_done);
pkt                65 drivers/usb/renesas_usbhs/fifo.h 	int (*dma_done)(struct usbhs_pkt *pkt, int *is_done);
pkt                93 drivers/usb/renesas_usbhs/fifo.h void usbhs_pkt_init(struct usbhs_pkt *pkt);
pkt                94 drivers/usb/renesas_usbhs/fifo.h void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
pkt                96 drivers/usb/renesas_usbhs/fifo.h 				 struct usbhs_pkt *pkt),
pkt                98 drivers/usb/renesas_usbhs/fifo.h struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
pkt                24 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhs_pkt	pkt;
pkt               103 drivers/usb/renesas_usbhs/mod_gadget.c #define usbhsg_ureq_to_pkt(u)		(&(u)->pkt)
pkt               105 drivers/usb/renesas_usbhs/mod_gadget.c 	container_of(i, struct usbhsg_request, pkt)
pkt               149 drivers/usb/renesas_usbhs/mod_gadget.c static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
pkt               151 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               153 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
pkt               156 drivers/usb/renesas_usbhs/mod_gadget.c 	ureq->req.actual = pkt->actual;
pkt               170 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq);
pkt               175 drivers/usb/renesas_usbhs/mod_gadget.c 	usbhs_pkt_push(pipe, pkt, usbhsg_queue_done,
pkt               187 drivers/usb/renesas_usbhs/mod_gadget.c static int usbhsg_dma_map_ctrl(struct device *dma_dev, struct usbhs_pkt *pkt,
pkt               190 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
pkt               192 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhs_pipe *pipe = pkt->pipe;
pkt               206 drivers/usb/renesas_usbhs/mod_gadget.c 		pkt->dma = req->dma;
pkt               307 drivers/usb/renesas_usbhs/mod_gadget.c 	kfree(ureq->pkt.buf);
pkt               559 drivers/usb/renesas_usbhs/mod_gadget.c 	struct usbhs_pkt *pkt;
pkt               562 drivers/usb/renesas_usbhs/mod_gadget.c 		pkt = usbhs_pkt_pop(pipe, NULL);
pkt               563 drivers/usb/renesas_usbhs/mod_gadget.c 		if (!pkt)
pkt               566 drivers/usb/renesas_usbhs/mod_gadget.c 		usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ESHUTDOWN);
pkt               678 drivers/usb/renesas_usbhs/mod_gadget.c 	WARN_ON(!list_empty(&ureq->pkt.node));
pkt                66 drivers/usb/renesas_usbhs/mod_host.c 	struct usbhs_pkt	pkt;
pkt               150 drivers/usb/renesas_usbhs/mod_host.c 	container_of((void *)p, struct usbhsh_request, pkt)
pkt               165 drivers/usb/renesas_usbhs/mod_host.c 	usbhs_pkt_init(&ureq->pkt);
pkt               201 drivers/usb/renesas_usbhs/mod_host.c 					  struct usbhs_pkt *pkt)
pkt               231 drivers/usb/renesas_usbhs/mod_host.c 	if (pkt->zero)
pkt               629 drivers/usb/renesas_usbhs/mod_host.c static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
pkt               631 drivers/usb/renesas_usbhs/mod_host.c 	struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
pkt               648 drivers/usb/renesas_usbhs/mod_host.c 	urb->actual_length = pkt->actual;
pkt               650 drivers/usb/renesas_usbhs/mod_host.c 	usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
pkt               696 drivers/usb/renesas_usbhs/mod_host.c 	usbhs_pkt_push(pipe, &ureq->pkt, usbhsh_queue_done,
pkt               708 drivers/usb/renesas_usbhs/mod_host.c 	struct usbhs_pkt *pkt;
pkt               711 drivers/usb/renesas_usbhs/mod_host.c 		pkt = usbhs_pkt_pop(pipe, NULL);
pkt               712 drivers/usb/renesas_usbhs/mod_host.c 		if (!pkt)
pkt               720 drivers/usb/renesas_usbhs/mod_host.c 		usbhsh_queue_done(priv, pkt);
pkt               798 drivers/usb/renesas_usbhs/mod_host.c 					  struct usbhs_pkt *pkt)
pkt               800 drivers/usb/renesas_usbhs/mod_host.c 	struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
pkt               826 drivers/usb/renesas_usbhs/mod_host.c 	usbhs_pkt_push(pipe, &ureq->pkt,
pkt               856 drivers/usb/renesas_usbhs/mod_host.c 	usbhs_pkt_push(pipe, &ureq->pkt,
pkt               917 drivers/usb/renesas_usbhs/mod_host.c static int usbhsh_dma_map_ctrl(struct device *dma_dev, struct usbhs_pkt *pkt,
pkt               921 drivers/usb/renesas_usbhs/mod_host.c 		struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
pkt               928 drivers/usb/renesas_usbhs/mod_host.c 		pkt->dma = urb->transfer_dma;
pkt               929 drivers/usb/renesas_usbhs/mod_host.c 		if (!pkt->dma)
pkt              1038 drivers/usb/renesas_usbhs/mod_host.c 		struct usbhs_pkt *pkt = &ureq->pkt;
pkt              1040 drivers/usb/renesas_usbhs/mod_host.c 		usbhs_pkt_pop(pkt->pipe, pkt);
pkt              1041 drivers/usb/renesas_usbhs/mod_host.c 		usbhsh_queue_done(priv, pkt);
pkt               673 drivers/usb/renesas_usbhs/pipe.c 					 struct usbhs_pkt *pkt, int map))
pkt                41 drivers/usb/renesas_usbhs/pipe.h 	int (*dma_map_ctrl)(struct device *dma_dev, struct usbhs_pkt *pkt,
pkt                80 drivers/usb/renesas_usbhs/pipe.h 					 struct usbhs_pkt *pkt, int map));
pkt               263 drivers/usb/serial/garmin_gps.c 	struct garmin_packet *pkt;
pkt               267 drivers/usb/serial/garmin_gps.c 		pkt = kmalloc(sizeof(struct garmin_packet)+data_length,
pkt               269 drivers/usb/serial/garmin_gps.c 		if (!pkt)
pkt               272 drivers/usb/serial/garmin_gps.c 		pkt->size = data_length;
pkt               273 drivers/usb/serial/garmin_gps.c 		memcpy(pkt->data, data, data_length);
pkt               278 drivers/usb/serial/garmin_gps.c 		pkt->seq = garmin_data_p->seq_counter++;
pkt               279 drivers/usb/serial/garmin_gps.c 		list_add_tail(&pkt->list, &garmin_data_p->pktlist);
pkt               285 drivers/usb/serial/garmin_gps.c 			pkt->seq, data_length);
pkt               335 drivers/usb/serial/garmin_gps.c 	__u8 pkt[10];
pkt               337 drivers/usb/serial/garmin_gps.c 	__u8 *ptr = pkt;
pkt               361 drivers/usb/serial/garmin_gps.c 	l = ptr-pkt;
pkt               363 drivers/usb/serial/garmin_gps.c 	send_to_tty(garmin_data_p->port, pkt, l);
pkt               703 drivers/usb/serial/garmin_gps.c 	struct garmin_packet *pkt = NULL;
pkt               705 drivers/usb/serial/garmin_gps.c 	while ((pkt = pkt_pop(garmin_data_p)) != NULL) {
pkt               706 drivers/usb/serial/garmin_gps.c 		dev_dbg(&garmin_data_p->port->dev, "%s - next pkt: %d\n", __func__, pkt->seq);
pkt               707 drivers/usb/serial/garmin_gps.c 		result = gsp_send(garmin_data_p, pkt->data, pkt->size);
pkt               709 drivers/usb/serial/garmin_gps.c 			kfree(pkt);
pkt               712 drivers/usb/serial/garmin_gps.c 		kfree(pkt);
pkt               805 drivers/usb/serial/garmin_gps.c 	__le32 *pkt = (__le32 *)garmin_data_p->privpkt;
pkt               807 drivers/usb/serial/garmin_gps.c 	pkt[0] = __cpu_to_le32(GARMIN_LAYERID_PRIVATE);
pkt               808 drivers/usb/serial/garmin_gps.c 	pkt[1] = __cpu_to_le32(PRIV_PKTID_INFO_RESP);
pkt               809 drivers/usb/serial/garmin_gps.c 	pkt[2] = __cpu_to_le32(12);
pkt               810 drivers/usb/serial/garmin_gps.c 	pkt[3] = __cpu_to_le32(VERSION_MAJOR << 16 | VERSION_MINOR);
pkt               811 drivers/usb/serial/garmin_gps.c 	pkt[4] = __cpu_to_le32(garmin_data_p->mode);
pkt               812 drivers/usb/serial/garmin_gps.c 	pkt[5] = __cpu_to_le32(garmin_data_p->serial_num);
pkt               814 drivers/usb/serial/garmin_gps.c 	send_to_tty(port, (__u8 *)pkt, 6 * 4);
pkt              1297 drivers/usb/serial/garmin_gps.c 	struct garmin_packet *pkt;
pkt              1300 drivers/usb/serial/garmin_gps.c 		pkt = pkt_pop(garmin_data_p);
pkt              1301 drivers/usb/serial/garmin_gps.c 		if (pkt != NULL) {
pkt              1302 drivers/usb/serial/garmin_gps.c 			send_to_tty(garmin_data_p->port, pkt->data, pkt->size);
pkt              1303 drivers/usb/serial/garmin_gps.c 			kfree(pkt);
pkt               101 drivers/vhost/vsock.c 		struct virtio_vsock_pkt *pkt;
pkt               115 drivers/vhost/vsock.c 		pkt = list_first_entry(&vsock->send_pkt_list,
pkt               117 drivers/vhost/vsock.c 		list_del_init(&pkt->list);
pkt               124 drivers/vhost/vsock.c 			list_add(&pkt->list, &vsock->send_pkt_list);
pkt               131 drivers/vhost/vsock.c 			list_add(&pkt->list, &vsock->send_pkt_list);
pkt               145 drivers/vhost/vsock.c 			virtio_transport_free_pkt(pkt);
pkt               151 drivers/vhost/vsock.c 		if (iov_len < sizeof(pkt->hdr)) {
pkt               152 drivers/vhost/vsock.c 			virtio_transport_free_pkt(pkt);
pkt               158 drivers/vhost/vsock.c 		payload_len = pkt->len - pkt->off;
pkt               163 drivers/vhost/vsock.c 		if (payload_len > iov_len - sizeof(pkt->hdr))
pkt               164 drivers/vhost/vsock.c 			payload_len = iov_len - sizeof(pkt->hdr);
pkt               167 drivers/vhost/vsock.c 		pkt->hdr.len = cpu_to_le32(payload_len);
pkt               169 drivers/vhost/vsock.c 		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
pkt               170 drivers/vhost/vsock.c 		if (nbytes != sizeof(pkt->hdr)) {
pkt               171 drivers/vhost/vsock.c 			virtio_transport_free_pkt(pkt);
pkt               176 drivers/vhost/vsock.c 		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
pkt               179 drivers/vhost/vsock.c 			virtio_transport_free_pkt(pkt);
pkt               187 drivers/vhost/vsock.c 		virtio_transport_deliver_tap_pkt(pkt);
pkt               189 drivers/vhost/vsock.c 		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
pkt               192 drivers/vhost/vsock.c 		pkt->off += payload_len;
pkt               198 drivers/vhost/vsock.c 		if (pkt->off < pkt->len) {
pkt               200 drivers/vhost/vsock.c 			list_add(&pkt->list, &vsock->send_pkt_list);
pkt               203 drivers/vhost/vsock.c 			if (pkt->reply) {
pkt               215 drivers/vhost/vsock.c 			virtio_transport_free_pkt(pkt);
pkt               240 drivers/vhost/vsock.c vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
pkt               243 drivers/vhost/vsock.c 	int len = pkt->len;
pkt               248 drivers/vhost/vsock.c 	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
pkt               251 drivers/vhost/vsock.c 		virtio_transport_free_pkt(pkt);
pkt               255 drivers/vhost/vsock.c 	if (pkt->reply)
pkt               259 drivers/vhost/vsock.c 	list_add_tail(&pkt->list, &vsock->send_pkt_list);
pkt               272 drivers/vhost/vsock.c 	struct virtio_vsock_pkt *pkt, *n;
pkt               285 drivers/vhost/vsock.c 	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
pkt               286 drivers/vhost/vsock.c 		if (pkt->vsk != vsk)
pkt               288 drivers/vhost/vsock.c 		list_move(&pkt->list, &freeme);
pkt               292 drivers/vhost/vsock.c 	list_for_each_entry_safe(pkt, n, &freeme, list) {
pkt               293 drivers/vhost/vsock.c 		if (pkt->reply)
pkt               295 drivers/vhost/vsock.c 		list_del(&pkt->list);
pkt               296 drivers/vhost/vsock.c 		virtio_transport_free_pkt(pkt);
pkt               318 drivers/vhost/vsock.c 	struct virtio_vsock_pkt *pkt;
pkt               328 drivers/vhost/vsock.c 	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
pkt               329 drivers/vhost/vsock.c 	if (!pkt)
pkt               335 drivers/vhost/vsock.c 	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
pkt               336 drivers/vhost/vsock.c 	if (nbytes != sizeof(pkt->hdr)) {
pkt               338 drivers/vhost/vsock.c 		       sizeof(pkt->hdr), nbytes);
pkt               339 drivers/vhost/vsock.c 		kfree(pkt);
pkt               343 drivers/vhost/vsock.c 	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
pkt               344 drivers/vhost/vsock.c 		pkt->len = le32_to_cpu(pkt->hdr.len);
pkt               347 drivers/vhost/vsock.c 	if (!pkt->len)
pkt               348 drivers/vhost/vsock.c 		return pkt;
pkt               351 drivers/vhost/vsock.c 	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
pkt               352 drivers/vhost/vsock.c 		kfree(pkt);
pkt               356 drivers/vhost/vsock.c 	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
pkt               357 drivers/vhost/vsock.c 	if (!pkt->buf) {
pkt               358 drivers/vhost/vsock.c 		kfree(pkt);
pkt               362 drivers/vhost/vsock.c 	pkt->buf_len = pkt->len;
pkt               364 drivers/vhost/vsock.c 	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
pkt               365 drivers/vhost/vsock.c 	if (nbytes != pkt->len) {
pkt               367 drivers/vhost/vsock.c 		       pkt->len, nbytes);
pkt               368 drivers/vhost/vsock.c 		virtio_transport_free_pkt(pkt);
pkt               372 drivers/vhost/vsock.c 	return pkt;
pkt               393 drivers/vhost/vsock.c 	struct virtio_vsock_pkt *pkt;
pkt               428 drivers/vhost/vsock.c 		pkt = vhost_vsock_alloc_pkt(vq, out, in);
pkt               429 drivers/vhost/vsock.c 		if (!pkt) {
pkt               434 drivers/vhost/vsock.c 		len = pkt->len;
pkt               437 drivers/vhost/vsock.c 		virtio_transport_deliver_tap_pkt(pkt);
pkt               440 drivers/vhost/vsock.c 		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
pkt               441 drivers/vhost/vsock.c 		    le64_to_cpu(pkt->hdr.dst_cid) ==
pkt               443 drivers/vhost/vsock.c 			virtio_transport_recv_pkt(pkt);
pkt               445 drivers/vhost/vsock.c 			virtio_transport_free_pkt(pkt);
pkt               447 drivers/vhost/vsock.c 		len += sizeof(pkt->hdr);
pkt               657 drivers/vhost/vsock.c 		struct virtio_vsock_pkt *pkt;
pkt               659 drivers/vhost/vsock.c 		pkt = list_first_entry(&vsock->send_pkt_list,
pkt               661 drivers/vhost/vsock.c 		list_del_init(&pkt->list);
pkt               662 drivers/vhost/vsock.c 		virtio_transport_free_pkt(pkt);
pkt               541 fs/autofs/expire.c 	struct autofs_packet_expire pkt;
pkt               546 fs/autofs/expire.c 	memset(&pkt, 0, sizeof(pkt));
pkt               548 fs/autofs/expire.c 	pkt.hdr.proto_version = sbi->version;
pkt               549 fs/autofs/expire.c 	pkt.hdr.type = autofs_ptype_expire;
pkt               555 fs/autofs/expire.c 	pkt.len = dentry->d_name.len;
pkt               556 fs/autofs/expire.c 	memcpy(pkt.name, dentry->d_name.name, pkt.len);
pkt               557 fs/autofs/expire.c 	pkt.name[pkt.len] = '\0';
pkt               559 fs/autofs/expire.c 	if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
pkt                86 fs/autofs/waitq.c 	} pkt;
pkt                95 fs/autofs/waitq.c 	memset(&pkt, 0, sizeof(pkt)); /* For security reasons */
pkt                97 fs/autofs/waitq.c 	pkt.hdr.proto_version = sbi->version;
pkt                98 fs/autofs/waitq.c 	pkt.hdr.type = type;
pkt               104 fs/autofs/waitq.c 		struct autofs_packet_missing *mp = &pkt.v4_pkt.missing;
pkt               117 fs/autofs/waitq.c 					&pkt.v4_pkt.expire_multi;
pkt               136 fs/autofs/waitq.c 		struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
pkt               163 fs/autofs/waitq.c 	switch (ret = autofs_write(sbi, pipe, &pkt, pktsz)) {
pkt              1570 include/linux/hyperv.h 		   const struct vmpacket_descriptor *pkt);
pkt              1580 include/linux/hyperv.h 		 const struct vmpacket_descriptor *pkt)
pkt              1584 include/linux/hyperv.h 	nxt = __hv_pkt_iter_next(channel, pkt);
pkt              1591 include/linux/hyperv.h #define foreach_vmbus_pkt(pkt, channel) \
pkt              1592 include/linux/hyperv.h 	for (pkt = hv_pkt_iter_first(channel); pkt; \
pkt              1593 include/linux/hyperv.h 	    pkt = hv_pkt_iter_next(channel, pkt))
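
The three include/linux/hyperv.h lines above show the complete foreach_vmbus_pkt() iterator. As a minimal, hypothetical sketch (the callback name and the pr_debug() reporting are illustrative, not taken from the listing), a VMBus channel callback could drain its ring buffer like this:

#include <linux/hyperv.h>

/* Hypothetical channel callback: walk every pending packet on the ring. */
static void example_onchannel_callback(void *context)
{
	struct vmbus_channel *channel = context;
	const struct vmpacket_descriptor *pkt;

	foreach_vmbus_pkt(pkt, channel) {
		/* len8/offset8 are in 8-byte units; their difference is the payload */
		u32 payload = (pkt->len8 - pkt->offset8) << 3;

		pr_debug("vmbus pkt: type %u, trans_id %llu, %u payload bytes\n",
			 pkt->type, pkt->trans_id, payload);
	}
}
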
pkt               146 include/linux/mroute_base.h 			unsigned long pkt;
pkt               657 include/linux/qed/qed_rdma_if.h 				     struct qed_ll2_tx_pkt_info *pkt,
pkt                58 include/linux/soc/mediatek/mtk-cmdq.h void cmdq_pkt_destroy(struct cmdq_pkt *pkt);
pkt                69 include/linux/soc/mediatek/mtk-cmdq.h int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
pkt                81 include/linux/soc/mediatek/mtk-cmdq.h int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
pkt                91 include/linux/soc/mediatek/mtk-cmdq.h int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
pkt               100 include/linux/soc/mediatek/mtk-cmdq.h int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event);
pkt               115 include/linux/soc/mediatek/mtk-cmdq.h int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
pkt               128 include/linux/soc/mediatek/mtk-cmdq.h int cmdq_pkt_flush(struct cmdq_pkt *pkt);
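
Taken together, the mtk-cmdq.h prototypes above cover a typical GCE command-buffer flow. A minimal sketch, assuming an already-created struct cmdq_pkt and made-up subsys/offset/event numbers (only the functions listed above are used):

#include <linux/soc/mediatek/mtk-cmdq.h>

/* Sketch only: wait on an example GCE event, write one register, flush. */
static int example_cmdq_program(struct cmdq_pkt *pkt)
{
	int err;

	err = cmdq_pkt_wfe(pkt, 42);			/* 42: hypothetical event id */
	if (err)
		return err;

	err = cmdq_pkt_write(pkt, 3, 0x40, 0x1);	/* subsys 3, offset 0x40, value 0x1 */
	if (err)
		return err;

	return cmdq_pkt_flush(pkt);			/* submit and wait for completion */
}
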
pkt               126 include/linux/soc/qcom/apr.h int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt);
pkt                44 include/linux/usb/musb.h #define MUSB_EP_FIFO(ep, st, m, pkt)		\
pkt                49 include/linux/usb/musb.h 	.maxpacket	= pkt,			\
pkt                52 include/linux/usb/musb.h #define MUSB_EP_FIFO_SINGLE(ep, st, pkt)	\
pkt                53 include/linux/usb/musb.h 	MUSB_EP_FIFO(ep, st, BUF_SINGLE, pkt)
pkt                55 include/linux/usb/musb.h #define MUSB_EP_FIFO_DOUBLE(ep, st, pkt)	\
pkt                56 include/linux/usb/musb.h 	MUSB_EP_FIFO(ep, st, BUF_DOUBLE, pkt)
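
The MUSB_EP_FIFO_SINGLE()/MUSB_EP_FIFO_DOUBLE() helpers above expand to struct musb_fifo_cfg initializers, which board code uses to declare its endpoint FIFO layout. The endpoint numbers and packet sizes below are invented for illustration:

#include <linux/usb/musb.h>

/* Example FIFO layout: single-buffered EP1, double-buffered EP2. */
static struct musb_fifo_cfg example_fifo_cfg[] = {
	MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
	MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
	MUSB_EP_FIFO_DOUBLE(2, FIFO_TX, 512),
	MUSB_EP_FIFO_DOUBLE(2, FIFO_RX, 512),
};
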
pkt                77 include/linux/virtio_vsock.h 	int (*send_pkt)(struct virtio_vsock_pkt *pkt);
pkt               153 include/linux/virtio_vsock.h void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
pkt               154 include/linux/virtio_vsock.h void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
pkt               155 include/linux/virtio_vsock.h void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
pkt               158 include/linux/virtio_vsock.h void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);
pkt                23 include/net/caif/cfpkt.h void cfpkt_destroy(struct cfpkt *pkt);
pkt                33 include/net/caif/cfpkt.h int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
pkt                35 include/net/caif/cfpkt.h static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
pkt                39 include/net/caif/cfpkt.h 	cfpkt_extr_head(pkt, &tmp, 1);
pkt                44 include/net/caif/cfpkt.h static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
pkt                48 include/net/caif/cfpkt.h 	cfpkt_extr_head(pkt, &tmp, 2);
pkt                53 include/net/caif/cfpkt.h static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
pkt                57 include/net/caif/cfpkt.h 	cfpkt_extr_head(pkt, &tmp, 4);
pkt                71 include/net/caif/cfpkt.h int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len);
pkt                81 include/net/caif/cfpkt.h int cfpkt_extr_trail(struct cfpkt *pkt, void *data, u16 len);
pkt                92 include/net/caif/cfpkt.h int cfpkt_add_head(struct cfpkt *pkt, const void *data, u16 len);
pkt               103 include/net/caif/cfpkt.h int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len);
pkt               113 include/net/caif/cfpkt.h int cfpkt_pad_trail(struct cfpkt *pkt, u16 len);
pkt               122 include/net/caif/cfpkt.h int cfpkt_addbdy(struct cfpkt *pkt, const u8 data);
pkt               132 include/net/caif/cfpkt.h int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len);
pkt               139 include/net/caif/cfpkt.h bool cfpkt_more(struct cfpkt *pkt);
pkt               148 include/net/caif/cfpkt.h bool cfpkt_erroneous(struct cfpkt *pkt);
pkt               155 include/net/caif/cfpkt.h u16 cfpkt_getlen(struct cfpkt *pkt);
pkt               163 include/net/caif/cfpkt.h int cfpkt_setlen(struct cfpkt *pkt, u16 len);
pkt               185 include/net/caif/cfpkt.h struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
pkt               201 include/net/caif/cfpkt.h int cfpkt_iterate(struct cfpkt *pkt,
pkt               216 include/net/caif/cfpkt.h void *cfpkt_tonative(struct cfpkt *pkt);
pkt               223 include/net/caif/cfpkt.h struct caif_payload_info *cfpkt_info(struct cfpkt *pkt);
pkt               230 include/net/caif/cfpkt.h void cfpkt_set_prio(struct cfpkt *pkt, int prio);
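
The cfpkt helpers above are typically called from a CAIF layer's receive/transmit hooks (net/caif/caif_usb.c further down is one in-tree caller). A minimal, illustrative receive hook that strips a one-byte header, assuming a struct cflayer already wired into a layer stack:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>

/* Sketch: pull one header byte, then pass the packet to the layer above. */
static int example_caif_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 hdr = cfpkt_extr_head_u8(pkt);

	if (cfpkt_erroneous(pkt)) {	/* bail out if the packet was too short */
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	pr_debug("caif: stripped header byte 0x%02x\n", hdr);
	return layr->up->receive(layr->up, pkt);
}
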
pkt                 7 include/net/netfilter/nf_dup_netdev.h void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif);
pkt                 8 include/net/netfilter/nf_dup_netdev.h void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif);
pkt                29 include/net/netfilter/nf_tables.h static inline struct net *nft_net(const struct nft_pktinfo *pkt)
pkt                31 include/net/netfilter/nf_tables.h 	return pkt->xt.state->net;
pkt                34 include/net/netfilter/nf_tables.h static inline unsigned int nft_hook(const struct nft_pktinfo *pkt)
pkt                36 include/net/netfilter/nf_tables.h 	return pkt->xt.state->hook;
pkt                39 include/net/netfilter/nf_tables.h static inline u8 nft_pf(const struct nft_pktinfo *pkt)
pkt                41 include/net/netfilter/nf_tables.h 	return pkt->xt.state->pf;
pkt                44 include/net/netfilter/nf_tables.h static inline const struct net_device *nft_in(const struct nft_pktinfo *pkt)
pkt                46 include/net/netfilter/nf_tables.h 	return pkt->xt.state->in;
pkt                49 include/net/netfilter/nf_tables.h static inline const struct net_device *nft_out(const struct nft_pktinfo *pkt)
pkt                51 include/net/netfilter/nf_tables.h 	return pkt->xt.state->out;
pkt                54 include/net/netfilter/nf_tables.h static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
pkt                58 include/net/netfilter/nf_tables.h 	pkt->skb = skb;
pkt                59 include/net/netfilter/nf_tables.h 	pkt->xt.state = state;
pkt                62 include/net/netfilter/nf_tables.h static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
pkt                65 include/net/netfilter/nf_tables.h 	pkt->tprot_set = false;
pkt                66 include/net/netfilter/nf_tables.h 	pkt->tprot = 0;
pkt                67 include/net/netfilter/nf_tables.h 	pkt->xt.thoff = 0;
pkt                68 include/net/netfilter/nf_tables.h 	pkt->xt.fragoff = 0;
pkt               779 include/net/netfilter/nf_tables.h 						const struct nft_pktinfo *pkt);
pkt              1001 include/net/netfilter/nf_tables.h unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
pkt              1132 include/net/netfilter/nf_tables.h 						const struct nft_pktinfo *pkt);
pkt              1207 include/net/netfilter/nf_tables.h 	const struct nft_pktinfo	*pkt;
pkt              1217 include/net/netfilter/nf_tables.h void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
pkt                82 include/net/netfilter/nf_tables_core.h 		       struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt                84 include/net/netfilter/nf_tables_core.h 		  struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt                86 include/net/netfilter/nf_tables_core.h 		     struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt                88 include/net/netfilter/nf_tables_core.h 		      struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt                90 include/net/netfilter/nf_tables_core.h 			struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt                92 include/net/netfilter/nf_tables_core.h 		      struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt                94 include/net/netfilter/nf_tables_core.h 		    struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt                96 include/net/netfilter/nf_tables_core.h 			struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt                98 include/net/netfilter/nf_tables_core.h 		     struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt               100 include/net/netfilter/nf_tables_core.h 		     struct nft_regs *regs, const struct nft_pktinfo *pkt);
pkt                 8 include/net/netfilter/nf_tables_ipv4.h static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
pkt                13 include/net/netfilter/nf_tables_ipv4.h 	ip = ip_hdr(pkt->skb);
pkt                14 include/net/netfilter/nf_tables_ipv4.h 	pkt->tprot_set = true;
pkt                15 include/net/netfilter/nf_tables_ipv4.h 	pkt->tprot = ip->protocol;
pkt                16 include/net/netfilter/nf_tables_ipv4.h 	pkt->xt.thoff = ip_hdrlen(pkt->skb);
pkt                17 include/net/netfilter/nf_tables_ipv4.h 	pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
pkt                20 include/net/netfilter/nf_tables_ipv4.h static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
pkt                41 include/net/netfilter/nf_tables_ipv4.h 	pkt->tprot_set = true;
pkt                42 include/net/netfilter/nf_tables_ipv4.h 	pkt->tprot = iph->protocol;
pkt                43 include/net/netfilter/nf_tables_ipv4.h 	pkt->xt.thoff = thoff;
pkt                44 include/net/netfilter/nf_tables_ipv4.h 	pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
pkt                49 include/net/netfilter/nf_tables_ipv4.h static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
pkt                52 include/net/netfilter/nf_tables_ipv4.h 	if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
pkt                53 include/net/netfilter/nf_tables_ipv4.h 		nft_set_pktinfo_unspec(pkt, skb);
pkt                 9 include/net/netfilter/nf_tables_ipv6.h static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
pkt                16 include/net/netfilter/nf_tables_ipv6.h 	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
pkt                18 include/net/netfilter/nf_tables_ipv6.h 		nft_set_pktinfo_unspec(pkt, skb);
pkt                22 include/net/netfilter/nf_tables_ipv6.h 	pkt->tprot_set = true;
pkt                23 include/net/netfilter/nf_tables_ipv6.h 	pkt->tprot = protohdr;
pkt                24 include/net/netfilter/nf_tables_ipv6.h 	pkt->xt.thoff = thoff;
pkt                25 include/net/netfilter/nf_tables_ipv6.h 	pkt->xt.fragoff = frag_off;
pkt                28 include/net/netfilter/nf_tables_ipv6.h static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
pkt                51 include/net/netfilter/nf_tables_ipv6.h 	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
pkt                55 include/net/netfilter/nf_tables_ipv6.h 	pkt->tprot_set = true;
pkt                56 include/net/netfilter/nf_tables_ipv6.h 	pkt->tprot = protohdr;
pkt                57 include/net/netfilter/nf_tables_ipv6.h 	pkt->xt.thoff = thoff;
pkt                58 include/net/netfilter/nf_tables_ipv6.h 	pkt->xt.fragoff = frag_off;
pkt                66 include/net/netfilter/nf_tables_ipv6.h static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
pkt                69 include/net/netfilter/nf_tables_ipv6.h 	if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
pkt                70 include/net/netfilter/nf_tables_ipv6.h 		nft_set_pktinfo_unspec(pkt, skb);
pkt                29 include/net/netfilter/nft_fib.h 			const struct nft_pktinfo *pkt);
pkt                31 include/net/netfilter/nft_fib.h 		   const struct nft_pktinfo *pkt);
pkt                34 include/net/netfilter/nft_fib.h 			const struct nft_pktinfo *pkt);
pkt                36 include/net/netfilter/nft_fib.h 		   const struct nft_pktinfo *pkt);
pkt                33 include/net/netfilter/nft_meta.h 		       const struct nft_pktinfo *pkt);
pkt                37 include/net/netfilter/nft_meta.h 		       const struct nft_pktinfo *pkt);
pkt               391 kernel/debug/gdbstub.c static void error_packet(char *pkt, int error)
pkt               394 kernel/debug/gdbstub.c 	pkt[0] = 'E';
pkt               395 kernel/debug/gdbstub.c 	pkt[1] = hex_asc[(error / 10)];
pkt               396 kernel/debug/gdbstub.c 	pkt[2] = hex_asc[(error % 10)];
pkt               397 kernel/debug/gdbstub.c 	pkt[3] = '\0';
pkt               408 kernel/debug/gdbstub.c static char *pack_threadid(char *pkt, unsigned char *id)
pkt               416 kernel/debug/gdbstub.c 			pkt = hex_byte_pack(pkt, *id);
pkt               423 kernel/debug/gdbstub.c 		pkt = hex_byte_pack(pkt, 0);
pkt               425 kernel/debug/gdbstub.c 	return pkt;
pkt               245 net/bluetooth/bnep/core.c 			u8 pkt[3];
pkt               246 net/bluetooth/bnep/core.c 			pkt[0] = BNEP_CONTROL;
pkt               247 net/bluetooth/bnep/core.c 			pkt[1] = BNEP_CMD_NOT_UNDERSTOOD;
pkt               248 net/bluetooth/bnep/core.c 			pkt[2] = cmd;
pkt               249 net/bluetooth/bnep/core.c 			err = bnep_send(s, pkt, sizeof(pkt));
pkt              5626 net/bluetooth/l2cap_core.c 	struct l2cap_le_credits *pkt;
pkt              5630 net/bluetooth/l2cap_core.c 	if (cmd_len != sizeof(*pkt))
pkt              5633 net/bluetooth/l2cap_core.c 	pkt = (struct l2cap_le_credits *) data;
pkt              5634 net/bluetooth/l2cap_core.c 	cid	= __le16_to_cpu(pkt->cid);
pkt              5635 net/bluetooth/l2cap_core.c 	credits	= __le16_to_cpu(pkt->credits);
pkt              6765 net/bluetooth/l2cap_core.c 	struct l2cap_le_credits pkt;
pkt              6777 net/bluetooth/l2cap_core.c 	pkt.cid     = cpu_to_le16(chan->scid);
pkt              6778 net/bluetooth/l2cap_core.c 	pkt.credits = cpu_to_le16(return_credits);
pkt              6782 net/bluetooth/l2cap_core.c 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
pkt                23 net/bridge/netfilter/nft_meta_bridge.c 				     const struct nft_pktinfo *pkt)
pkt                26 net/bridge/netfilter/nft_meta_bridge.c 	const struct net_device *in = nft_in(pkt), *out = nft_out(pkt);
pkt                60 net/bridge/netfilter/nft_meta_bridge.c 		return nft_meta_get_eval(expr, regs, pkt);
pkt               299 net/bridge/netfilter/nft_reject_bridge.c 				   const struct nft_pktinfo *pkt)
pkt               302 net/bridge/netfilter/nft_reject_bridge.c 	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
pkt               308 net/bridge/netfilter/nft_reject_bridge.c 	switch (eth_hdr(pkt->skb)->h_proto) {
pkt               312 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
pkt               313 net/bridge/netfilter/nft_reject_bridge.c 						      nft_in(pkt),
pkt               314 net/bridge/netfilter/nft_reject_bridge.c 						      nft_hook(pkt),
pkt               318 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
pkt               319 net/bridge/netfilter/nft_reject_bridge.c 							nft_in(pkt),
pkt               320 net/bridge/netfilter/nft_reject_bridge.c 							nft_hook(pkt));
pkt               323 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
pkt               324 net/bridge/netfilter/nft_reject_bridge.c 						      nft_in(pkt),
pkt               325 net/bridge/netfilter/nft_reject_bridge.c 						      nft_hook(pkt),
pkt               333 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
pkt               334 net/bridge/netfilter/nft_reject_bridge.c 						      nft_in(pkt),
pkt               335 net/bridge/netfilter/nft_reject_bridge.c 						      nft_hook(pkt),
pkt               339 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
pkt               340 net/bridge/netfilter/nft_reject_bridge.c 							nft_in(pkt),
pkt               341 net/bridge/netfilter/nft_reject_bridge.c 							nft_hook(pkt));
pkt               344 net/bridge/netfilter/nft_reject_bridge.c 			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
pkt               345 net/bridge/netfilter/nft_reject_bridge.c 						      nft_in(pkt),
pkt               346 net/bridge/netfilter/nft_reject_bridge.c 						      nft_hook(pkt),
pkt               166 net/caif/caif_dev.c static int transmit(struct cflayer *layer, struct cfpkt *pkt)
pkt               176 net/caif/caif_dev.c 	skb = cfpkt_tonative(pkt);
pkt               248 net/caif/caif_dev.c 	struct cfpkt *pkt;
pkt               252 net/caif/caif_dev.c 	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
pkt               268 net/caif/caif_dev.c 	err = caifd->layer.up->receive(caifd->layer.up, pkt);
pkt               272 net/caif/caif_dev.c 		cfpkt_destroy(pkt);
pkt               165 net/caif/caif_socket.c static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
pkt               171 net/caif/caif_socket.c 	skb = cfpkt_tonative(pkt);
pkt               503 net/caif/caif_socket.c 	struct cfpkt *pkt;
pkt               505 net/caif/caif_socket.c 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
pkt               507 net/caif/caif_socket.c 	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
pkt               514 net/caif/caif_socket.c 	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
pkt                38 net/caif/caif_usb.c static int cfusbl_receive(struct cflayer *layr, struct cfpkt *pkt)
pkt                43 net/caif/caif_usb.c 	cfpkt_extr_head(pkt, &hpad, 1);
pkt                44 net/caif/caif_usb.c 	cfpkt_extr_head(pkt, NULL, hpad);
pkt                45 net/caif/caif_usb.c 	return layr->up->receive(layr->up, pkt);
pkt                48 net/caif/caif_usb.c static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
pkt                56 net/caif/caif_usb.c 	skb = cfpkt_tonative(pkt);
pkt                61 net/caif/caif_usb.c 	info = cfpkt_info(pkt);
pkt                71 net/caif/caif_usb.c 	cfpkt_add_head(pkt, zeros, hpad);
pkt                72 net/caif/caif_usb.c 	cfpkt_add_head(pkt, &hpad, 1);
pkt                73 net/caif/caif_usb.c 	cfpkt_add_head(pkt, usbl->tx_eth_hdr, sizeof(usbl->tx_eth_hdr));
pkt                74 net/caif/caif_usb.c 	return layr->dn->transmit(layr->dn, pkt);
pkt                23 net/caif/cfctrl.c 		       int cmd, struct cfpkt *pkt){
pkt                28 net/caif/cfctrl.c 		       int cmd, struct cfpkt *pkt);
pkt                30 net/caif/cfctrl.c static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
pkt               178 net/caif/cfctrl.c 	struct cfpkt *pkt;
pkt               186 net/caif/cfctrl.c 	pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
pkt               187 net/caif/cfctrl.c 	if (!pkt)
pkt               190 net/caif/cfctrl.c 	init_info(cfpkt_info(pkt), cfctrl);
pkt               191 net/caif/cfctrl.c 	cfpkt_info(pkt)->dev_info->id = physlinkid;
pkt               193 net/caif/cfctrl.c 	cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
pkt               194 net/caif/cfctrl.c 	cfpkt_addbdy(pkt, physlinkid);
pkt               195 net/caif/cfctrl.c 	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
pkt               196 net/caif/cfctrl.c 	dn->transmit(dn, pkt);
pkt               210 net/caif/cfctrl.c 	struct cfpkt *pkt;
pkt               225 net/caif/cfctrl.c 	pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
pkt               226 net/caif/cfctrl.c 	if (!pkt)
pkt               228 net/caif/cfctrl.c 	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
pkt               229 net/caif/cfctrl.c 	cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype);
pkt               230 net/caif/cfctrl.c 	cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid);
pkt               231 net/caif/cfctrl.c 	cfpkt_addbdy(pkt, param->endpoint & 0x03);
pkt               237 net/caif/cfctrl.c 		cfpkt_addbdy(pkt, (u8) param->u.video.connid);
pkt               243 net/caif/cfctrl.c 		cfpkt_add_body(pkt, &tmp32, 4);
pkt               250 net/caif/cfctrl.c 		cfpkt_add_body(pkt, &tmp32, 4);
pkt               252 net/caif/cfctrl.c 		cfpkt_add_body(pkt, param->u.rfm.volume,
pkt               257 net/caif/cfctrl.c 		cfpkt_add_body(pkt, &tmp16, 2);
pkt               259 net/caif/cfctrl.c 		cfpkt_add_body(pkt, &tmp16, 2);
pkt               263 net/caif/cfctrl.c 		cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
pkt               265 net/caif/cfctrl.c 		cfpkt_add_body(pkt, &tmp8, 1);
pkt               266 net/caif/cfctrl.c 		cfpkt_add_body(pkt, param->u.utility.params,
pkt               281 net/caif/cfctrl.c 	init_info(cfpkt_info(pkt), cfctrl);
pkt               287 net/caif/cfctrl.c 	cfpkt_info(pkt)->dev_info->id = param->phyid;
pkt               288 net/caif/cfctrl.c 	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
pkt               290 net/caif/cfctrl.c 	    dn->transmit(dn, pkt);
pkt               308 net/caif/cfctrl.c 	struct cfpkt *pkt;
pkt               316 net/caif/cfctrl.c 	pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
pkt               317 net/caif/cfctrl.c 	if (!pkt)
pkt               319 net/caif/cfctrl.c 	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
pkt               320 net/caif/cfctrl.c 	cfpkt_addbdy(pkt, channelid);
pkt               321 net/caif/cfctrl.c 	init_info(cfpkt_info(pkt), cfctrl);
pkt               322 net/caif/cfctrl.c 	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
pkt               324 net/caif/cfctrl.c 	    dn->transmit(dn, pkt);
pkt               350 net/caif/cfctrl.c static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
pkt               362 net/caif/cfctrl.c 	cmdrsp = cfpkt_extr_head_u8(pkt);
pkt               367 net/caif/cfctrl.c 		if (handle_loop(cfctrl, cmd, pkt) != 0)
pkt               385 net/caif/cfctrl.c 			tmp = cfpkt_extr_head_u8(pkt);
pkt               393 net/caif/cfctrl.c 			tmp = cfpkt_extr_head_u8(pkt);
pkt               399 net/caif/cfctrl.c 			endpoint = cfpkt_extr_head_u8(pkt);
pkt               408 net/caif/cfctrl.c 				linkid = cfpkt_extr_head_u8(pkt);
pkt               411 net/caif/cfctrl.c 				tmp = cfpkt_extr_head_u8(pkt);
pkt               416 net/caif/cfctrl.c 				linkid = cfpkt_extr_head_u8(pkt);
pkt               421 net/caif/cfctrl.c 				    cfpkt_extr_head_u32(pkt);
pkt               425 net/caif/cfctrl.c 				linkid = cfpkt_extr_head_u8(pkt);
pkt               433 net/caif/cfctrl.c 				    cfpkt_extr_head_u32(pkt);
pkt               435 net/caif/cfctrl.c 				for (tmp = cfpkt_extr_head_u8(pkt);
pkt               436 net/caif/cfctrl.c 				     cfpkt_more(pkt) && tmp != '\0';
pkt               437 net/caif/cfctrl.c 				     tmp = cfpkt_extr_head_u8(pkt))
pkt               444 net/caif/cfctrl.c 				linkid = cfpkt_extr_head_u8(pkt);
pkt               454 net/caif/cfctrl.c 				    cfpkt_extr_head_u16(pkt);
pkt               457 net/caif/cfctrl.c 				    cfpkt_extr_head_u16(pkt);
pkt               464 net/caif/cfctrl.c 				     && cfpkt_more(pkt); i++) {
pkt               465 net/caif/cfctrl.c 					tmp = cfpkt_extr_head_u8(pkt);
pkt               469 net/caif/cfctrl.c 				len = cfpkt_extr_head_u8(pkt);
pkt               473 net/caif/cfctrl.c 				while (cfpkt_more(pkt) && len--) {
pkt               474 net/caif/cfctrl.c 					tmp = cfpkt_extr_head_u8(pkt);
pkt               480 net/caif/cfctrl.c 				linkid = cfpkt_extr_head_u8(pkt);
pkt               482 net/caif/cfctrl.c 				len = cfpkt_extr_head_u8(pkt);
pkt               484 net/caif/cfctrl.c 				cfpkt_extr_head(pkt, &param, len);
pkt               498 net/caif/cfctrl.c 				cfpkt_erroneous(pkt)) {
pkt               519 net/caif/cfctrl.c 		linkid = cfpkt_extr_head_u8(pkt);
pkt               547 net/caif/cfctrl.c 	cfpkt_destroy(pkt);
pkt               586 net/caif/cfctrl.c static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
pkt               614 net/caif/cfctrl.c 		cfpkt_add_trail(pkt, &linkid, 1);
pkt               616 net/caif/cfctrl.c 		cfpkt_peek_head(pkt, &linktype, 1);
pkt               619 net/caif/cfctrl.c 			cfpkt_add_trail(pkt, &tmp, 1);
pkt               620 net/caif/cfctrl.c 			cfpkt_add_trail(pkt, &tmp, 1);
pkt               626 net/caif/cfctrl.c 		cfpkt_peek_head(pkt, &linkid, 1);
pkt                17 net/caif/cfdbgl.c static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
pkt                18 net/caif/cfdbgl.c static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
pkt                33 net/caif/cfdbgl.c static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
pkt                35 net/caif/cfdbgl.c 	return layr->up->receive(layr->up, pkt);
pkt                38 net/caif/cfdbgl.c static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
pkt                45 net/caif/cfdbgl.c 		cfpkt_destroy(pkt);
pkt                50 net/caif/cfdbgl.c 	info = cfpkt_info(pkt);
pkt                54 net/caif/cfdbgl.c 	return layr->dn->transmit(layr->dn, pkt);
pkt                24 net/caif/cfdgml.c static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
pkt                25 net/caif/cfdgml.c static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
pkt                40 net/caif/cfdgml.c static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
pkt                49 net/caif/cfdgml.c 	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
pkt                51 net/caif/cfdgml.c 		cfpkt_destroy(pkt);
pkt                56 net/caif/cfdgml.c 		if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
pkt                58 net/caif/cfdgml.c 			cfpkt_destroy(pkt);
pkt                61 net/caif/cfdgml.c 		ret = layr->up->receive(layr->up, pkt);
pkt                68 net/caif/cfdgml.c 		cfpkt_destroy(pkt);
pkt                72 net/caif/cfdgml.c 		cfpkt_destroy(pkt);
pkt                75 net/caif/cfdgml.c 		cfpkt_destroy(pkt);
pkt                81 net/caif/cfdgml.c static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
pkt                90 net/caif/cfdgml.c 		cfpkt_destroy(pkt);
pkt                95 net/caif/cfdgml.c 	if (cfpkt_getlen(pkt) > DGM_MTU) {
pkt                96 net/caif/cfdgml.c 		cfpkt_destroy(pkt);
pkt               100 net/caif/cfdgml.c 	cfpkt_add_head(pkt, &zero, 3);
pkt               102 net/caif/cfdgml.c 	cfpkt_add_head(pkt, &packet_type, 1);
pkt               105 net/caif/cfdgml.c 	info = cfpkt_info(pkt);
pkt               112 net/caif/cfdgml.c 	return layr->dn->transmit(layr->dn, pkt);
pkt                28 net/caif/cffrml.c static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
pkt                29 net/caif/cffrml.c static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt);
pkt                82 net/caif/cffrml.c static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
pkt                91 net/caif/cffrml.c 	cfpkt_extr_head(pkt, &tmp, 2);
pkt                98 net/caif/cffrml.c 	if (cfpkt_setlen(pkt, len) < 0) {
pkt               101 net/caif/cffrml.c 		cfpkt_destroy(pkt);
pkt               109 net/caif/cffrml.c 		cfpkt_extr_trail(pkt, &tmp, 2);
pkt               111 net/caif/cffrml.c 		pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
pkt               113 net/caif/cffrml.c 			cfpkt_add_trail(pkt, &tmp, 2);
pkt               121 net/caif/cffrml.c 	if (cfpkt_erroneous(pkt)) {
pkt               124 net/caif/cffrml.c 		cfpkt_destroy(pkt);
pkt               130 net/caif/cffrml.c 		cfpkt_destroy(pkt);
pkt               134 net/caif/cffrml.c 	return layr->up->receive(layr->up, pkt);
pkt               137 net/caif/cffrml.c static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
pkt               145 net/caif/cffrml.c 		chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
pkt               147 net/caif/cffrml.c 		cfpkt_add_trail(pkt, &data, 2);
pkt               149 net/caif/cffrml.c 		cfpkt_pad_trail(pkt, 2);
pkt               151 net/caif/cffrml.c 	len = cfpkt_getlen(pkt);
pkt               153 net/caif/cffrml.c 	cfpkt_add_head(pkt, &data, 2);
pkt               154 net/caif/cffrml.c 	cfpkt_info(pkt)->hdr_len += 2;
pkt               155 net/caif/cffrml.c 	if (cfpkt_erroneous(pkt)) {
pkt               157 net/caif/cffrml.c 		cfpkt_destroy(pkt);
pkt               162 net/caif/cffrml.c 		cfpkt_destroy(pkt);
pkt               166 net/caif/cffrml.c 	return layr->dn->transmit(layr->dn, pkt);
pkt                42 net/caif/cfmuxl.c static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
pkt                43 net/caif/cfmuxl.c static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
pkt               174 net/caif/cfmuxl.c static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
pkt               180 net/caif/cfmuxl.c 	if (cfpkt_extr_head(pkt, &id, 1) < 0) {
pkt               182 net/caif/cfmuxl.c 		cfpkt_destroy(pkt);
pkt               191 net/caif/cfmuxl.c 		cfpkt_destroy(pkt);
pkt               205 net/caif/cfmuxl.c 	ret = up->receive(up, pkt);
pkt               211 net/caif/cfmuxl.c static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
pkt               217 net/caif/cfmuxl.c 	struct caif_payload_info *info = cfpkt_info(pkt);
pkt               227 net/caif/cfmuxl.c 		cfpkt_destroy(pkt);
pkt               233 net/caif/cfmuxl.c 	cfpkt_add_head(pkt, &linkid, 1);
pkt               240 net/caif/cfmuxl.c 	err = dn->transmit(dn, pkt);
pkt                17 net/caif/cfpkt_skbuff.c #define PKT_ERROR(pkt, errmsg)		   \
pkt                19 net/caif/cfpkt_skbuff.c 	cfpkt_priv(pkt)->erronous = true;  \
pkt                20 net/caif/cfpkt_skbuff.c 	skb_reset_tail_pointer(&pkt->skb); \
pkt                45 net/caif/cfpkt_skbuff.c static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
pkt                47 net/caif/cfpkt_skbuff.c 	return (struct cfpkt_priv_data *) pkt->skb.cb;
pkt                50 net/caif/cfpkt_skbuff.c static inline bool is_erronous(struct cfpkt *pkt)
pkt                52 net/caif/cfpkt_skbuff.c 	return cfpkt_priv(pkt)->erronous;
pkt                55 net/caif/cfpkt_skbuff.c static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
pkt                57 net/caif/cfpkt_skbuff.c 	return &pkt->skb;
pkt                67 net/caif/cfpkt_skbuff.c 	struct cfpkt *pkt = skb_to_pkt(nativepkt);
pkt                68 net/caif/cfpkt_skbuff.c 	cfpkt_priv(pkt)->erronous = false;
pkt                69 net/caif/cfpkt_skbuff.c 	return pkt;
pkt                73 net/caif/cfpkt_skbuff.c void *cfpkt_tonative(struct cfpkt *pkt)
pkt                75 net/caif/cfpkt_skbuff.c 	return (void *) pkt;
pkt                96 net/caif/cfpkt_skbuff.c void cfpkt_destroy(struct cfpkt *pkt)
pkt                98 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               102 net/caif/cfpkt_skbuff.c inline bool cfpkt_more(struct cfpkt *pkt)
pkt               104 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               108 net/caif/cfpkt_skbuff.c int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
pkt               110 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               115 net/caif/cfpkt_skbuff.c 	return !cfpkt_extr_head(pkt, data, len) &&
pkt               116 net/caif/cfpkt_skbuff.c 	    !cfpkt_add_head(pkt, data, len);
pkt               119 net/caif/cfpkt_skbuff.c int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
pkt               121 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               123 net/caif/cfpkt_skbuff.c 	if (unlikely(is_erronous(pkt)))
pkt               127 net/caif/cfpkt_skbuff.c 		PKT_ERROR(pkt, "read beyond end of packet\n");
pkt               133 net/caif/cfpkt_skbuff.c 			PKT_ERROR(pkt, "linearize failed\n");
pkt               145 net/caif/cfpkt_skbuff.c int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
pkt               147 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               150 net/caif/cfpkt_skbuff.c 	if (unlikely(is_erronous(pkt)))
pkt               154 net/caif/cfpkt_skbuff.c 		PKT_ERROR(pkt, "linearize failed\n");
pkt               158 net/caif/cfpkt_skbuff.c 		PKT_ERROR(pkt, "read beyond end of packet\n");
pkt               167 net/caif/cfpkt_skbuff.c int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
pkt               169 net/caif/cfpkt_skbuff.c 	return cfpkt_add_body(pkt, NULL, len);
pkt               172 net/caif/cfpkt_skbuff.c int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
pkt               174 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               180 net/caif/cfpkt_skbuff.c 	if (unlikely(is_erronous(pkt)))
pkt               198 net/caif/cfpkt_skbuff.c 			PKT_ERROR(pkt, "cow failed\n");
pkt               210 net/caif/cfpkt_skbuff.c inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
pkt               212 net/caif/cfpkt_skbuff.c 	return cfpkt_add_body(pkt, &data, 1);
pkt               215 net/caif/cfpkt_skbuff.c int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
pkt               217 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               222 net/caif/cfpkt_skbuff.c 	if (unlikely(is_erronous(pkt)))
pkt               225 net/caif/cfpkt_skbuff.c 		PKT_ERROR(pkt, "no headroom\n");
pkt               232 net/caif/cfpkt_skbuff.c 		PKT_ERROR(pkt, "cow failed\n");
pkt               242 net/caif/cfpkt_skbuff.c inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
pkt               244 net/caif/cfpkt_skbuff.c 	return cfpkt_add_body(pkt, data, len);
pkt               247 net/caif/cfpkt_skbuff.c inline u16 cfpkt_getlen(struct cfpkt *pkt)
pkt               249 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               253 net/caif/cfpkt_skbuff.c int cfpkt_iterate(struct cfpkt *pkt,
pkt               261 net/caif/cfpkt_skbuff.c 	if (unlikely(is_erronous(pkt)))
pkt               263 net/caif/cfpkt_skbuff.c 	if (unlikely(skb_linearize(&pkt->skb) != 0)) {
pkt               264 net/caif/cfpkt_skbuff.c 		PKT_ERROR(pkt, "linearize failed\n");
pkt               267 net/caif/cfpkt_skbuff.c 	return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
pkt               270 net/caif/cfpkt_skbuff.c int cfpkt_setlen(struct cfpkt *pkt, u16 len)
pkt               272 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               275 net/caif/cfpkt_skbuff.c 	if (unlikely(is_erronous(pkt)))
pkt               284 net/caif/cfpkt_skbuff.c 		return cfpkt_getlen(pkt);
pkt               288 net/caif/cfpkt_skbuff.c 	if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len)))
pkt               289 net/caif/cfpkt_skbuff.c 		PKT_ERROR(pkt, "skb_pad_trail failed\n");
pkt               291 net/caif/cfpkt_skbuff.c 	return cfpkt_getlen(pkt);
pkt               331 net/caif/cfpkt_skbuff.c struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
pkt               334 net/caif/cfpkt_skbuff.c 	struct sk_buff *skb = pkt_to_skb(pkt);
pkt               339 net/caif/cfpkt_skbuff.c 	if (unlikely(is_erronous(pkt)))
pkt               343 net/caif/cfpkt_skbuff.c 		PKT_ERROR(pkt, "trying to split beyond end of packet\n");
pkt               367 net/caif/cfpkt_skbuff.c bool cfpkt_erroneous(struct cfpkt *pkt)
pkt               369 net/caif/cfpkt_skbuff.c 	return cfpkt_priv(pkt)->erronous;
pkt               372 net/caif/cfpkt_skbuff.c struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
pkt               374 net/caif/cfpkt_skbuff.c 	return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
pkt               378 net/caif/cfpkt_skbuff.c void cfpkt_set_prio(struct cfpkt *pkt, int prio)
pkt               380 net/caif/cfpkt_skbuff.c 	pkt_to_skb(pkt)->priority = prio;
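
The cfpkt_skbuff.c entries above show two recurring idioms: a struct cfpkt is only a thin view over struct sk_buff (pkt_to_skb() returns &pkt->skb), and per-packet bookkeeping such as the erronous flag lives in the skb's cb[] scratch area and is checked by is_erronous() before every head/trail operation. The following is a self-contained userspace model of that error-latch idiom only; the demo_* names are invented for illustration and are not part of the kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct sk_buff: payload plus a cb[] scratch area. */
struct demo_pkt {
	unsigned char data[64];
	unsigned int len;
	char cb[48];		/* private per-packet state lives here */
};

struct demo_priv { bool erronous; };

static struct demo_priv *demo_priv(struct demo_pkt *pkt)
{
	return (struct demo_priv *)pkt->cb;
}

/* Mirror of the PKT_ERROR()/is_erronous() latch: once a packet is
 * marked bad, every later accessor refuses to touch it. */
static void demo_pkt_error(struct demo_pkt *pkt, const char *msg)
{
	fprintf(stderr, "pkt error: %s\n", msg);
	demo_priv(pkt)->erronous = true;
}

static int demo_extr_head(struct demo_pkt *pkt, void *dst, unsigned int n)
{
	if (demo_priv(pkt)->erronous)
		return -1;
	if (n > pkt->len) {
		demo_pkt_error(pkt, "read beyond end of packet");
		return -1;
	}
	memcpy(dst, pkt->data, n);
	memmove(pkt->data, pkt->data + n, pkt->len - n);
	pkt->len -= n;
	return 0;
}

int main(void)
{
	struct demo_pkt pkt = { .data = "AB", .len = 2 };
	unsigned char b;

	printf("first byte: %d\n", demo_extr_head(&pkt, &b, 1));  /* 0  */
	printf("too much:   %d\n", demo_extr_head(&pkt, &b, 5));  /* -1 */
	printf("after err:  %d\n", demo_extr_head(&pkt, &b, 1));  /* -1 */
	return 0;
}
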
pkt                21 net/caif/cfrfml.c static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
pkt                22 net/caif/cfrfml.c static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
pkt                72 net/caif/cfrfml.c 				struct cfpkt *pkt, int *err)
pkt                78 net/caif/cfrfml.c 	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
pkt                85 net/caif/cfrfml.c 	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
pkt                97 net/caif/cfrfml.c static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
pkt               112 net/caif/cfrfml.c 	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
pkt               119 net/caif/cfrfml.c 			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
pkt               124 net/caif/cfrfml.c 			if (cfpkt_erroneous(pkt))
pkt               126 net/caif/cfrfml.c 			rfml->incomplete_frm = pkt;
pkt               127 net/caif/cfrfml.c 			pkt = NULL;
pkt               130 net/caif/cfrfml.c 			tmppkt = rfm_append(rfml, seghead, pkt, &err);
pkt               150 net/caif/cfrfml.c 		tmppkt = rfm_append(rfml, seghead, pkt, &err);
pkt               158 net/caif/cfrfml.c 		pkt = tmppkt;
pkt               163 net/caif/cfrfml.c 		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
pkt               167 net/caif/cfrfml.c 	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
pkt               174 net/caif/cfrfml.c 		if (pkt)
pkt               175 net/caif/cfrfml.c 			cfpkt_destroy(pkt);
pkt               196 net/caif/cfrfml.c static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
pkt               198 net/caif/cfrfml.c 	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
pkt               201 net/caif/cfrfml.c 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
pkt               207 net/caif/cfrfml.c 	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
pkt               208 net/caif/cfrfml.c 	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
pkt               210 net/caif/cfrfml.c 	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
pkt               213 net/caif/cfrfml.c static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
pkt               219 net/caif/cfrfml.c 	struct cfpkt *frontpkt = pkt;
pkt               229 net/caif/cfrfml.c 	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
pkt               233 net/caif/cfrfml.c 	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
pkt               234 net/caif/cfrfml.c 		err = cfpkt_peek_head(pkt, head, 6);
pkt                29 net/caif/cfserl.c static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
pkt                30 net/caif/cfserl.c static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
pkt                53 net/caif/cfserl.c 	struct cfpkt *pkt = NULL;
pkt                67 net/caif/cfserl.c 		pkt = layr->incomplete_frm;
pkt                68 net/caif/cfserl.c 		if (pkt == NULL) {
pkt                73 net/caif/cfserl.c 		pkt = newpkt;
pkt                80 net/caif/cfserl.c 			cfpkt_extr_head(pkt, &tmp8, 1);
pkt                82 net/caif/cfserl.c 				while (cfpkt_more(pkt)
pkt                84 net/caif/cfserl.c 					cfpkt_extr_head(pkt, &tmp8, 1);
pkt                86 net/caif/cfserl.c 				if (!cfpkt_more(pkt)) {
pkt                87 net/caif/cfserl.c 					cfpkt_destroy(pkt);
pkt                95 net/caif/cfserl.c 		pkt_len = cfpkt_getlen(pkt);
pkt               105 net/caif/cfserl.c 				cfpkt_add_head(pkt, &stx, 1);
pkt               106 net/caif/cfserl.c 			layr->incomplete_frm = pkt;
pkt               115 net/caif/cfserl.c 		cfpkt_peek_head(pkt, &tmp, 2);
pkt               123 net/caif/cfserl.c 				if (pkt != NULL)
pkt               124 net/caif/cfserl.c 					cfpkt_destroy(pkt);
pkt               136 net/caif/cfserl.c 				cfpkt_add_head(pkt, &stx, 1);
pkt               137 net/caif/cfserl.c 			layr->incomplete_frm = pkt;
pkt               147 net/caif/cfserl.c 			tail_pkt = cfpkt_split(pkt, expectlen);
pkt               153 net/caif/cfserl.c 		ret = layr->layer.up->receive(layr->layer.up, pkt);
pkt               158 net/caif/cfserl.c 					pkt = cfpkt_append(pkt, tail_pkt, 0);
pkt               162 net/caif/cfserl.c 				cfpkt_destroy(pkt);
pkt               163 net/caif/cfserl.c 				pkt = NULL;
pkt               167 net/caif/cfserl.c 		pkt = tail_pkt;
pkt               169 net/caif/cfserl.c 	} while (pkt != NULL);
pkt               109 net/caif/cfsrvl.c 			struct cfpkt *pkt;
pkt               112 net/caif/cfsrvl.c 			pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
pkt               113 net/caif/cfsrvl.c 			if (!pkt)
pkt               116 net/caif/cfsrvl.c 			if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
pkt               118 net/caif/cfsrvl.c 				cfpkt_destroy(pkt);
pkt               121 net/caif/cfsrvl.c 			info = cfpkt_info(pkt);
pkt               125 net/caif/cfsrvl.c 			cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
pkt               126 net/caif/cfsrvl.c 			return layr->dn->transmit(layr->dn, pkt);
pkt               130 net/caif/cfsrvl.c 			struct cfpkt *pkt;
pkt               133 net/caif/cfsrvl.c 			pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
pkt               134 net/caif/cfsrvl.c 			if (!pkt)
pkt               137 net/caif/cfsrvl.c 			if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
pkt               139 net/caif/cfsrvl.c 				cfpkt_destroy(pkt);
pkt               142 net/caif/cfsrvl.c 			info = cfpkt_info(pkt);
pkt               146 net/caif/cfsrvl.c 			cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
pkt               147 net/caif/cfsrvl.c 			return layr->dn->transmit(layr->dn, pkt);
pkt                24 net/caif/cfutill.c static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
pkt                25 net/caif/cfutill.c static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
pkt                40 net/caif/cfutill.c static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
pkt                48 net/caif/cfutill.c 	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
pkt                50 net/caif/cfutill.c 		cfpkt_destroy(pkt);
pkt                56 net/caif/cfutill.c 		return layr->up->receive(layr->up, pkt);
pkt                59 net/caif/cfutill.c 		cfpkt_destroy(pkt);
pkt                63 net/caif/cfutill.c 		cfpkt_destroy(pkt);
pkt                69 net/caif/cfutill.c 		cfpkt_destroy(pkt);
pkt                72 net/caif/cfutill.c 		cfpkt_destroy(pkt);
pkt                78 net/caif/cfutill.c static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
pkt                89 net/caif/cfutill.c 		cfpkt_destroy(pkt);
pkt                93 net/caif/cfutill.c 	cfpkt_add_head(pkt, &zero, 1);
pkt                95 net/caif/cfutill.c 	info = cfpkt_info(pkt);
pkt               103 net/caif/cfutill.c 	return layr->dn->transmit(layr->dn, pkt);
pkt                23 net/caif/cfveil.c static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
pkt                24 net/caif/cfveil.c static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
pkt                39 net/caif/cfveil.c static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
pkt                48 net/caif/cfveil.c 	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
pkt                50 net/caif/cfveil.c 		cfpkt_destroy(pkt);
pkt                55 net/caif/cfveil.c 		ret = layr->up->receive(layr->up, pkt);
pkt                59 net/caif/cfveil.c 		cfpkt_destroy(pkt);
pkt                63 net/caif/cfveil.c 		cfpkt_destroy(pkt);
pkt                66 net/caif/cfveil.c 		cfpkt_destroy(pkt);
pkt                70 net/caif/cfveil.c 		cfpkt_destroy(pkt);
pkt                75 net/caif/cfveil.c static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
pkt                86 net/caif/cfveil.c 	if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
pkt                93 net/caif/cfveil.c 	info = cfpkt_info(pkt);
pkt                97 net/caif/cfveil.c 	return layr->dn->transmit(layr->dn, pkt);
pkt                99 net/caif/cfveil.c 	cfpkt_destroy(pkt);
pkt                19 net/caif/cfvidl.c static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt);
pkt                20 net/caif/cfvidl.c static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
pkt                36 net/caif/cfvidl.c static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
pkt                39 net/caif/cfvidl.c 	if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
pkt                41 net/caif/cfvidl.c 		cfpkt_destroy(pkt);
pkt                44 net/caif/cfvidl.c 	return layr->up->receive(layr->up, pkt);
pkt                47 net/caif/cfvidl.c static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
pkt                55 net/caif/cfvidl.c 		cfpkt_destroy(pkt);
pkt                59 net/caif/cfvidl.c 	cfpkt_add_head(pkt, &videoheader, 4);
pkt                61 net/caif/cfvidl.c 	info = cfpkt_info(pkt);
pkt                64 net/caif/cfvidl.c 	return layr->dn->transmit(layr->dn, pkt);
pkt                70 net/caif/chnl_net.c static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
pkt                82 net/caif/chnl_net.c 	skb = (struct sk_buff *) cfpkt_tonative(pkt);
pkt               217 net/caif/chnl_net.c 	struct cfpkt *pkt = NULL;
pkt               243 net/caif/chnl_net.c 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
pkt               246 net/caif/chnl_net.c 	result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
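
Across the CAIF entries above (cfdbgl, cfdgml, cfveil, cfutill, cfvidl, ...), every protocol layer follows one shape: receive() strips its header with cfpkt_extr_head() and either forwards the packet with layr->up->receive() or frees it with cfpkt_destroy(), while transmit() prepends its header with cfpkt_add_head() and calls layr->dn->transmit(). Below is a hedged sketch of such a pass-through layer; "cfdemo" is not a real CAIF layer, and only the call signatures visible in the listing are assumed.

/* Sketch only: the cfpkt_* and cflayer calls mirror the signatures
 * listed above; the error code is an assumption. */
static int cfdemo_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 cmd;

	/* Strip and check our one-byte header. */
	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	/* Hand the remaining payload to the layer above. */
	return layr->up->receive(layr->up, pkt);
}

static int cfdemo_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 cmd = 0;	/* header byte prepended on the way down */

	if (cfpkt_add_head(pkt, &cmd, 1) < 0) {
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	cfpkt_info(pkt)->hdr_len += 1;
	return layr->dn->transmit(layr->dn, pkt);
}
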
pkt               283 net/can/j1939/j1939-priv.h 	} pkt;
pkt               910 net/can/j1939/socket.c 		size = min(session->pkt.tx_acked * 7,
pkt               325 net/can/j1939/transport.c 	offset_start = session->pkt.tx_acked * 7;
pkt               364 net/can/j1939/transport.c 	offset_start = session->pkt.dpo * 7;
pkt               693 net/can/j1939/transport.c 	dat[3] = session->pkt.total;
pkt               704 net/can/j1939/transport.c 		session->pkt.tx = 0;
pkt               731 net/can/j1939/transport.c 	unsigned int pkt;
pkt               738 net/can/j1939/transport.c 	session->pkt.dpo = session->pkt.tx_acked;
pkt               739 net/can/j1939/transport.c 	pkt = session->pkt.dpo;
pkt               740 net/can/j1939/transport.c 	dat[1] = session->pkt.last - session->pkt.tx_acked;
pkt               741 net/can/j1939/transport.c 	dat[2] = (pkt >> 0);
pkt               742 net/can/j1939/transport.c 	dat[3] = (pkt >> 8);
pkt               743 net/can/j1939/transport.c 	dat[4] = (pkt >> 16);
pkt               751 net/can/j1939/transport.c 	session->pkt.tx = session->pkt.tx_acked;
pkt               779 net/can/j1939/transport.c 		pkt_end = session->pkt.total;
pkt               781 net/can/j1939/transport.c 		pkt_end = session->pkt.last;
pkt               783 net/can/j1939/transport.c 	while (session->pkt.tx < pkt_end) {
pkt               784 net/can/j1939/transport.c 		dat[0] = session->pkt.tx - session->pkt.dpo + 1;
pkt               785 net/can/j1939/transport.c 		offset = (session->pkt.tx * 7) - skcb->offset;
pkt               803 net/can/j1939/transport.c 		session->pkt.tx++;
pkt               807 net/can/j1939/transport.c 		if (session->pkt.tx < session->pkt.total && pdelay) {
pkt               861 net/can/j1939/transport.c 	unsigned int pkt, len;
pkt               868 net/can/j1939/transport.c 	len = session->pkt.total - session->pkt.rx;
pkt               869 net/can/j1939/transport.c 	len = min3(len, session->pkt.block, j1939_tp_block ?: 255);
pkt               873 net/can/j1939/transport.c 		pkt = session->pkt.rx + 1;
pkt               876 net/can/j1939/transport.c 		dat[2] = (pkt >> 0);
pkt               877 net/can/j1939/transport.c 		dat[3] = (pkt >> 8);
pkt               878 net/can/j1939/transport.c 		dat[4] = (pkt >> 16);
pkt               882 net/can/j1939/transport.c 		dat[2] = session->pkt.rx + 1;
pkt               924 net/can/j1939/transport.c 		dat[3] = session->pkt.total;
pkt               970 net/can/j1939/transport.c 		if (session->pkt.rx >= session->pkt.total) {
pkt               972 net/can/j1939/transport.c 		} else if (session->pkt.rx >= session->pkt.last) {
pkt              1316 net/can/j1939/transport.c 	session->pkt.tx_acked = session->pkt.total;
pkt              1342 net/can/j1939/transport.c 	unsigned int pkt;
pkt              1358 net/can/j1939/transport.c 		pkt = j1939_etp_ctl_to_packet(dat);
pkt              1360 net/can/j1939/transport.c 		pkt = dat[2];
pkt              1362 net/can/j1939/transport.c 	if (!pkt)
pkt              1364 net/can/j1939/transport.c 	else if (dat[1] > session->pkt.block /* 0xff for etp */)
pkt              1368 net/can/j1939/transport.c 	session->pkt.tx_acked = pkt - 1;
pkt              1370 net/can/j1939/transport.c 	session->pkt.last = session->pkt.tx_acked + dat[1];
pkt              1371 net/can/j1939/transport.c 	if (session->pkt.last > session->pkt.total)
pkt              1373 net/can/j1939/transport.c 		session->pkt.last = session->pkt.total;
pkt              1375 net/can/j1939/transport.c 	session->pkt.tx = session->pkt.tx_acked;
pkt              1381 net/can/j1939/transport.c 			if (session->pkt.tx_acked)
pkt              1560 net/can/j1939/transport.c 	session->pkt.total = (len + 6) / 7;
pkt              1561 net/can/j1939/transport.c 	session->pkt.block = 0xff;
pkt              1563 net/can/j1939/transport.c 		if (dat[3] != session->pkt.total)
pkt              1565 net/can/j1939/transport.c 				     __func__, session, session->pkt.total,
pkt              1567 net/can/j1939/transport.c 		session->pkt.total = dat[3];
pkt              1568 net/can/j1939/transport.c 		session->pkt.block = min(dat[3], dat[4]);
pkt              1571 net/can/j1939/transport.c 	session->pkt.rx = 0;
pkt              1572 net/can/j1939/transport.c 	session->pkt.tx = 0;
pkt              1685 net/can/j1939/transport.c 	session->pkt.dpo = j1939_etp_ctl_to_packet(skb->data);
pkt              1746 net/can/j1939/transport.c 	packet = (dat[0] - 1 + session->pkt.dpo);
pkt              1747 net/can/j1939/transport.c 	if (packet > session->pkt.total ||
pkt              1748 net/can/j1939/transport.c 	    (session->pkt.rx + 1) > session->pkt.total) {
pkt              1773 net/can/j1939/transport.c 	if (packet == session->pkt.rx)
pkt              1774 net/can/j1939/transport.c 		session->pkt.rx++;
pkt              1778 net/can/j1939/transport.c 		if (session->pkt.rx >= session->pkt.total)
pkt              1782 net/can/j1939/transport.c 		if (session->pkt.rx >= session->pkt.last)
pkt              1885 net/can/j1939/transport.c 	session->pkt.total = (size + 6) / 7;
pkt              1886 net/can/j1939/transport.c 	session->pkt.block = skcb->addr.type == J1939_ETP ? 255 :
pkt              1887 net/can/j1939/transport.c 		min(j1939_tp_block ?: 255, session->pkt.total);
pkt              1891 net/can/j1939/transport.c 		session->pkt.last = session->pkt.total;
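
The j1939/transport.c entries above keep all (E)TP bookkeeping in session->pkt: total = (len + 6) / 7 because each TP.DT frame carries seven payload bytes after the one-byte sequence number, a frame's payload starts at pkt.tx * 7 (minus skcb->offset for ETP windows), and CTS handling advances tx_acked/last in whole packets. The following is a small self-contained userspace program that models just that arithmetic; it is plain C for illustration, not the kernel code.

#include <stdio.h>

/* Seven payload bytes per J1939 TP.DT frame; dat[0] is the sequence no. */
#define J1939_TP_DATA_PER_PKT 7

static unsigned int tp_total_packets(unsigned int len)
{
	/* Same rounding as (len + 6) / 7 in transport.c. */
	return (len + J1939_TP_DATA_PER_PKT - 1) / J1939_TP_DATA_PER_PKT;
}

int main(void)
{
	unsigned int len = 100;			/* example payload size */
	unsigned int total = tp_total_packets(len);
	unsigned int pkt;

	printf("%u bytes -> %u packets\n", len, total);
	for (pkt = 0; pkt < total; pkt++) {
		unsigned int off = pkt * J1939_TP_DATA_PER_PKT;
		unsigned int n = len - off;

		if (n > J1939_TP_DATA_PER_PKT)
			n = J1939_TP_DATA_PER_PKT;
		printf("  seq %3u: offset %3u, %u data bytes\n",
		       pkt + 1, off, n);
	}
	return 0;
}
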
pkt                62 net/dccp/ccid.h 	int		(*ccid_hc_rx_parse_options)(struct sock *sk, u8 pkt,
pkt                68 net/dccp/ccid.h 	int		(*ccid_hc_tx_parse_options)(struct sock *sk, u8 pkt,
pkt               200 net/dccp/ccid.h 					   u8 pkt, u8 opt, u8 *val, u8 len)
pkt               204 net/dccp/ccid.h 	return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
pkt               212 net/dccp/ccid.h 					   u8 pkt, u8 opt, u8 *val, u8 len)
pkt               216 net/dccp/ccid.h 	return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
pkt               104 net/ipv4/ipmr.c 			     struct sk_buff *pkt, vifi_t vifi, int assert);
pkt               107 net/ipv4/ipmr.c static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
pkt              1040 net/ipv4/ipmr.c 			     struct sk_buff *pkt, vifi_t vifi, int assert)
pkt              1042 net/ipv4/ipmr.c 	const int ihl = ip_hdrlen(pkt);
pkt              1050 net/ipv4/ipmr.c 		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
pkt              1067 net/ipv4/ipmr.c 		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
pkt              1075 net/ipv4/ipmr.c 		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
pkt              1081 net/ipv4/ipmr.c 		skb_copy_to_linear_data(skb, pkt->data, ihl);
pkt              1086 net/ipv4/ipmr.c 		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
pkt              1652 net/ipv4/ipmr.c 			sr.pktcnt = c->_c.mfc_un.res.pkt;
pkt              1727 net/ipv4/ipmr.c 			sr.pktcnt = c->_c.mfc_un.res.pkt;
pkt              1959 net/ipv4/ipmr.c 	c->_c.mfc_un.res.pkt++;
pkt              2435 net/ipv4/ipmr.c static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
pkt              2445 net/ipv4/ipmr.c 	payloadlen = pkt->len - sizeof(struct igmpmsg);
pkt              2446 net/ipv4/ipmr.c 	msg = (struct igmpmsg *)skb_network_header(pkt);
pkt              2467 net/ipv4/ipmr.c 	if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
pkt              3000 net/ipv4/ipmr.c 				   mfc->_c.mfc_un.res.pkt,
pkt               258 net/ipv4/ipmr_base.c 	mfcs.mfcs_packets = c->mfc_un.res.pkt;
pkt                22 net/ipv4/netfilter/nft_dup_ipv4.c 			      const struct nft_pktinfo *pkt)
pkt                30 net/ipv4/netfilter/nft_dup_ipv4.c 	nf_dup_ipv4(nft_net(pkt), pkt->skb, nft_hook(pkt), &gw, oif);
pkt                28 net/ipv4/netfilter/nft_fib_ipv4.c 			const struct nft_pktinfo *pkt)
pkt                31 net/ipv4/netfilter/nft_fib_ipv4.c 	int noff = skb_network_offset(pkt->skb);
pkt                38 net/ipv4/netfilter/nft_fib_ipv4.c 		dev = nft_in(pkt);
pkt                40 net/ipv4/netfilter/nft_fib_ipv4.c 		dev = nft_out(pkt);
pkt                42 net/ipv4/netfilter/nft_fib_ipv4.c 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
pkt                53 net/ipv4/netfilter/nft_fib_ipv4.c 	*dst = inet_dev_addr_type(nft_net(pkt), dev, addr);
pkt                58 net/ipv4/netfilter/nft_fib_ipv4.c 		   const struct nft_pktinfo *pkt)
pkt                61 net/ipv4/netfilter/nft_fib_ipv4.c 	int noff = skb_network_offset(pkt->skb);
pkt                80 net/ipv4/netfilter/nft_fib_ipv4.c 		oif = nft_out(pkt);
pkt                82 net/ipv4/netfilter/nft_fib_ipv4.c 		oif = nft_in(pkt);
pkt                86 net/ipv4/netfilter/nft_fib_ipv4.c 	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
pkt                87 net/ipv4/netfilter/nft_fib_ipv4.c 	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
pkt                88 net/ipv4/netfilter/nft_fib_ipv4.c 		nft_fib_store_result(dest, priv, nft_in(pkt));
pkt                92 net/ipv4/netfilter/nft_fib_ipv4.c 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
pkt               101 net/ipv4/netfilter/nft_fib_ipv4.c 			nft_fib_store_result(dest, priv, pkt->skb->dev);
pkt               107 net/ipv4/netfilter/nft_fib_ipv4.c 		fl4.flowi4_mark = pkt->skb->mark;
pkt               121 net/ipv4/netfilter/nft_fib_ipv4.c 	if (fib_lookup(nft_net(pkt), &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE))
pkt                21 net/ipv4/netfilter/nft_reject_ipv4.c 				 const struct nft_pktinfo *pkt)
pkt                27 net/ipv4/netfilter/nft_reject_ipv4.c 		nf_send_unreach(pkt->skb, priv->icmp_code, nft_hook(pkt));
pkt                30 net/ipv4/netfilter/nft_reject_ipv4.c 		nf_send_reset(nft_net(pkt), pkt->skb, nft_hook(pkt));
pkt                88 net/ipv6/ip6mr.c static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
pkt                92 net/ipv6/ip6mr.c static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
pkt               485 net/ipv6/ip6mr.c 				   mfc->_c.mfc_un.res.pkt,
pkt              1035 net/ipv6/ip6mr.c static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
pkt              1045 net/ipv6/ip6mr.c 		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
pkt              1066 net/ipv6/ip6mr.c 		skb_push(skb, -skb_network_offset(pkt));
pkt              1075 net/ipv6/ip6mr.c 		msg->im6_src = ipv6_hdr(pkt)->saddr;
pkt              1076 net/ipv6/ip6mr.c 		msg->im6_dst = ipv6_hdr(pkt)->daddr;
pkt              1088 net/ipv6/ip6mr.c 	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
pkt              1101 net/ipv6/ip6mr.c 	msg->im6_src = ipv6_hdr(pkt)->saddr;
pkt              1102 net/ipv6/ip6mr.c 	msg->im6_dst = ipv6_hdr(pkt)->daddr;
pkt              1104 net/ipv6/ip6mr.c 	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
pkt              1891 net/ipv6/ip6mr.c 			sr.pktcnt = c->_c.mfc_un.res.pkt;
pkt              1966 net/ipv6/ip6mr.c 			sr.pktcnt = c->_c.mfc_un.res.pkt;
pkt              2090 net/ipv6/ip6mr.c 	c->_c.mfc_un.res.pkt++;
pkt              2435 net/ipv6/ip6mr.c static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
pkt              2445 net/ipv6/ip6mr.c 	payloadlen = pkt->len - sizeof(struct mrt6msg);
pkt              2446 net/ipv6/ip6mr.c 	msg = (struct mrt6msg *)skb_transport_header(pkt);
pkt              2467 net/ipv6/ip6mr.c 	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
pkt               482 net/ipv6/ipv6_sockglue.c 		struct in6_pktinfo pkt;
pkt               489 net/ipv6/ipv6_sockglue.c 		if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
pkt               493 net/ipv6/ipv6_sockglue.c 		if (!sk_dev_equal_l3scope(sk, pkt.ipi6_ifindex))
pkt               496 net/ipv6/ipv6_sockglue.c 		np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
pkt               497 net/ipv6/ipv6_sockglue.c 		np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
pkt                22 net/ipv6/netfilter/nft_dup_ipv6.c 			      const struct nft_pktinfo *pkt)
pkt                28 net/ipv6/netfilter/nft_dup_ipv6.c 	nf_dup_ipv6(nft_net(pkt), pkt->skb, nft_hook(pkt), gw, oif);
pkt                23 net/ipv6/netfilter/nft_fib_ipv6.c 			       const struct nft_pktinfo *pkt,
pkt                39 net/ipv6/netfilter/nft_fib_ipv6.c 		fl6->flowi6_oif = get_ifindex(dev ? dev : pkt->skb->dev);
pkt                46 net/ipv6/netfilter/nft_fib_ipv6.c 		fl6->flowi6_mark = pkt->skb->mark;
pkt                54 net/ipv6/netfilter/nft_fib_ipv6.c 				const struct nft_pktinfo *pkt,
pkt                62 net/ipv6/netfilter/nft_fib_ipv6.c 		.flowi6_proto = pkt->tprot,
pkt                67 net/ipv6/netfilter/nft_fib_ipv6.c 		dev = nft_in(pkt);
pkt                69 net/ipv6/netfilter/nft_fib_ipv6.c 		dev = nft_out(pkt);
pkt                71 net/ipv6/netfilter/nft_fib_ipv6.c 	nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);
pkt                73 net/ipv6/netfilter/nft_fib_ipv6.c 	if (dev && nf_ipv6_chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
pkt                76 net/ipv6/netfilter/nft_fib_ipv6.c 	route_err = nf_ip6_route(nft_net(pkt), (struct dst_entry **)&rt,
pkt               121 net/ipv6/netfilter/nft_fib_ipv6.c 			const struct nft_pktinfo *pkt)
pkt               124 net/ipv6/netfilter/nft_fib_ipv6.c 	int noff = skb_network_offset(pkt->skb);
pkt               128 net/ipv6/netfilter/nft_fib_ipv6.c 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
pkt               134 net/ipv6/netfilter/nft_fib_ipv6.c 	*dest = __nft_fib6_eval_type(priv, pkt, iph);
pkt               139 net/ipv6/netfilter/nft_fib_ipv6.c 		   const struct nft_pktinfo *pkt)
pkt               142 net/ipv6/netfilter/nft_fib_ipv6.c 	int noff = skb_network_offset(pkt->skb);
pkt               148 net/ipv6/netfilter/nft_fib_ipv6.c 		.flowi6_proto = pkt->tprot,
pkt               154 net/ipv6/netfilter/nft_fib_ipv6.c 		oif = nft_in(pkt);
pkt               156 net/ipv6/netfilter/nft_fib_ipv6.c 		oif = nft_out(pkt);
pkt               158 net/ipv6/netfilter/nft_fib_ipv6.c 	iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
pkt               164 net/ipv6/netfilter/nft_fib_ipv6.c 	lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
pkt               166 net/ipv6/netfilter/nft_fib_ipv6.c 	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
pkt               167 net/ipv6/netfilter/nft_fib_ipv6.c 	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
pkt               168 net/ipv6/netfilter/nft_fib_ipv6.c 		nft_fib_store_result(dest, priv, nft_in(pkt));
pkt               173 net/ipv6/netfilter/nft_fib_ipv6.c 	rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, pkt->skb,
pkt                21 net/ipv6/netfilter/nft_reject_ipv6.c 				 const struct nft_pktinfo *pkt)
pkt                27 net/ipv6/netfilter/nft_reject_ipv6.c 		nf_send_unreach6(nft_net(pkt), pkt->skb, priv->icmp_code,
pkt                28 net/ipv6/netfilter/nft_reject_ipv6.c 				 nft_hook(pkt));
pkt                31 net/ipv6/netfilter/nft_reject_ipv6.c 		nf_send_reset6(nft_net(pkt), pkt->skb, nft_hook(pkt));
pkt                25 net/netfilter/nf_dup_netdev.c void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif)
pkt                29 net/netfilter/nf_dup_netdev.c 	dev = dev_get_by_index_rcu(nft_net(pkt), oif);
pkt                31 net/netfilter/nf_dup_netdev.c 		kfree_skb(pkt->skb);
pkt                35 net/netfilter/nf_dup_netdev.c 	nf_do_netdev_egress(pkt->skb, dev);
pkt                39 net/netfilter/nf_dup_netdev.c void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif)
pkt                44 net/netfilter/nf_dup_netdev.c 	dev = dev_get_by_index_rcu(nft_net(pkt), oif);
pkt                48 net/netfilter/nf_dup_netdev.c 	skb = skb_clone(pkt->skb, GFP_ATOMIC);
pkt                28 net/netfilter/nf_tables_core.c 	const struct nft_pktinfo *pkt = info->pkt;
pkt                30 net/netfilter/nf_tables_core.c 	if (!info->trace || !pkt->skb->nf_trace)
pkt                63 net/netfilter/nf_tables_core.c 				  const struct nft_pktinfo *pkt)
pkt                66 net/netfilter/nf_tables_core.c 	const struct sk_buff *skb = pkt->skb;
pkt                73 net/netfilter/nf_tables_core.c 		if (!pkt->tprot_set)
pkt                75 net/netfilter/nf_tables_core.c 		ptr = skb_network_header(skb) + pkt->xt.thoff;
pkt                96 net/netfilter/nf_tables_core.c 					    const struct nft_pktinfo *pkt)
pkt               111 net/netfilter/nf_tables_core.c 		stats->bytes += pkt->skb->len;
pkt               125 net/netfilter/nf_tables_core.c 			       struct nft_pktinfo *pkt)
pkt               131 net/netfilter/nf_tables_core.c 		return fun(expr, regs, pkt); } while (0)
pkt               145 net/netfilter/nf_tables_core.c 	expr->ops->eval(expr, regs, pkt);
pkt               149 net/netfilter/nf_tables_core.c nft_do_chain(struct nft_pktinfo *pkt, void *priv)
pkt               152 net/netfilter/nf_tables_core.c 	const struct net *net = nft_net(pkt);
pkt               164 net/netfilter/nf_tables_core.c 		nft_trace_init(&info, pkt, &regs.verdict, basechain);
pkt               180 net/netfilter/nf_tables_core.c 				 !nft_payload_fast_eval(expr, &regs, pkt))
pkt               181 net/netfilter/nf_tables_core.c 				expr_call_ops_eval(expr, &regs, pkt);
pkt               242 net/netfilter/nf_tables_core.c 		nft_update_chain_stats(basechain, pkt);
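
The nf_tables_core.c entries above show the expression calling convention: nft_do_chain() walks each rule and invokes expr->ops->eval(expr, &regs, pkt), and an expression reads its private state with nft_expr_priv() and then either writes a register or sets regs->verdict.code. A hedged sketch of a minimal eval callback follows; the nft_demo_len expression is invented, and only the calling convention and accessors seen in the listing are assumed.

/* Sketch only: a made-up expression that breaks rule evaluation when
 * the packet is longer than a configured threshold. */
struct nft_demo_len {
	u32 max_len;
};

static void nft_demo_len_eval(const struct nft_expr *expr,
			      struct nft_regs *regs,
			      const struct nft_pktinfo *pkt)
{
	const struct nft_demo_len *priv = nft_expr_priv(expr);

	if (pkt->skb->len > priv->max_len)
		regs->verdict.code = NFT_BREAK;	/* stop evaluating this rule */
}
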
pkt               110 net/netfilter/nf_tables_trace.c 				  const struct nft_pktinfo *pkt)
pkt               112 net/netfilter/nf_tables_trace.c 	const struct sk_buff *skb = pkt->skb;
pkt               116 net/netfilter/nf_tables_trace.c 	nh_end = pkt->tprot_set ? pkt->xt.thoff : skb->len;
pkt               122 net/netfilter/nf_tables_trace.c 	if (pkt->tprot_set) {
pkt               123 net/netfilter/nf_tables_trace.c 		len = min_t(unsigned int, skb->len - pkt->xt.thoff,
pkt               126 net/netfilter/nf_tables_trace.c 				      pkt->xt.thoff, len))
pkt               185 net/netfilter/nf_tables_trace.c 	const struct nft_pktinfo *pkt = info->pkt;
pkt               192 net/netfilter/nf_tables_trace.c 	if (!nfnetlink_has_listeners(nft_net(pkt), NFNLGRP_NFTRACE))
pkt               231 net/netfilter/nf_tables_trace.c 	if (nla_put_be32(skb, NFTA_TRACE_NFPROTO, htonl(nft_pf(pkt))))
pkt               237 net/netfilter/nf_tables_trace.c 	if (trace_fill_id(skb, pkt->skb))
pkt               265 net/netfilter/nf_tables_trace.c 	if (pkt->skb->mark &&
pkt               266 net/netfilter/nf_tables_trace.c 	    nla_put_be32(skb, NFTA_TRACE_MARK, htonl(pkt->skb->mark)))
pkt               270 net/netfilter/nf_tables_trace.c 		if (nf_trace_fill_dev_info(skb, nft_in(pkt), nft_out(pkt)))
pkt               273 net/netfilter/nf_tables_trace.c 		if (nf_trace_fill_pkt_info(skb, pkt))
pkt               279 net/netfilter/nf_tables_trace.c 	nfnetlink_send(skb, nft_net(pkt), 0, NFNLGRP_NFTRACE, 0, GFP_ATOMIC);
pkt               287 net/netfilter/nf_tables_trace.c void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
pkt               294 net/netfilter/nf_tables_trace.c 	info->pkt = pkt;
pkt                27 net/netfilter/nft_bitwise.c 		      struct nft_regs *regs, const struct nft_pktinfo *pkt)
pkt                28 net/netfilter/nft_byteorder.c 			const struct nft_pktinfo *pkt)
pkt                18 net/netfilter/nft_chain_filter.c 	struct nft_pktinfo pkt;
pkt                20 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
pkt                21 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo_ipv4(&pkt, skb);
pkt                23 net/netfilter/nft_chain_filter.c 	return nft_do_chain(&pkt, priv);
pkt                62 net/netfilter/nft_chain_filter.c 	struct nft_pktinfo pkt;
pkt                64 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
pkt                65 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo_unspec(&pkt, skb);
pkt                67 net/netfilter/nft_chain_filter.c 	return nft_do_chain(&pkt, priv);
pkt               102 net/netfilter/nft_chain_filter.c 	struct nft_pktinfo pkt;
pkt               104 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
pkt               105 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo_ipv6(&pkt, skb);
pkt               107 net/netfilter/nft_chain_filter.c 	return nft_do_chain(&pkt, priv);
pkt               146 net/netfilter/nft_chain_filter.c 	struct nft_pktinfo pkt;
pkt               148 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
pkt               152 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv4(&pkt, skb);
pkt               155 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv6(&pkt, skb);
pkt               161 net/netfilter/nft_chain_filter.c 	return nft_do_chain(&pkt, priv);
pkt               202 net/netfilter/nft_chain_filter.c 	struct nft_pktinfo pkt;
pkt               204 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
pkt               208 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv4_validate(&pkt, skb);
pkt               211 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv6_validate(&pkt, skb);
pkt               214 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_unspec(&pkt, skb);
pkt               218 net/netfilter/nft_chain_filter.c 	return nft_do_chain(&pkt, priv);
pkt               257 net/netfilter/nft_chain_filter.c 	struct nft_pktinfo pkt;
pkt               259 net/netfilter/nft_chain_filter.c 	nft_set_pktinfo(&pkt, skb, state);
pkt               263 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv4_validate(&pkt, skb);
pkt               266 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_ipv6_validate(&pkt, skb);
pkt               269 net/netfilter/nft_chain_filter.c 		nft_set_pktinfo_unspec(&pkt, skb);
pkt               273 net/netfilter/nft_chain_filter.c 	return nft_do_chain(&pkt, priv);
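
The nft_chain_filter.c entries above repeat one hook shape: build a struct nft_pktinfo on the stack with nft_set_pktinfo(), complete it with the family-specific helper (nft_set_pktinfo_ipv4/ipv6/unspec, or the _validate variants on bridge/netdev), and hand it to nft_do_chain(). A hedged sketch of such a hook for IPv4, using an invented nft_demo_ prefix and otherwise only the calls shown above, would be:

/* Sketch only: mirrors the hook pattern in nft_chain_filter.c;
 * "nft_demo_do_chain_ipv4" is not a real kernel symbol. */
static unsigned int
nft_demo_do_chain_ipv4(void *priv, struct sk_buff *skb,
		       const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	nft_set_pktinfo(&pkt, skb, state);	/* generic fields: skb, hook state */
	nft_set_pktinfo_ipv4(&pkt, skb);	/* family-specific transport info */

	return nft_do_chain(&pkt, priv);	/* walk the rules of this chain */
}
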
pkt                13 net/netfilter/nft_chain_nat.c 	struct nft_pktinfo pkt;
pkt                15 net/netfilter/nft_chain_nat.c 	nft_set_pktinfo(&pkt, skb, state);
pkt                20 net/netfilter/nft_chain_nat.c 		nft_set_pktinfo_ipv4(&pkt, skb);
pkt                25 net/netfilter/nft_chain_nat.c 		nft_set_pktinfo_ipv6(&pkt, skb);
pkt                32 net/netfilter/nft_chain_nat.c 	return nft_do_chain(&pkt, priv);
pkt                21 net/netfilter/nft_chain_route.c 	struct nft_pktinfo pkt;
pkt                28 net/netfilter/nft_chain_route.c 	nft_set_pktinfo(&pkt, skb, state);
pkt                29 net/netfilter/nft_chain_route.c 	nft_set_pktinfo_ipv4(&pkt, skb);
pkt                37 net/netfilter/nft_chain_route.c 	ret = nft_do_chain(&pkt, priv);
pkt                70 net/netfilter/nft_chain_route.c 	struct nft_pktinfo pkt;
pkt                76 net/netfilter/nft_chain_route.c 	nft_set_pktinfo(&pkt, skb, state);
pkt                77 net/netfilter/nft_chain_route.c 	nft_set_pktinfo_ipv6(&pkt, skb);
pkt                88 net/netfilter/nft_chain_route.c 	ret = nft_do_chain(&pkt, priv);
pkt               119 net/netfilter/nft_chain_route.c 	struct nft_pktinfo pkt;
pkt               127 net/netfilter/nft_chain_route.c 		nft_set_pktinfo(&pkt, skb, state);
pkt               131 net/netfilter/nft_chain_route.c 	return nft_do_chain(&pkt, priv);
pkt                27 net/netfilter/nft_cmp.c 		  const struct nft_pktinfo *pkt)
pkt                69 net/netfilter/nft_compat.c 			       const struct nft_pktinfo *pkt)
pkt                73 net/netfilter/nft_compat.c 	struct sk_buff *skb = pkt->skb;
pkt                76 net/netfilter/nft_compat.c 	nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
pkt                78 net/netfilter/nft_compat.c 	ret = target->target(skb, &pkt->xt);
pkt                80 net/netfilter/nft_compat.c 	if (pkt->xt.hotdrop)
pkt                95 net/netfilter/nft_compat.c 				   const struct nft_pktinfo *pkt)
pkt                99 net/netfilter/nft_compat.c 	struct sk_buff *skb = pkt->skb;
pkt               102 net/netfilter/nft_compat.c 	nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
pkt               104 net/netfilter/nft_compat.c 	ret = target->target(skb, &pkt->xt);
pkt               106 net/netfilter/nft_compat.c 	if (pkt->xt.hotdrop)
pkt               330 net/netfilter/nft_compat.c 			     const struct nft_pktinfo *pkt,
pkt               334 net/netfilter/nft_compat.c 	struct sk_buff *skb = pkt->skb;
pkt               337 net/netfilter/nft_compat.c 	nft_compat_set_par((struct xt_action_param *)&pkt->xt, match, info);
pkt               339 net/netfilter/nft_compat.c 	ret = match->match(skb, (struct xt_action_param *)&pkt->xt);
pkt               341 net/netfilter/nft_compat.c 	if (pkt->xt.hotdrop) {
pkt               358 net/netfilter/nft_compat.c 				 const struct nft_pktinfo *pkt)
pkt               362 net/netfilter/nft_compat.c 	__nft_match_eval(expr, regs, pkt, priv->info);
pkt               367 net/netfilter/nft_compat.c 			   const struct nft_pktinfo *pkt)
pkt               369 net/netfilter/nft_compat.c 	__nft_match_eval(expr, regs, pkt, nft_expr_priv(expr));
pkt                24 net/netfilter/nft_connlimit.c 					 const struct nft_pktinfo *pkt,
pkt                36 net/netfilter/nft_connlimit.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
pkt                40 net/netfilter/nft_connlimit.c 	} else if (!nf_ct_get_tuplepr(pkt->skb, skb_network_offset(pkt->skb),
pkt                41 net/netfilter/nft_connlimit.c 				      nft_pf(pkt), nft_net(pkt), &tuple)) {
pkt                46 net/netfilter/nft_connlimit.c 	if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
pkt               110 net/netfilter/nft_connlimit.c 					const struct nft_pktinfo *pkt)
pkt               114 net/netfilter/nft_connlimit.c 	nft_connlimit_do_eval(priv, regs, pkt, NULL);
pkt               167 net/netfilter/nft_connlimit.c 			       const struct nft_pktinfo *pkt)
pkt               171 net/netfilter/nft_connlimit.c 	nft_connlimit_do_eval(priv, regs, pkt, NULL);
pkt                30 net/netfilter/nft_counter.c 				       const struct nft_pktinfo *pkt)
pkt                41 net/netfilter/nft_counter.c 	this_cpu->bytes += pkt->skb->len;
pkt                50 net/netfilter/nft_counter.c 					const struct nft_pktinfo *pkt)
pkt                54 net/netfilter/nft_counter.c 	nft_counter_do_eval(priv, regs, pkt);
pkt               196 net/netfilter/nft_counter.c 			     const struct nft_pktinfo *pkt)
pkt               200 net/netfilter/nft_counter.c 	nft_counter_do_eval(priv, regs, pkt);
pkt                60 net/netfilter/nft_ct.c 			    const struct nft_pktinfo *pkt)
pkt                71 net/netfilter/nft_ct.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
pkt               235 net/netfilter/nft_ct.c 				 const struct nft_pktinfo *pkt)
pkt               239 net/netfilter/nft_ct.c 	struct sk_buff *skb = pkt->skb;
pkt               267 net/netfilter/nft_ct.c 		ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC);
pkt               281 net/netfilter/nft_ct.c 			    const struct nft_pktinfo *pkt)
pkt               284 net/netfilter/nft_ct.c 	struct sk_buff *skb = pkt->skb;
pkt               764 net/netfilter/nft_ct.c 			     const struct nft_pktinfo *pkt)
pkt               766 net/netfilter/nft_ct.c 	struct sk_buff *skb = pkt->skb;
pkt               770 net/netfilter/nft_ct.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
pkt               828 net/netfilter/nft_ct.c 				    const struct nft_pktinfo *pkt)
pkt               831 net/netfilter/nft_ct.c 	struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
pkt               835 net/netfilter/nft_ct.c 	if (priv->l4proto != pkt->tprot)
pkt               857 net/netfilter/nft_ct.c 		nf_ct_refresh(ct, pkt->skb, values[0]);
pkt              1064 net/netfilter/nft_ct.c 				   const struct nft_pktinfo *pkt)
pkt              1067 net/netfilter/nft_ct.c 	struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
pkt              1212 net/netfilter/nft_ct.c 				   const struct nft_pktinfo *pkt)
pkt              1222 net/netfilter/nft_ct.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
pkt                22 net/netfilter/nft_dup_netdev.c 				const struct nft_pktinfo *pkt)
pkt                27 net/netfilter/nft_dup_netdev.c 	nf_dup_netdev_egress(pkt, oif);
pkt                79 net/netfilter/nft_dynset.c 		     struct nft_regs *regs, const struct nft_pktinfo *pkt)
pkt               105 net/netfilter/nft_dynset.c 			sexpr->ops->eval(sexpr, regs, pkt);
pkt                38 net/netfilter/nft_exthdr.c 				 const struct nft_pktinfo *pkt)
pkt                45 net/netfilter/nft_exthdr.c 	err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
pkt                55 net/netfilter/nft_exthdr.c 	if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
pkt               131 net/netfilter/nft_exthdr.c 				 const struct nft_pktinfo *pkt)
pkt               135 net/netfilter/nft_exthdr.c 	struct sk_buff *skb = pkt->skb;
pkt               142 net/netfilter/nft_exthdr.c 	err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
pkt               152 net/netfilter/nft_exthdr.c 	if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
pkt               160 net/netfilter/nft_exthdr.c nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
pkt               165 net/netfilter/nft_exthdr.c 	if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP)
pkt               168 net/netfilter/nft_exthdr.c 	tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buffer);
pkt               176 net/netfilter/nft_exthdr.c 	return skb_header_pointer(pkt->skb, pkt->xt.thoff, *tcphdr_len, buffer);
pkt               181 net/netfilter/nft_exthdr.c 				const struct nft_pktinfo *pkt)
pkt               190 net/netfilter/nft_exthdr.c 	tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
pkt               224 net/netfilter/nft_exthdr.c 				    const struct nft_pktinfo *pkt)
pkt               233 net/netfilter/nft_exthdr.c 	tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
pkt               253 net/netfilter/nft_exthdr.c 		if (skb_ensure_writable(pkt->skb,
pkt               254 net/netfilter/nft_exthdr.c 					pkt->xt.thoff + i + priv->len))
pkt               257 net/netfilter/nft_exthdr.c 		tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
pkt               282 net/netfilter/nft_exthdr.c 			inet_proto_csum_replace2(&tcph->check, pkt->skb,
pkt               293 net/netfilter/nft_exthdr.c 			inet_proto_csum_replace4(&tcph->check, pkt->skb,
pkt                16 net/netfilter/nft_fib_inet.c 			      const struct nft_pktinfo *pkt)
pkt                20 net/netfilter/nft_fib_inet.c 	switch (nft_pf(pkt)) {
pkt                25 net/netfilter/nft_fib_inet.c 			return nft_fib4_eval(expr, regs, pkt);
pkt                27 net/netfilter/nft_fib_inet.c 			return nft_fib4_eval_type(expr, regs, pkt);
pkt                34 net/netfilter/nft_fib_inet.c 			return nft_fib6_eval(expr, regs, pkt);
pkt                36 net/netfilter/nft_fib_inet.c 			return nft_fib6_eval_type(expr, regs, pkt);
pkt                23 net/netfilter/nft_fib_netdev.c 				const struct nft_pktinfo *pkt)
pkt                27 net/netfilter/nft_fib_netdev.c 	switch (ntohs(pkt->skb->protocol)) {
pkt                32 net/netfilter/nft_fib_netdev.c 			return nft_fib4_eval(expr, regs, pkt);
pkt                34 net/netfilter/nft_fib_netdev.c 			return nft_fib4_eval_type(expr, regs, pkt);
pkt                43 net/netfilter/nft_fib_netdev.c 			return nft_fib6_eval(expr, regs, pkt);
pkt                45 net/netfilter/nft_fib_netdev.c 			return nft_fib6_eval_type(expr, regs, pkt);
pkt                22 net/netfilter/nft_flow_offload.c static int nft_flow_route(const struct nft_pktinfo *pkt,
pkt                27 net/netfilter/nft_flow_offload.c 	struct dst_entry *this_dst = skb_dst(pkt->skb);
pkt                32 net/netfilter/nft_flow_offload.c 	switch (nft_pf(pkt)) {
pkt                35 net/netfilter/nft_flow_offload.c 		fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
pkt                39 net/netfilter/nft_flow_offload.c 		fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
pkt                43 net/netfilter/nft_flow_offload.c 	nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
pkt                72 net/netfilter/nft_flow_offload.c 				  const struct nft_pktinfo *pkt)
pkt                84 net/netfilter/nft_flow_offload.c 	if (nft_flow_offload_skip(pkt->skb, nft_pf(pkt)))
pkt                87 net/netfilter/nft_flow_offload.c 	ct = nf_ct_get(pkt->skb, &ctinfo);
pkt                93 net/netfilter/nft_flow_offload.c 		tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
pkt               115 net/netfilter/nft_flow_offload.c 	if (nft_flow_route(pkt, ct, &route, dir) < 0)
pkt                26 net/netfilter/nft_fwd_netdev.c 				const struct nft_pktinfo *pkt)
pkt                32 net/netfilter/nft_fwd_netdev.c 	skb_set_redirected(pkt->skb, true);
pkt                34 net/netfilter/nft_fwd_netdev.c 	nf_fwd_netdev_egress(pkt, oif);
pkt                88 net/netfilter/nft_fwd_netdev.c 			      const struct nft_pktinfo *pkt)
pkt                94 net/netfilter/nft_fwd_netdev.c 	struct sk_buff *skb = pkt->skb;
pkt               136 net/netfilter/nft_fwd_netdev.c 	dev = dev_get_by_index_rcu(nft_net(pkt), oif);
pkt                28 net/netfilter/nft_hash.c 			   const struct nft_pktinfo *pkt)
pkt                48 net/netfilter/nft_hash.c 			     const struct nft_pktinfo *pkt)
pkt                51 net/netfilter/nft_hash.c 	struct sk_buff *skb = pkt->skb;
pkt                20 net/netfilter/nft_immediate.c 			const struct nft_pktinfo *pkt)
pkt               131 net/netfilter/nft_limit.c 				const struct nft_pktinfo *pkt)
pkt               180 net/netfilter/nft_limit.c 				 const struct nft_pktinfo *pkt)
pkt               183 net/netfilter/nft_limit.c 	u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
pkt               241 net/netfilter/nft_limit.c 				    const struct nft_pktinfo *pkt)
pkt               284 net/netfilter/nft_limit.c 				     const struct nft_pktinfo *pkt)
pkt               287 net/netfilter/nft_limit.c 	u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
pkt                64 net/netfilter/nft_log.c static void nft_log_eval_audit(const struct nft_pktinfo *pkt)
pkt                66 net/netfilter/nft_log.c 	struct sk_buff *skb = pkt->skb;
pkt                79 net/netfilter/nft_log.c 	switch (nft_pf(pkt)) {
pkt               106 net/netfilter/nft_log.c 			 const struct nft_pktinfo *pkt)
pkt               112 net/netfilter/nft_log.c 		nft_log_eval_audit(pkt);
pkt               116 net/netfilter/nft_log.c 	nf_log_packet(nft_net(pkt), nft_pf(pkt), nft_hook(pkt), pkt->skb,
pkt               117 net/netfilter/nft_log.c 		      nft_in(pkt), nft_out(pkt), &priv->loginfo, "%s",
pkt                28 net/netfilter/nft_lookup.c 		     const struct nft_pktinfo *pkt)
pkt                35 net/netfilter/nft_lookup.c 	found = set->ops->lookup(nft_net(pkt), set, &regs->data[priv->sreg],
pkt               104 net/netfilter/nft_masq.c 			       const struct nft_pktinfo *pkt)
pkt               117 net/netfilter/nft_masq.c 	regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
pkt               118 net/netfilter/nft_masq.c 						    &range, nft_out(pkt));
pkt               150 net/netfilter/nft_masq.c 			       const struct nft_pktinfo *pkt)
pkt               163 net/netfilter/nft_masq.c 	regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
pkt               164 net/netfilter/nft_masq.c 						    nft_out(pkt));
pkt               210 net/netfilter/nft_masq.c 			       const struct nft_pktinfo *pkt)
pkt               212 net/netfilter/nft_masq.c 	switch (nft_pf(pkt)) {
pkt               214 net/netfilter/nft_masq.c 		return nft_masq_ipv4_eval(expr, regs, pkt);
pkt               216 net/netfilter/nft_masq.c 		return nft_masq_ipv6_eval(expr, regs, pkt);
pkt                61 net/netfilter/nft_meta.c 		       const struct nft_pktinfo *pkt)
pkt                64 net/netfilter/nft_meta.c 	const struct sk_buff *skb = pkt->skb;
pkt                65 net/netfilter/nft_meta.c 	const struct net_device *in = nft_in(pkt), *out = nft_out(pkt);
pkt                77 net/netfilter/nft_meta.c 		nft_reg_store8(dest, nft_pf(pkt));
pkt                80 net/netfilter/nft_meta.c 		if (!pkt->tprot_set)
pkt                82 net/netfilter/nft_meta.c 		nft_reg_store8(dest, pkt->tprot);
pkt               115 net/netfilter/nft_meta.c 		    !net_eq(nft_net(pkt), sock_net(sk)))
pkt               132 net/netfilter/nft_meta.c 		    !net_eq(nft_net(pkt), sock_net(sk)))
pkt               166 net/netfilter/nft_meta.c 		switch (nft_pf(pkt)) {
pkt               224 net/netfilter/nft_meta.c 		    !net_eq(nft_net(pkt), sock_net(sk)))
pkt               271 net/netfilter/nft_meta.c 		       const struct nft_pktinfo *pkt)
pkt               274 net/netfilter/nft_meta.c 	struct sk_buff *skb = pkt->skb;
pkt               639 net/netfilter/nft_meta.c 				 const struct nft_pktinfo *pkt)
pkt               642 net/netfilter/nft_meta.c 	struct sk_buff *skb = pkt->skb;
pkt                35 net/netfilter/nft_nat.c 			 const struct nft_pktinfo *pkt)
pkt                39 net/netfilter/nft_nat.c 	struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo);
pkt               280 net/netfilter/nft_nat.c 			      const struct nft_pktinfo *pkt)
pkt               284 net/netfilter/nft_nat.c 	if (priv->family == nft_pf(pkt))
pkt               285 net/netfilter/nft_nat.c 		nft_nat_eval(expr, regs, pkt);
pkt                39 net/netfilter/nft_numgen.c 			    const struct nft_pktinfo *pkt)
pkt               118 net/netfilter/nft_numgen.c 			       const struct nft_pktinfo *pkt)
pkt                18 net/netfilter/nft_objref.c 			    const struct nft_pktinfo *pkt)
pkt                22 net/netfilter/nft_objref.c 	obj->ops->eval(obj, regs, pkt);
pkt               104 net/netfilter/nft_objref.c 				const struct nft_pktinfo *pkt)
pkt               112 net/netfilter/nft_objref.c 	found = set->ops->lookup(nft_net(pkt), set, &regs->data[priv->sreg],
pkt               119 net/netfilter/nft_objref.c 	obj->ops->eval(obj, regs, pkt);
pkt                21 net/netfilter/nft_osf.c 			 const struct nft_pktinfo *pkt)
pkt                25 net/netfilter/nft_osf.c 	struct sk_buff *skb = pkt->skb;
pkt                75 net/netfilter/nft_payload.c 		      const struct nft_pktinfo *pkt)
pkt                78 net/netfilter/nft_payload.c 	const struct sk_buff *skb = pkt->skb;
pkt               100 net/netfilter/nft_payload.c 		if (!pkt->tprot_set)
pkt               102 net/netfilter/nft_payload.c 		offset = pkt->xt.thoff;
pkt               413 net/netfilter/nft_payload.c static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
pkt               417 net/netfilter/nft_payload.c 	switch (pkt->tprot) {
pkt               422 net/netfilter/nft_payload.c 		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
pkt               435 net/netfilter/nft_payload.c 	*l4csum_offset += pkt->xt.thoff;
pkt               439 net/netfilter/nft_payload.c static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
pkt               449 net/netfilter/nft_payload.c 	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
pkt               494 net/netfilter/nft_payload.c 				 const struct nft_pktinfo *pkt)
pkt               497 net/netfilter/nft_payload.c 	struct sk_buff *skb = pkt->skb;
pkt               512 net/netfilter/nft_payload.c 		if (!pkt->tprot_set)
pkt               514 net/netfilter/nft_payload.c 		offset = pkt->xt.thoff;
pkt               534 net/netfilter/nft_payload.c 		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
pkt                30 net/netfilter/nft_queue.c 			   const struct nft_pktinfo *pkt)
pkt                42 net/netfilter/nft_queue.c 			queue = nfqueue_hash(pkt->skb, queue,
pkt                43 net/netfilter/nft_queue.c 					     priv->queues_total, nft_pf(pkt),
pkt                57 net/netfilter/nft_queue.c 				const struct nft_pktinfo *pkt)
pkt                35 net/netfilter/nft_quota.c 				     const struct nft_pktinfo *pkt)
pkt                37 net/netfilter/nft_quota.c 	if (nft_overquota(priv, pkt->skb) ^ nft_quota_invert(priv))
pkt                51 net/netfilter/nft_quota.c 			       const struct nft_pktinfo *pkt)
pkt                56 net/netfilter/nft_quota.c 	overquota = nft_overquota(priv, pkt->skb);
pkt                62 net/netfilter/nft_quota.c 		nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0,
pkt                63 net/netfilter/nft_quota.c 			       NFT_MSG_NEWOBJ, nft_pf(pkt), 0, GFP_ATOMIC);
pkt               185 net/netfilter/nft_quota.c 			   const struct nft_pktinfo *pkt)
pkt               189 net/netfilter/nft_quota.c 	nft_quota_do_eval(priv, regs, pkt);
pkt                24 net/netfilter/nft_range.c 		    struct nft_regs *regs, const struct nft_pktinfo *pkt)
pkt               107 net/netfilter/nft_redir.c 				const struct nft_pktinfo *pkt)
pkt               123 net/netfilter/nft_redir.c 	regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr, nft_hook(pkt));
pkt               155 net/netfilter/nft_redir.c 				const struct nft_pktinfo *pkt)
pkt               172 net/netfilter/nft_redir.c 		nf_nat_redirect_ipv6(pkt->skb, &range, nft_hook(pkt));
pkt               205 net/netfilter/nft_redir.c 				const struct nft_pktinfo *pkt)
pkt               207 net/netfilter/nft_redir.c 	switch (nft_pf(pkt)) {
pkt               209 net/netfilter/nft_redir.c 		return nft_redir_ipv4_eval(expr, regs, pkt);
pkt               211 net/netfilter/nft_redir.c 		return nft_redir_ipv6_eval(expr, regs, pkt);
pkt                19 net/netfilter/nft_reject_inet.c 				 const struct nft_pktinfo *pkt)
pkt                23 net/netfilter/nft_reject_inet.c 	switch (nft_pf(pkt)) {
pkt                27 net/netfilter/nft_reject_inet.c 			nf_send_unreach(pkt->skb, priv->icmp_code,
pkt                28 net/netfilter/nft_reject_inet.c 					nft_hook(pkt));
pkt                31 net/netfilter/nft_reject_inet.c 			nf_send_reset(nft_net(pkt), pkt->skb, nft_hook(pkt));
pkt                34 net/netfilter/nft_reject_inet.c 			nf_send_unreach(pkt->skb,
pkt                36 net/netfilter/nft_reject_inet.c 					nft_hook(pkt));
pkt                43 net/netfilter/nft_reject_inet.c 			nf_send_unreach6(nft_net(pkt), pkt->skb,
pkt                44 net/netfilter/nft_reject_inet.c 					 priv->icmp_code, nft_hook(pkt));
pkt                47 net/netfilter/nft_reject_inet.c 			nf_send_reset6(nft_net(pkt), pkt->skb, nft_hook(pkt));
pkt                50 net/netfilter/nft_reject_inet.c 			nf_send_unreach6(nft_net(pkt), pkt->skb,
pkt                52 net/netfilter/nft_reject_inet.c 					 nft_hook(pkt));
pkt                21 net/netfilter/nft_rt.c static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skbdst)
pkt                24 net/netfilter/nft_rt.c 	const struct sk_buff *skb = pkt->skb;
pkt                30 net/netfilter/nft_rt.c 	switch (nft_pf(pkt)) {
pkt                41 net/netfilter/nft_rt.c 	nf_route(nft_net(pkt), &dst, &fl, false, nft_pf(pkt));
pkt                55 net/netfilter/nft_rt.c 		     const struct nft_pktinfo *pkt)
pkt                58 net/netfilter/nft_rt.c 	const struct sk_buff *skb = pkt->skb;
pkt                73 net/netfilter/nft_rt.c 		if (nft_pf(pkt) != NFPROTO_IPV4)
pkt                80 net/netfilter/nft_rt.c 		if (nft_pf(pkt) != NFPROTO_IPV6)
pkt                88 net/netfilter/nft_rt.c 		nft_reg_store16(dest, get_tcpmss(pkt, dst));
pkt                19 net/netfilter/nft_socket.c 			    const struct nft_pktinfo *pkt)
pkt                22 net/netfilter/nft_socket.c 	struct sk_buff *skb = pkt->skb;
pkt                26 net/netfilter/nft_socket.c 	if (sk && !net_eq(nft_net(pkt), sock_net(sk)))
pkt                30 net/netfilter/nft_socket.c 		switch(nft_pf(pkt)) {
pkt                32 net/netfilter/nft_socket.c 			sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt));
pkt                36 net/netfilter/nft_socket.c 			sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt));
pkt                46 net/netfilter/nft_synproxy.c 				 const struct nft_pktinfo *pkt,
pkt                52 net/netfilter/nft_synproxy.c 	struct net *net = nft_net(pkt);
pkt                54 net/netfilter/nft_synproxy.c 	struct sk_buff *skb = pkt->skb;
pkt                77 net/netfilter/nft_synproxy.c 				 const struct nft_pktinfo *pkt,
pkt                83 net/netfilter/nft_synproxy.c 	struct net *net = nft_net(pkt);
pkt                85 net/netfilter/nft_synproxy.c 	struct sk_buff *skb = pkt->skb;
pkt               108 net/netfilter/nft_synproxy.c 				 const struct nft_pktinfo *pkt)
pkt               111 net/netfilter/nft_synproxy.c 	struct sk_buff *skb = pkt->skb;
pkt               112 net/netfilter/nft_synproxy.c 	int thoff = pkt->xt.thoff;
pkt               116 net/netfilter/nft_synproxy.c 	if (pkt->tprot != IPPROTO_TCP) {
pkt               121 net/netfilter/nft_synproxy.c 	if (nf_ip_checksum(skb, nft_hook(pkt), thoff, IPPROTO_TCP)) {
pkt               126 net/netfilter/nft_synproxy.c 	tcp = skb_header_pointer(skb, pkt->xt.thoff,
pkt               141 net/netfilter/nft_synproxy.c 		nft_synproxy_eval_v4(priv, regs, pkt, tcp, &_tcph, &opts);
pkt               145 net/netfilter/nft_synproxy.c 		nft_synproxy_eval_v6(priv, regs, pkt, tcp, &_tcph, &opts);
pkt               243 net/netfilter/nft_synproxy.c 			      const struct nft_pktinfo *pkt)
pkt               247 net/netfilter/nft_synproxy.c 	nft_synproxy_do_eval(priv, regs, pkt);
pkt               324 net/netfilter/nft_synproxy.c 				  const struct nft_pktinfo *pkt)
pkt               328 net/netfilter/nft_synproxy.c 	nft_synproxy_do_eval(priv, regs, pkt);
pkt                23 net/netfilter/nft_tproxy.c 			       const struct nft_pktinfo *pkt)
pkt                26 net/netfilter/nft_tproxy.c 	struct sk_buff *skb = pkt->skb;
pkt                43 net/netfilter/nft_tproxy.c 	sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
pkt                60 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_handle_time_wait4(nft_net(pkt), skb, taddr, tport, sk);
pkt                65 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
pkt                80 net/netfilter/nft_tproxy.c 			       const struct nft_pktinfo *pkt)
pkt                83 net/netfilter/nft_tproxy.c 	struct sk_buff *skb = pkt->skb;
pkt                86 net/netfilter/nft_tproxy.c 	int thoff = pkt->xt.thoff;
pkt                94 net/netfilter/nft_tproxy.c 	if (!pkt->tprot_set) {
pkt                98 net/netfilter/nft_tproxy.c 	l4proto = pkt->tprot;
pkt               110 net/netfilter/nft_tproxy.c 	sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff, l4proto,
pkt               113 net/netfilter/nft_tproxy.c 				   nft_in(pkt), NF_TPROXY_LOOKUP_ESTABLISHED);
pkt               128 net/netfilter/nft_tproxy.c 						 nft_net(pkt),
pkt               136 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff,
pkt               139 net/netfilter/nft_tproxy.c 					   nft_in(pkt), NF_TPROXY_LOOKUP_LISTENER);
pkt               152 net/netfilter/nft_tproxy.c 			    const struct nft_pktinfo *pkt)
pkt               156 net/netfilter/nft_tproxy.c 	switch (nft_pf(pkt)) {
pkt               161 net/netfilter/nft_tproxy.c 			nft_tproxy_eval_v4(expr, regs, pkt);
pkt               170 net/netfilter/nft_tproxy.c 			nft_tproxy_eval_v6(expr, regs, pkt);
pkt                23 net/netfilter/nft_tunnel.c 				const struct nft_pktinfo *pkt)
pkt                29 net/netfilter/nft_tunnel.c 	tun_info = skb_tunnel_info(pkt->skb);
pkt               432 net/netfilter/nft_tunnel.c 				       const struct nft_pktinfo *pkt)
pkt               435 net/netfilter/nft_tunnel.c 	struct sk_buff *skb = pkt->skb;
pkt               160 net/netfilter/nft_xfrm.c 				    const struct nft_pktinfo *pkt)
pkt               162 net/netfilter/nft_xfrm.c 	const struct sec_path *sp = skb_sec_path(pkt->skb);
pkt               176 net/netfilter/nft_xfrm.c 				  const struct nft_pktinfo *pkt)
pkt               178 net/netfilter/nft_xfrm.c 	const struct dst_entry *dst = skb_dst(pkt->skb);
pkt               195 net/netfilter/nft_xfrm.c 			      const struct nft_pktinfo *pkt)
pkt               201 net/netfilter/nft_xfrm.c 		nft_xfrm_get_eval_in(priv, regs, pkt);
pkt               204 net/netfilter/nft_xfrm.c 		nft_xfrm_get_eval_out(priv, regs, pkt);
pkt               210 net/packet/af_packet.c 		struct sockaddr_pkt pkt;
pkt              1833 net/packet/af_packet.c 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
pkt               340 net/qrtr/qrtr.c static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
pkt               350 net/qrtr/qrtr.c 	*pkt = skb_put_zero(skb, pkt_len);
pkt               365 net/qrtr/qrtr.c 	struct qrtr_ctrl_pkt *pkt;
pkt               395 net/qrtr/qrtr.c 			skb = qrtr_alloc_ctrl_packet(&pkt);
pkt               399 net/qrtr/qrtr.c 			pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
pkt               400 net/qrtr/qrtr.c 			pkt->client.node = cpu_to_le32(dst.sq_node);
pkt               401 net/qrtr/qrtr.c 			pkt->client.port = cpu_to_le32(dst.sq_port);
pkt               456 net/qrtr/qrtr.c 	struct qrtr_ctrl_pkt *pkt;
pkt               464 net/qrtr/qrtr.c 	skb = qrtr_alloc_ctrl_packet(&pkt);
pkt               466 net/qrtr/qrtr.c 		pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
pkt               504 net/qrtr/qrtr.c 	struct qrtr_ctrl_pkt *pkt;
pkt               513 net/qrtr/qrtr.c 	skb = qrtr_alloc_ctrl_packet(&pkt);
pkt               515 net/qrtr/qrtr.c 		pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
pkt               516 net/qrtr/qrtr.c 		pkt->client.node = cpu_to_le32(ipc->us.sq_node);
pkt               517 net/qrtr/qrtr.c 		pkt->client.port = cpu_to_le32(ipc->us.sq_port);
pkt                36 net/rxrpc/conn_event.c 	} __attribute__((packed)) pkt;
pkt                61 net/rxrpc/conn_event.c 	iov[0].iov_base	= &pkt;
pkt                62 net/rxrpc/conn_event.c 	iov[0].iov_len	= sizeof(pkt.whdr);
pkt                68 net/rxrpc/conn_event.c 	pkt.whdr.epoch		= htonl(conn->proto.epoch);
pkt                69 net/rxrpc/conn_event.c 	pkt.whdr.cid		= htonl(conn->proto.cid | channel);
pkt                70 net/rxrpc/conn_event.c 	pkt.whdr.callNumber	= htonl(call_id);
pkt                71 net/rxrpc/conn_event.c 	pkt.whdr.seq		= 0;
pkt                72 net/rxrpc/conn_event.c 	pkt.whdr.type		= chan->last_type;
pkt                73 net/rxrpc/conn_event.c 	pkt.whdr.flags		= conn->out_clientflag;
pkt                74 net/rxrpc/conn_event.c 	pkt.whdr.userStatus	= 0;
pkt                75 net/rxrpc/conn_event.c 	pkt.whdr.securityIndex	= conn->security_ix;
pkt                76 net/rxrpc/conn_event.c 	pkt.whdr._rsvd		= 0;
pkt                77 net/rxrpc/conn_event.c 	pkt.whdr.serviceId	= htons(conn->service_id);
pkt                79 net/rxrpc/conn_event.c 	len = sizeof(pkt.whdr);
pkt                82 net/rxrpc/conn_event.c 		pkt.abort_code	= htonl(chan->last_abort);
pkt                83 net/rxrpc/conn_event.c 		iov[0].iov_len += sizeof(pkt.abort_code);
pkt                84 net/rxrpc/conn_event.c 		len += sizeof(pkt.abort_code);
pkt                91 net/rxrpc/conn_event.c 		pkt.ack.bufferSpace	= 0;
pkt                92 net/rxrpc/conn_event.c 		pkt.ack.maxSkew		= htons(skb ? skb->priority : 0);
pkt                93 net/rxrpc/conn_event.c 		pkt.ack.firstPacket	= htonl(chan->last_seq + 1);
pkt                94 net/rxrpc/conn_event.c 		pkt.ack.previousPacket	= htonl(chan->last_seq);
pkt                95 net/rxrpc/conn_event.c 		pkt.ack.serial		= htonl(skb ? sp->hdr.serial : 0);
pkt                96 net/rxrpc/conn_event.c 		pkt.ack.reason		= skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt                97 net/rxrpc/conn_event.c 		pkt.ack.nAcks		= 0;
pkt               102 net/rxrpc/conn_event.c 		pkt.whdr.flags		|= RXRPC_SLOW_START_OK;
pkt               104 net/rxrpc/conn_event.c 		iov[0].iov_len += sizeof(pkt.ack);
pkt               105 net/rxrpc/conn_event.c 		len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
pkt               121 net/rxrpc/conn_event.c 	pkt.whdr.serial = htonl(serial);
pkt               129 net/rxrpc/conn_event.c 				   ntohl(pkt.ack.firstPacket),
pkt               130 net/rxrpc/conn_event.c 				   ntohl(pkt.ack.serial),
pkt               131 net/rxrpc/conn_event.c 				   pkt.ack.reason, 0);
pkt               142 net/rxrpc/conn_event.c 		trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr,
pkt                71 net/rxrpc/output.c 				 struct rxrpc_ack_buffer *pkt,
pkt                80 net/rxrpc/output.c 	u8 *ackp = pkt->acks;
pkt                89 net/rxrpc/output.c 	pkt->ack.bufferSpace	= htons(8);
pkt                90 net/rxrpc/output.c 	pkt->ack.maxSkew	= htons(0);
pkt                91 net/rxrpc/output.c 	pkt->ack.firstPacket	= htonl(hard_ack + 1);
pkt                92 net/rxrpc/output.c 	pkt->ack.previousPacket	= htonl(call->ackr_prev_seq);
pkt                93 net/rxrpc/output.c 	pkt->ack.serial		= htonl(serial);
pkt                94 net/rxrpc/output.c 	pkt->ack.reason		= reason;
pkt                95 net/rxrpc/output.c 	pkt->ack.nAcks		= top - hard_ack;
pkt                98 net/rxrpc/output.c 		pkt->whdr.flags |= RXRPC_REQUEST_ACK;
pkt               115 net/rxrpc/output.c 	pkt->ackinfo.rxMTU	= htonl(rxrpc_rx_mtu);
pkt               116 net/rxrpc/output.c 	pkt->ackinfo.maxMTU	= htonl(mtu);
pkt               117 net/rxrpc/output.c 	pkt->ackinfo.rwind	= htonl(call->rx_winsize);
pkt               118 net/rxrpc/output.c 	pkt->ackinfo.jumbo_max	= htonl(jmax);
pkt               133 net/rxrpc/output.c 	struct rxrpc_ack_buffer *pkt;
pkt               145 net/rxrpc/output.c 	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
pkt               146 net/rxrpc/output.c 	if (!pkt)
pkt               157 net/rxrpc/output.c 	pkt->whdr.epoch		= htonl(conn->proto.epoch);
pkt               158 net/rxrpc/output.c 	pkt->whdr.cid		= htonl(call->cid);
pkt               159 net/rxrpc/output.c 	pkt->whdr.callNumber	= htonl(call->call_id);
pkt               160 net/rxrpc/output.c 	pkt->whdr.seq		= 0;
pkt               161 net/rxrpc/output.c 	pkt->whdr.type		= RXRPC_PACKET_TYPE_ACK;
pkt               162 net/rxrpc/output.c 	pkt->whdr.flags		= RXRPC_SLOW_START_OK | conn->out_clientflag;
pkt               163 net/rxrpc/output.c 	pkt->whdr.userStatus	= 0;
pkt               164 net/rxrpc/output.c 	pkt->whdr.securityIndex	= call->security_ix;
pkt               165 net/rxrpc/output.c 	pkt->whdr._rsvd		= 0;
pkt               166 net/rxrpc/output.c 	pkt->whdr.serviceId	= htons(call->service_id);
pkt               180 net/rxrpc/output.c 	n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);
pkt               184 net/rxrpc/output.c 	iov[0].iov_base	= pkt;
pkt               185 net/rxrpc/output.c 	iov[0].iov_len	= sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
pkt               186 net/rxrpc/output.c 	iov[1].iov_base = &pkt->ackinfo;
pkt               187 net/rxrpc/output.c 	iov[1].iov_len	= sizeof(pkt->ackinfo);
pkt               191 net/rxrpc/output.c 	pkt->whdr.serial = htonl(serial);
pkt               193 net/rxrpc/output.c 			   ntohl(pkt->ack.firstPacket),
pkt               194 net/rxrpc/output.c 			   ntohl(pkt->ack.serial),
pkt               195 net/rxrpc/output.c 			   pkt->ack.reason, pkt->ack.nAcks);
pkt               218 net/rxrpc/output.c 		trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
pkt               226 net/rxrpc/output.c 			rxrpc_propose_ACK(call, pkt->ack.reason,
pkt               227 net/rxrpc/output.c 					  ntohl(pkt->ack.serial),
pkt               243 net/rxrpc/output.c 	kfree(pkt);
pkt               253 net/rxrpc/output.c 	struct rxrpc_abort_buffer pkt;
pkt               280 net/rxrpc/output.c 	pkt.whdr.epoch		= htonl(conn->proto.epoch);
pkt               281 net/rxrpc/output.c 	pkt.whdr.cid		= htonl(call->cid);
pkt               282 net/rxrpc/output.c 	pkt.whdr.callNumber	= htonl(call->call_id);
pkt               283 net/rxrpc/output.c 	pkt.whdr.seq		= 0;
pkt               284 net/rxrpc/output.c 	pkt.whdr.type		= RXRPC_PACKET_TYPE_ABORT;
pkt               285 net/rxrpc/output.c 	pkt.whdr.flags		= conn->out_clientflag;
pkt               286 net/rxrpc/output.c 	pkt.whdr.userStatus	= 0;
pkt               287 net/rxrpc/output.c 	pkt.whdr.securityIndex	= call->security_ix;
pkt               288 net/rxrpc/output.c 	pkt.whdr._rsvd		= 0;
pkt               289 net/rxrpc/output.c 	pkt.whdr.serviceId	= htons(call->service_id);
pkt               290 net/rxrpc/output.c 	pkt.abort_code		= htonl(call->abort_code);
pkt               292 net/rxrpc/output.c 	iov[0].iov_base	= &pkt;
pkt               293 net/rxrpc/output.c 	iov[0].iov_len	= sizeof(pkt);
pkt               296 net/rxrpc/output.c 	pkt.whdr.serial = htonl(serial);
pkt               299 net/rxrpc/output.c 			     &msg, iov, 1, sizeof(pkt));
pkt               305 net/rxrpc/output.c 		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
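
The rxrpc lines above build wire headers field by field with htonl()/htons(), assign the serial last, and hand header plus trailer to the socket layer as an iovec. The following is a small userspace sketch of that serialization pattern under a simplified, hypothetical struct layout; it is not the rxrpc wire format and sends nothing on a real network.

/*
 * Illustrative userspace sketch: serialize a packed header into network byte
 * order and hand it to writev() as a two-element iovec, mirroring the
 * pkt.whdr / iov[] pattern in the listing above. Struct layout is hypothetical.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

struct wire_hdr {
	uint32_t epoch;
	uint32_t cid;
	uint32_t call_number;
	uint32_t serial;
	uint16_t service_id;
	uint8_t  type;
	uint8_t  flags;
} __attribute__((packed));

int main(void)
{
	struct wire_hdr whdr;
	uint32_t abort_code = htonl(22);	/* trailer carried as a second iovec */
	struct iovec iov[2];
	int fds[2];
	ssize_t n;

	memset(&whdr, 0, sizeof(whdr));
	whdr.epoch       = htonl(0x12345678);
	whdr.cid         = htonl(0x0000abcd);
	whdr.call_number = htonl(1);
	whdr.service_id  = htons(2);
	whdr.type        = 4;
	whdr.serial      = htonl(100);		/* assigned last, as in the listing */

	iov[0].iov_base = &whdr;
	iov[0].iov_len  = sizeof(whdr);
	iov[1].iov_base = &abort_code;
	iov[1].iov_len  = sizeof(abort_code);

	if (pipe(fds) < 0)			/* stand-in for a UDP socket */
		return 1;
	n = writev(fds[1], iov, 2);
	printf("wrote %zd bytes (%zu header + %zu trailer)\n",
	       n, sizeof(whdr), sizeof(abort_code));
	return 0;
}
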
pkt               215 net/sctp/output.c static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
pkt               218 net/sctp/output.c 	struct sctp_association *asoc = pkt->transport->asoc;
pkt               229 net/sctp/output.c 	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
pkt               245 net/sctp/output.c 	retval = __sctp_packet_append_chunk(pkt, auth);
pkt               254 net/sctp/output.c static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
pkt               262 net/sctp/output.c 	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
pkt               263 net/sctp/output.c 	    !pkt->has_cookie_echo) {
pkt               266 net/sctp/output.c 		asoc = pkt->transport->asoc;
pkt               273 net/sctp/output.c 			if (pkt->transport->sack_generation !=
pkt               274 net/sctp/output.c 			    pkt->transport->asoc->peer.sack_generation)
pkt               280 net/sctp/output.c 				retval = __sctp_packet_append_chunk(pkt, sack);
pkt               591 net/sctp/outqueue.c static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
pkt               594 net/sctp/outqueue.c 	struct sctp_transport *transport = pkt->transport;
pkt               657 net/sctp/outqueue.c 		status = sctp_packet_append_chunk(pkt, chunk);
pkt               661 net/sctp/outqueue.c 			if (!pkt->has_data && !pkt->has_cookie_echo) {
pkt               668 net/sctp/outqueue.c 				sctp_packet_transmit(pkt, gfp);
pkt               673 net/sctp/outqueue.c 			error = sctp_packet_transmit(pkt, gfp);
pkt               689 net/sctp/outqueue.c 			error = sctp_packet_transmit(pkt, gfp);
pkt               699 net/sctp/outqueue.c 			error = sctp_packet_transmit(pkt, gfp);
pkt              1273 net/sctp/sm_statefuns.c 	struct sctp_packet *pkt;
pkt              1295 net/sctp/sm_statefuns.c 	pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
pkt              1297 net/sctp/sm_statefuns.c 	if (!pkt)
pkt              1299 net/sctp/sm_statefuns.c 	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
pkt               105 net/vmw_vsock/virtio_transport.c 		struct virtio_vsock_pkt *pkt;
pkt               107 net/vmw_vsock/virtio_transport.c 		pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
pkt               108 net/vmw_vsock/virtio_transport.c 		list_del_init(&pkt->list);
pkt               110 net/vmw_vsock/virtio_transport.c 		virtio_transport_recv_pkt(pkt);
pkt               117 net/vmw_vsock/virtio_transport.c 					      struct virtio_vsock_pkt *pkt)
pkt               119 net/vmw_vsock/virtio_transport.c 	int len = pkt->len;
pkt               122 net/vmw_vsock/virtio_transport.c 	list_add_tail(&pkt->list, &vsock->loopback_list);
pkt               147 net/vmw_vsock/virtio_transport.c 		struct virtio_vsock_pkt *pkt;
pkt               158 net/vmw_vsock/virtio_transport.c 		pkt = list_first_entry(&vsock->send_pkt_list,
pkt               160 net/vmw_vsock/virtio_transport.c 		list_del_init(&pkt->list);
pkt               163 net/vmw_vsock/virtio_transport.c 		virtio_transport_deliver_tap_pkt(pkt);
pkt               165 net/vmw_vsock/virtio_transport.c 		reply = pkt->reply;
pkt               167 net/vmw_vsock/virtio_transport.c 		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
pkt               169 net/vmw_vsock/virtio_transport.c 		if (pkt->buf) {
pkt               170 net/vmw_vsock/virtio_transport.c 			sg_init_one(&buf, pkt->buf, pkt->len);
pkt               174 net/vmw_vsock/virtio_transport.c 		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
pkt               180 net/vmw_vsock/virtio_transport.c 			list_add(&pkt->list, &vsock->send_pkt_list);
pkt               210 net/vmw_vsock/virtio_transport.c virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
pkt               213 net/vmw_vsock/virtio_transport.c 	int len = pkt->len;
pkt               218 net/vmw_vsock/virtio_transport.c 		virtio_transport_free_pkt(pkt);
pkt               223 net/vmw_vsock/virtio_transport.c 	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
pkt               224 net/vmw_vsock/virtio_transport.c 		len = virtio_transport_send_pkt_loopback(vsock, pkt);
pkt               228 net/vmw_vsock/virtio_transport.c 	if (pkt->reply)
pkt               232 net/vmw_vsock/virtio_transport.c 	list_add_tail(&pkt->list, &vsock->send_pkt_list);
pkt               246 net/vmw_vsock/virtio_transport.c 	struct virtio_vsock_pkt *pkt, *n;
pkt               258 net/vmw_vsock/virtio_transport.c 	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
pkt               259 net/vmw_vsock/virtio_transport.c 		if (pkt->vsk != vsk)
pkt               261 net/vmw_vsock/virtio_transport.c 		list_move(&pkt->list, &freeme);
pkt               265 net/vmw_vsock/virtio_transport.c 	list_for_each_entry_safe(pkt, n, &freeme, list) {
pkt               266 net/vmw_vsock/virtio_transport.c 		if (pkt->reply)
pkt               268 net/vmw_vsock/virtio_transport.c 		list_del(&pkt->list);
pkt               269 net/vmw_vsock/virtio_transport.c 		virtio_transport_free_pkt(pkt);
pkt               292 net/vmw_vsock/virtio_transport.c 	struct virtio_vsock_pkt *pkt;
pkt               300 net/vmw_vsock/virtio_transport.c 		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
pkt               301 net/vmw_vsock/virtio_transport.c 		if (!pkt)
pkt               304 net/vmw_vsock/virtio_transport.c 		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
pkt               305 net/vmw_vsock/virtio_transport.c 		if (!pkt->buf) {
pkt               306 net/vmw_vsock/virtio_transport.c 			virtio_transport_free_pkt(pkt);
pkt               310 net/vmw_vsock/virtio_transport.c 		pkt->buf_len = buf_len;
pkt               311 net/vmw_vsock/virtio_transport.c 		pkt->len = buf_len;
pkt               313 net/vmw_vsock/virtio_transport.c 		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
pkt               316 net/vmw_vsock/virtio_transport.c 		sg_init_one(&buf, pkt->buf, buf_len);
pkt               318 net/vmw_vsock/virtio_transport.c 		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
pkt               320 net/vmw_vsock/virtio_transport.c 			virtio_transport_free_pkt(pkt);
pkt               344 net/vmw_vsock/virtio_transport.c 		struct virtio_vsock_pkt *pkt;
pkt               348 net/vmw_vsock/virtio_transport.c 		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
pkt               349 net/vmw_vsock/virtio_transport.c 			virtio_transport_free_pkt(pkt);
pkt               389 net/vmw_vsock/virtio_transport.c 			struct virtio_vsock_pkt *pkt;
pkt               400 net/vmw_vsock/virtio_transport.c 			pkt = virtqueue_get_buf(vq, &len);
pkt               401 net/vmw_vsock/virtio_transport.c 			if (!pkt) {
pkt               408 net/vmw_vsock/virtio_transport.c 			if (unlikely(len < sizeof(pkt->hdr) ||
pkt               409 net/vmw_vsock/virtio_transport.c 				     len > sizeof(pkt->hdr) + pkt->len)) {
pkt               410 net/vmw_vsock/virtio_transport.c 				virtio_transport_free_pkt(pkt);
pkt               414 net/vmw_vsock/virtio_transport.c 			pkt->len = len - sizeof(pkt->hdr);
pkt               415 net/vmw_vsock/virtio_transport.c 			virtio_transport_deliver_tap_pkt(pkt);
pkt               416 net/vmw_vsock/virtio_transport.c 			virtio_transport_recv_pkt(pkt);
pkt               677 net/vmw_vsock/virtio_transport.c 	struct virtio_vsock_pkt *pkt;
pkt               709 net/vmw_vsock/virtio_transport.c 	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
pkt               710 net/vmw_vsock/virtio_transport.c 		virtio_transport_free_pkt(pkt);
pkt               714 net/vmw_vsock/virtio_transport.c 	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
pkt               715 net/vmw_vsock/virtio_transport.c 		virtio_transport_free_pkt(pkt);
pkt               720 net/vmw_vsock/virtio_transport.c 		pkt = list_first_entry(&vsock->send_pkt_list,
pkt               722 net/vmw_vsock/virtio_transport.c 		list_del(&pkt->list);
pkt               723 net/vmw_vsock/virtio_transport.c 		virtio_transport_free_pkt(pkt);
pkt               729 net/vmw_vsock/virtio_transport.c 		pkt = list_first_entry(&vsock->loopback_list,
pkt               731 net/vmw_vsock/virtio_transport.c 		list_del(&pkt->list);
pkt               732 net/vmw_vsock/virtio_transport.c 		virtio_transport_free_pkt(pkt);
pkt                47 net/vmw_vsock/virtio_transport_common.c 	struct virtio_vsock_pkt *pkt;
pkt                50 net/vmw_vsock/virtio_transport_common.c 	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
pkt                51 net/vmw_vsock/virtio_transport_common.c 	if (!pkt)
pkt                54 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.type		= cpu_to_le16(info->type);
pkt                55 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.op		= cpu_to_le16(info->op);
pkt                56 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.src_cid	= cpu_to_le64(src_cid);
pkt                57 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.dst_cid	= cpu_to_le64(dst_cid);
pkt                58 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.src_port	= cpu_to_le32(src_port);
pkt                59 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.dst_port	= cpu_to_le32(dst_port);
pkt                60 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.flags		= cpu_to_le32(info->flags);
pkt                61 net/vmw_vsock/virtio_transport_common.c 	pkt->len		= len;
pkt                62 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.len		= cpu_to_le32(len);
pkt                63 net/vmw_vsock/virtio_transport_common.c 	pkt->reply		= info->reply;
pkt                64 net/vmw_vsock/virtio_transport_common.c 	pkt->vsk		= info->vsk;
pkt                67 net/vmw_vsock/virtio_transport_common.c 		pkt->buf = kmalloc(len, GFP_KERNEL);
pkt                68 net/vmw_vsock/virtio_transport_common.c 		if (!pkt->buf)
pkt                71 net/vmw_vsock/virtio_transport_common.c 		pkt->buf_len = len;
pkt                73 net/vmw_vsock/virtio_transport_common.c 		err = memcpy_from_msg(pkt->buf, info->msg, len);
pkt                85 net/vmw_vsock/virtio_transport_common.c 	return pkt;
pkt                88 net/vmw_vsock/virtio_transport_common.c 	kfree(pkt->buf);
pkt                90 net/vmw_vsock/virtio_transport_common.c 	kfree(pkt);
pkt                97 net/vmw_vsock/virtio_transport_common.c 	struct virtio_vsock_pkt *pkt = opaque;
pkt               107 net/vmw_vsock/virtio_transport_common.c 	payload_len = le32_to_cpu(pkt->hdr.len);
pkt               108 net/vmw_vsock/virtio_transport_common.c 	payload_buf = pkt->buf + pkt->off;
pkt               110 net/vmw_vsock/virtio_transport_common.c 	skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
pkt               118 net/vmw_vsock/virtio_transport_common.c 	hdr->src_cid = pkt->hdr.src_cid;
pkt               119 net/vmw_vsock/virtio_transport_common.c 	hdr->src_port = pkt->hdr.src_port;
pkt               120 net/vmw_vsock/virtio_transport_common.c 	hdr->dst_cid = pkt->hdr.dst_cid;
pkt               121 net/vmw_vsock/virtio_transport_common.c 	hdr->dst_port = pkt->hdr.dst_port;
pkt               124 net/vmw_vsock/virtio_transport_common.c 	hdr->len = cpu_to_le16(sizeof(pkt->hdr));
pkt               127 net/vmw_vsock/virtio_transport_common.c 	switch (le16_to_cpu(pkt->hdr.op)) {
pkt               148 net/vmw_vsock/virtio_transport_common.c 	skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
pkt               157 net/vmw_vsock/virtio_transport_common.c void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
pkt               159 net/vmw_vsock/virtio_transport_common.c 	vsock_deliver_tap(virtio_transport_build_skb, pkt);
pkt               168 net/vmw_vsock/virtio_transport_common.c 	struct virtio_vsock_pkt *pkt;
pkt               194 net/vmw_vsock/virtio_transport_common.c 	pkt = virtio_transport_alloc_pkt(info, pkt_len,
pkt               197 net/vmw_vsock/virtio_transport_common.c 	if (!pkt) {
pkt               202 net/vmw_vsock/virtio_transport_common.c 	virtio_transport_inc_tx_pkt(vvs, pkt);
pkt               204 net/vmw_vsock/virtio_transport_common.c 	return virtio_transport_get_ops()->send_pkt(pkt);
pkt               208 net/vmw_vsock/virtio_transport_common.c 					struct virtio_vsock_pkt *pkt)
pkt               210 net/vmw_vsock/virtio_transport_common.c 	if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
pkt               213 net/vmw_vsock/virtio_transport_common.c 	vvs->rx_bytes += pkt->len;
pkt               218 net/vmw_vsock/virtio_transport_common.c 					struct virtio_vsock_pkt *pkt)
pkt               220 net/vmw_vsock/virtio_transport_common.c 	vvs->rx_bytes -= pkt->len;
pkt               221 net/vmw_vsock/virtio_transport_common.c 	vvs->fwd_cnt += pkt->len;
pkt               224 net/vmw_vsock/virtio_transport_common.c void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
pkt               228 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
pkt               229 net/vmw_vsock/virtio_transport_common.c 	pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
pkt               276 net/vmw_vsock/virtio_transport_common.c 	struct virtio_vsock_pkt *pkt;
pkt               283 net/vmw_vsock/virtio_transport_common.c 		pkt = list_first_entry(&vvs->rx_queue,
pkt               287 net/vmw_vsock/virtio_transport_common.c 		if (bytes > pkt->len - pkt->off)
pkt               288 net/vmw_vsock/virtio_transport_common.c 			bytes = pkt->len - pkt->off;
pkt               295 net/vmw_vsock/virtio_transport_common.c 		err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
pkt               302 net/vmw_vsock/virtio_transport_common.c 		pkt->off += bytes;
pkt               303 net/vmw_vsock/virtio_transport_common.c 		if (pkt->off == pkt->len) {
pkt               304 net/vmw_vsock/virtio_transport_common.c 			virtio_transport_dec_rx_pkt(vvs, pkt);
pkt               305 net/vmw_vsock/virtio_transport_common.c 			list_del(&pkt->list);
pkt               306 net/vmw_vsock/virtio_transport_common.c 			virtio_transport_free_pkt(pkt);
pkt               680 net/vmw_vsock/virtio_transport_common.c 				  struct virtio_vsock_pkt *pkt)
pkt               685 net/vmw_vsock/virtio_transport_common.c 		.reply = !!pkt,
pkt               690 net/vmw_vsock/virtio_transport_common.c 	if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
pkt               699 net/vmw_vsock/virtio_transport_common.c static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
pkt               705 net/vmw_vsock/virtio_transport_common.c 		.type = le16_to_cpu(pkt->hdr.type),
pkt               710 net/vmw_vsock/virtio_transport_common.c 	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
pkt               714 net/vmw_vsock/virtio_transport_common.c 					   le64_to_cpu(pkt->hdr.dst_cid),
pkt               715 net/vmw_vsock/virtio_transport_common.c 					   le32_to_cpu(pkt->hdr.dst_port),
pkt               716 net/vmw_vsock/virtio_transport_common.c 					   le64_to_cpu(pkt->hdr.src_cid),
pkt               717 net/vmw_vsock/virtio_transport_common.c 					   le32_to_cpu(pkt->hdr.src_port));
pkt               826 net/vmw_vsock/virtio_transport_common.c 	struct virtio_vsock_pkt *pkt, *tmp;
pkt               834 net/vmw_vsock/virtio_transport_common.c 	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
pkt               835 net/vmw_vsock/virtio_transport_common.c 		list_del(&pkt->list);
pkt               836 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_free_pkt(pkt);
pkt               847 net/vmw_vsock/virtio_transport_common.c 				 struct virtio_vsock_pkt *pkt)
pkt               853 net/vmw_vsock/virtio_transport_common.c 	switch (le16_to_cpu(pkt->hdr.op)) {
pkt               874 net/vmw_vsock/virtio_transport_common.c 	virtio_transport_reset(vsk, pkt);
pkt               883 net/vmw_vsock/virtio_transport_common.c 			      struct virtio_vsock_pkt *pkt)
pkt               888 net/vmw_vsock/virtio_transport_common.c 	pkt->len = le32_to_cpu(pkt->hdr.len);
pkt               889 net/vmw_vsock/virtio_transport_common.c 	pkt->off = 0;
pkt               893 net/vmw_vsock/virtio_transport_common.c 	can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
pkt               903 net/vmw_vsock/virtio_transport_common.c 	if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
pkt               912 net/vmw_vsock/virtio_transport_common.c 		if (pkt->len <= last_pkt->buf_len - last_pkt->len) {
pkt               913 net/vmw_vsock/virtio_transport_common.c 			memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
pkt               914 net/vmw_vsock/virtio_transport_common.c 			       pkt->len);
pkt               915 net/vmw_vsock/virtio_transport_common.c 			last_pkt->len += pkt->len;
pkt               921 net/vmw_vsock/virtio_transport_common.c 	list_add_tail(&pkt->list, &vvs->rx_queue);
pkt               926 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_free_pkt(pkt);
pkt               931 net/vmw_vsock/virtio_transport_common.c 				struct virtio_vsock_pkt *pkt)
pkt               936 net/vmw_vsock/virtio_transport_common.c 	switch (le16_to_cpu(pkt->hdr.op)) {
pkt               938 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_recv_enqueue(vsk, pkt);
pkt               945 net/vmw_vsock/virtio_transport_common.c 		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
pkt               947 net/vmw_vsock/virtio_transport_common.c 		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
pkt               956 net/vmw_vsock/virtio_transport_common.c 		if (le32_to_cpu(pkt->hdr.flags))
pkt               967 net/vmw_vsock/virtio_transport_common.c 	virtio_transport_free_pkt(pkt);
pkt               973 net/vmw_vsock/virtio_transport_common.c 				    struct virtio_vsock_pkt *pkt)
pkt               977 net/vmw_vsock/virtio_transport_common.c 	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
pkt               983 net/vmw_vsock/virtio_transport_common.c 			       struct virtio_vsock_pkt *pkt)
pkt               988 net/vmw_vsock/virtio_transport_common.c 		.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
pkt               989 net/vmw_vsock/virtio_transport_common.c 		.remote_port = le32_to_cpu(pkt->hdr.src_port),
pkt               999 net/vmw_vsock/virtio_transport_common.c virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
pkt              1005 net/vmw_vsock/virtio_transport_common.c 	if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
pkt              1006 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_reset(vsk, pkt);
pkt              1011 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_reset(vsk, pkt);
pkt              1018 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_reset(vsk, pkt);
pkt              1029 net/vmw_vsock/virtio_transport_common.c 	vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
pkt              1030 net/vmw_vsock/virtio_transport_common.c 			le32_to_cpu(pkt->hdr.dst_port));
pkt              1031 net/vmw_vsock/virtio_transport_common.c 	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
pkt              1032 net/vmw_vsock/virtio_transport_common.c 			le32_to_cpu(pkt->hdr.src_port));
pkt              1036 net/vmw_vsock/virtio_transport_common.c 	virtio_transport_send_response(vchild, pkt);
pkt              1045 net/vmw_vsock/virtio_transport_common.c 					  struct virtio_vsock_pkt *pkt)
pkt              1053 net/vmw_vsock/virtio_transport_common.c 	vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
pkt              1054 net/vmw_vsock/virtio_transport_common.c 	vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
pkt              1063 net/vmw_vsock/virtio_transport_common.c void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
pkt              1070 net/vmw_vsock/virtio_transport_common.c 	vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
pkt              1071 net/vmw_vsock/virtio_transport_common.c 			le32_to_cpu(pkt->hdr.src_port));
pkt              1072 net/vmw_vsock/virtio_transport_common.c 	vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
pkt              1073 net/vmw_vsock/virtio_transport_common.c 			le32_to_cpu(pkt->hdr.dst_port));
pkt              1077 net/vmw_vsock/virtio_transport_common.c 					le32_to_cpu(pkt->hdr.len),
pkt              1078 net/vmw_vsock/virtio_transport_common.c 					le16_to_cpu(pkt->hdr.type),
pkt              1079 net/vmw_vsock/virtio_transport_common.c 					le16_to_cpu(pkt->hdr.op),
pkt              1080 net/vmw_vsock/virtio_transport_common.c 					le32_to_cpu(pkt->hdr.flags),
pkt              1081 net/vmw_vsock/virtio_transport_common.c 					le32_to_cpu(pkt->hdr.buf_alloc),
pkt              1082 net/vmw_vsock/virtio_transport_common.c 					le32_to_cpu(pkt->hdr.fwd_cnt));
pkt              1084 net/vmw_vsock/virtio_transport_common.c 	if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
pkt              1085 net/vmw_vsock/virtio_transport_common.c 		(void)virtio_transport_reset_no_sock(pkt);
pkt              1096 net/vmw_vsock/virtio_transport_common.c 			(void)virtio_transport_reset_no_sock(pkt);
pkt              1103 net/vmw_vsock/virtio_transport_common.c 	space_available = virtio_transport_space_update(sk, pkt);
pkt              1115 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_recv_listen(sk, pkt);
pkt              1116 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_free_pkt(pkt);
pkt              1119 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_recv_connecting(sk, pkt);
pkt              1120 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_free_pkt(pkt);
pkt              1123 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_recv_connected(sk, pkt);
pkt              1126 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_recv_disconnecting(sk, pkt);
pkt              1127 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_free_pkt(pkt);
pkt              1130 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_free_pkt(pkt);
pkt              1142 net/vmw_vsock/virtio_transport_common.c 	virtio_transport_free_pkt(pkt);
pkt              1146 net/vmw_vsock/virtio_transport_common.c void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
pkt              1148 net/vmw_vsock/virtio_transport_common.c 	kfree(pkt->buf);
pkt              1149 net/vmw_vsock/virtio_transport_common.c 	kfree(pkt);
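
The virtio_transport_common.c lines above show the receive-side credit accounting: virtio_transport_inc_rx_pkt() refuses a packet that would exceed buf_alloc, dec_rx_pkt() moves consumed bytes from rx_bytes into fwd_cnt, and inc_tx_pkt() advertises fwd_cnt and buf_alloc back to the peer in the outgoing header. A minimal userspace sketch of that bookkeeping, with simplified stand-in names rather than the kernel structures:

/*
 * Illustrative userspace sketch of the credit accounting visible in the
 * listing above: enqueue only within buf_alloc; on consumption, migrate bytes
 * from rx_bytes to fwd_cnt, which would be advertised in the next header.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct credit_state {
	uint32_t buf_alloc;	/* receive buffer space we advertise */
	uint32_t rx_bytes;	/* bytes queued but not yet consumed */
	uint32_t fwd_cnt;	/* running total of consumed bytes */
};

static bool credit_try_enqueue(struct credit_state *s, uint32_t len)
{
	if (s->rx_bytes + len > s->buf_alloc)
		return false;		/* would exceed advertised buffer */
	s->rx_bytes += len;
	return true;
}

static void credit_consume(struct credit_state *s, uint32_t len)
{
	s->rx_bytes -= len;
	s->fwd_cnt += len;		/* reported back to the peer */
}

int main(void)
{
	struct credit_state s = { .buf_alloc = 4096 };

	printf("enqueue 3000: %d\n", credit_try_enqueue(&s, 3000));
	printf("enqueue 2000: %d\n", credit_try_enqueue(&s, 2000));	/* rejected */
	credit_consume(&s, 3000);
	printf("enqueue 2000 after consume: %d\n", credit_try_enqueue(&s, 2000));
	printf("fwd_cnt now %u, rx_bytes %u\n",
	       (unsigned)s.fwd_cnt, (unsigned)s.rx_bytes);
	return 0;
}
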
pkt                40 net/vmw_vsock/vmci_transport.c 				      struct vmci_transport_packet *pkt);
pkt                44 net/vmw_vsock/vmci_transport.c 					struct vmci_transport_packet *pkt);
pkt                47 net/vmw_vsock/vmci_transport.c 					struct vmci_transport_packet *pkt);
pkt                50 net/vmw_vsock/vmci_transport.c 					struct vmci_transport_packet *pkt);
pkt                53 net/vmw_vsock/vmci_transport.c 					struct vmci_transport_packet *pkt);
pkt                55 net/vmw_vsock/vmci_transport.c 					 struct vmci_transport_packet *pkt);
pkt                64 net/vmw_vsock/vmci_transport.c 	struct vmci_transport_packet pkt;
pkt               118 net/vmw_vsock/vmci_transport.c vmci_transport_packet_init(struct vmci_transport_packet *pkt,
pkt               131 net/vmw_vsock/vmci_transport.c 	pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
pkt               133 net/vmw_vsock/vmci_transport.c 	pkt->dg.dst = vmci_make_handle(dst->svm_cid,
pkt               135 net/vmw_vsock/vmci_transport.c 	pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
pkt               136 net/vmw_vsock/vmci_transport.c 	pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
pkt               137 net/vmw_vsock/vmci_transport.c 	pkt->type = type;
pkt               138 net/vmw_vsock/vmci_transport.c 	pkt->src_port = src->svm_port;
pkt               139 net/vmw_vsock/vmci_transport.c 	pkt->dst_port = dst->svm_port;
pkt               140 net/vmw_vsock/vmci_transport.c 	memset(&pkt->proto, 0, sizeof(pkt->proto));
pkt               141 net/vmw_vsock/vmci_transport.c 	memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));
pkt               143 net/vmw_vsock/vmci_transport.c 	switch (pkt->type) {
pkt               145 net/vmw_vsock/vmci_transport.c 		pkt->u.size = 0;
pkt               150 net/vmw_vsock/vmci_transport.c 		pkt->u.size = size;
pkt               155 net/vmw_vsock/vmci_transport.c 		pkt->u.handle = handle;
pkt               161 net/vmw_vsock/vmci_transport.c 		pkt->u.size = 0;
pkt               165 net/vmw_vsock/vmci_transport.c 		pkt->u.mode = mode;
pkt               170 net/vmw_vsock/vmci_transport.c 		memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait));
pkt               175 net/vmw_vsock/vmci_transport.c 		pkt->u.size = size;
pkt               176 net/vmw_vsock/vmci_transport.c 		pkt->proto = proto;
pkt               182 net/vmw_vsock/vmci_transport.c vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
pkt               186 net/vmw_vsock/vmci_transport.c 	vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
pkt               187 net/vmw_vsock/vmci_transport.c 	vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
pkt               191 net/vmw_vsock/vmci_transport.c __vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
pkt               204 net/vmw_vsock/vmci_transport.c 	vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
pkt               206 net/vmw_vsock/vmci_transport.c 	err = vmci_datagram_send(&pkt->dg);
pkt               214 net/vmw_vsock/vmci_transport.c vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
pkt               224 net/vmw_vsock/vmci_transport.c 	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
pkt               227 net/vmw_vsock/vmci_transport.c 		vmci_transport_packet_get_addresses(pkt, &src, &dst);
pkt               250 net/vmw_vsock/vmci_transport.c 	static struct vmci_transport_packet pkt;
pkt               252 net/vmw_vsock/vmci_transport.c 	return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
pkt               268 net/vmw_vsock/vmci_transport.c 	struct vmci_transport_packet *pkt;
pkt               271 net/vmw_vsock/vmci_transport.c 	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
pkt               272 net/vmw_vsock/vmci_transport.c 	if (!pkt)
pkt               275 net/vmw_vsock/vmci_transport.c 	err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
pkt               278 net/vmw_vsock/vmci_transport.c 	kfree(pkt);
pkt               310 net/vmw_vsock/vmci_transport.c 					struct vmci_transport_packet *pkt)
pkt               312 net/vmw_vsock/vmci_transport.c 	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
pkt               321 net/vmw_vsock/vmci_transport.c 				     struct vmci_transport_packet *pkt)
pkt               327 net/vmw_vsock/vmci_transport.c 	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
pkt               338 net/vmw_vsock/vmci_transport.c 		vsock_addr_init(&dst, pkt->dg.src.context,
pkt               339 net/vmw_vsock/vmci_transport.c 				pkt->src_port);
pkt               386 net/vmw_vsock/vmci_transport.c static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
pkt               389 net/vmw_vsock/vmci_transport.c 						pkt,
pkt               486 net/vmw_vsock/vmci_transport.c 					struct vmci_transport_packet *pkt)
pkt               493 net/vmw_vsock/vmci_transport.c 	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
pkt               500 net/vmw_vsock/vmci_transport.c 		    pkt->dst_port == vpending->local_addr.svm_port) {
pkt               683 net/vmw_vsock/vmci_transport.c 	struct vmci_transport_packet *pkt;
pkt               700 net/vmw_vsock/vmci_transport.c 	if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
pkt               704 net/vmw_vsock/vmci_transport.c 	pkt = (struct vmci_transport_packet *)dg;
pkt               710 net/vmw_vsock/vmci_transport.c 	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
pkt               711 net/vmw_vsock/vmci_transport.c 	vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
pkt               726 net/vmw_vsock/vmci_transport.c 			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
pkt               738 net/vmw_vsock/vmci_transport.c 	if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
pkt               754 net/vmw_vsock/vmci_transport.c 	if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
pkt               772 net/vmw_vsock/vmci_transport.c 					sk, pkt, true, &dst, &src,
pkt               783 net/vmw_vsock/vmci_transport.c 			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
pkt               791 net/vmw_vsock/vmci_transport.c 		memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
pkt               900 net/vmw_vsock/vmci_transport.c 	struct vmci_transport_packet *pkt;
pkt               906 net/vmw_vsock/vmci_transport.c 	pkt = &recv_pkt_info->pkt;
pkt               911 net/vmw_vsock/vmci_transport.c 	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;
pkt               915 net/vmw_vsock/vmci_transport.c 		vmci_transport_recv_listen(sk, pkt);
pkt               922 net/vmw_vsock/vmci_transport.c 		vmci_transport_recv_connecting_client(sk, pkt);
pkt               925 net/vmw_vsock/vmci_transport.c 		vmci_transport_recv_connected(sk, pkt);
pkt               934 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(sk, pkt);
pkt               947 net/vmw_vsock/vmci_transport.c 				      struct vmci_transport_packet *pkt)
pkt               965 net/vmw_vsock/vmci_transport.c 	pending = vmci_transport_get_pending(sk, pkt);
pkt               970 net/vmw_vsock/vmci_transport.c 		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;
pkt               976 net/vmw_vsock/vmci_transport.c 								    pkt);
pkt               979 net/vmw_vsock/vmci_transport.c 			vmci_transport_send_reset(pending, pkt);
pkt               996 net/vmw_vsock/vmci_transport.c 	if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
pkt               997 net/vmw_vsock/vmci_transport.c 	      pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
pkt               998 net/vmw_vsock/vmci_transport.c 		vmci_transport_reply_reset(pkt);
pkt              1002 net/vmw_vsock/vmci_transport.c 	if (pkt->u.size == 0) {
pkt              1003 net/vmw_vsock/vmci_transport.c 		vmci_transport_reply_reset(pkt);
pkt              1012 net/vmw_vsock/vmci_transport.c 		vmci_transport_reply_reset(pkt);
pkt              1019 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(sk, pkt);
pkt              1025 net/vmw_vsock/vmci_transport.c 	vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
pkt              1026 net/vmw_vsock/vmci_transport.c 			pkt->dst_port);
pkt              1027 net/vmw_vsock/vmci_transport.c 	vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
pkt              1028 net/vmw_vsock/vmci_transport.c 			pkt->src_port);
pkt              1033 net/vmw_vsock/vmci_transport.c 	if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
pkt              1034 net/vmw_vsock/vmci_transport.c 	    pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
pkt              1035 net/vmw_vsock/vmci_transport.c 		qp_size = pkt->u.size;
pkt              1046 net/vmw_vsock/vmci_transport.c 		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
pkt              1048 net/vmw_vsock/vmci_transport.c 		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
pkt              1064 net/vmw_vsock/vmci_transport.c 		int proto_int = pkt->proto;
pkt              1094 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(sk, pkt);
pkt              1131 net/vmw_vsock/vmci_transport.c 				      struct vmci_transport_packet *pkt)
pkt              1145 net/vmw_vsock/vmci_transport.c 	switch (pkt->type) {
pkt              1147 net/vmw_vsock/vmci_transport.c 		if (vmci_handle_is_invalid(pkt->u.handle)) {
pkt              1148 net/vmw_vsock/vmci_transport.c 			vmci_transport_send_reset(pending, pkt);
pkt              1156 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(pending, pkt);
pkt              1158 net/vmw_vsock/vmci_transport.c 		err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
pkt              1177 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(pending, pkt);
pkt              1186 net/vmw_vsock/vmci_transport.c 	handle = pkt->u.handle;
pkt              1201 net/vmw_vsock/vmci_transport.c 					pkt->dg.src.context,
pkt              1207 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(pending, pkt);
pkt              1240 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(pending, pkt);
pkt              1277 net/vmw_vsock/vmci_transport.c 				      struct vmci_transport_packet *pkt)
pkt              1285 net/vmw_vsock/vmci_transport.c 	switch (pkt->type) {
pkt              1287 net/vmw_vsock/vmci_transport.c 		if (vmci_handle_is_invalid(pkt->u.handle) ||
pkt              1288 net/vmw_vsock/vmci_transport.c 		    !vmci_handle_is_equal(pkt->u.handle,
pkt              1308 net/vmw_vsock/vmci_transport.c 		if (pkt->u.size == 0
pkt              1309 net/vmw_vsock/vmci_transport.c 		    || pkt->dg.src.context != vsk->remote_addr.svm_cid
pkt              1310 net/vmw_vsock/vmci_transport.c 		    || pkt->src_port != vsk->remote_addr.svm_port
pkt              1322 net/vmw_vsock/vmci_transport.c 		err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
pkt              1330 net/vmw_vsock/vmci_transport.c 		err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
pkt              1366 net/vmw_vsock/vmci_transport.c 	vmci_transport_send_reset(sk, pkt);
pkt              1376 net/vmw_vsock/vmci_transport.c 					struct vmci_transport_packet *pkt)
pkt              1400 net/vmw_vsock/vmci_transport.c 	if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
pkt              1401 net/vmw_vsock/vmci_transport.c 	    pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
pkt              1409 net/vmw_vsock/vmci_transport.c 		vsk->local_addr.svm_cid = pkt->dg.dst.context;
pkt              1418 net/vmw_vsock/vmci_transport.c 		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
pkt              1420 net/vmw_vsock/vmci_transport.c 		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
pkt              1428 net/vmw_vsock/vmci_transport.c 		version = pkt->proto;
pkt              1456 net/vmw_vsock/vmci_transport.c 					      pkt->u.size,
pkt              1457 net/vmw_vsock/vmci_transport.c 					      pkt->u.size,
pkt              1477 net/vmw_vsock/vmci_transport.c 		pkt->u.size;
pkt              1497 net/vmw_vsock/vmci_transport.c 					      struct vmci_transport_packet *pkt)
pkt              1519 net/vmw_vsock/vmci_transport.c 					 struct vmci_transport_packet *pkt)
pkt              1532 net/vmw_vsock/vmci_transport.c 	switch (pkt->type) {
pkt              1534 net/vmw_vsock/vmci_transport.c 		if (pkt->u.mode) {
pkt              1537 net/vmw_vsock/vmci_transport.c 			vsk->peer_shutdown |= pkt->u.mode;
pkt              1564 net/vmw_vsock/vmci_transport.c 				sk, pkt, false, NULL, NULL,
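
The vmci_transport.c hits above include range checks of pkt->u.size against queue_pair_min_size and queue_pair_max_size on both the listen and the connecting-client paths. A minimal userspace sketch of that validation follows; the struct, the limit values and the reject behaviour are stand-ins inferred from the surrounding reset calls, not the driver's real types.

#include <stdint.h>
#include <stdio.h>

struct qp_limits {			/* hypothetical stand-in */
	uint64_t min_size;
	uint64_t max_size;
};

/* Accept a peer-proposed queue-pair size only inside the local
 * [min, max] window; out-of-range proposals are rejected (the
 * driver answers those by sending a reset). */
static int check_qp_size(uint64_t requested, const struct qp_limits *l)
{
	if (requested < l->min_size || requested > l->max_size)
		return -1;
	return 0;
}

int main(void)
{
	struct qp_limits l = { .min_size = 128, .max_size = 262144 };

	printf("%d %d\n", check_qp_size(65536, &l), check_qp_size(1, &l));
	return 0;
}
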
pkt                98 net/vmw_vsock/vmci_transport.h 	struct vmci_transport_notify_pkt pkt;
pkt                15 net/vmw_vsock/vmci_transport_notify.c #define PKT_FIELD(vsk, field_name) (vmci_trans(vsk)->notify.pkt.field_name)
pkt               106 net/vmw_vsock/vmci_transport_notify.c 				   struct vmci_transport_packet *pkt,
pkt               117 net/vmw_vsock/vmci_transport_notify.c 	memcpy(&PKT_FIELD(vsk, peer_waiting_read_info), &pkt->u.wait,
pkt               136 net/vmw_vsock/vmci_transport_notify.c 				    struct vmci_transport_packet *pkt,
pkt               147 net/vmw_vsock/vmci_transport_notify.c 	memcpy(&PKT_FIELD(vsk, peer_waiting_write_info), &pkt->u.wait,
pkt               166 net/vmw_vsock/vmci_transport_notify.c 			   struct vmci_transport_packet *pkt,
pkt               302 net/vmw_vsock/vmci_transport_notify.c 			    struct vmci_transport_packet *pkt,
pkt               600 net/vmw_vsock/vmci_transport_notify.c 			struct vmci_transport_packet *pkt,
pkt               607 net/vmw_vsock/vmci_transport_notify.c 	switch (pkt->type) {
pkt               609 net/vmw_vsock/vmci_transport_notify.c 		vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
pkt               613 net/vmw_vsock/vmci_transport_notify.c 		vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
pkt               617 net/vmw_vsock/vmci_transport_notify.c 		vmci_transport_handle_waiting_write(sk, pkt, bottom_half,
pkt               623 net/vmw_vsock/vmci_transport_notify.c 		vmci_transport_handle_waiting_read(sk, pkt, bottom_half,
pkt                47 net/vmw_vsock/vmci_transport_notify.h 				   struct vmci_transport_packet *pkt,
pkt                74 net/vmw_vsock/vmci_transport_notify_qstate.c 			   struct vmci_transport_packet *pkt,
pkt                83 net/vmw_vsock/vmci_transport_notify_qstate.c 			    struct vmci_transport_packet *pkt,
pkt               344 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct vmci_transport_packet *pkt,
pkt               351 net/vmw_vsock/vmci_transport_notify_qstate.c 	switch (pkt->type) {
pkt               353 net/vmw_vsock/vmci_transport_notify_qstate.c 		vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
pkt               357 net/vmw_vsock/vmci_transport_notify_qstate.c 		vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
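
Both notify implementations above (vmci_transport_notify.c and vmci_transport_notify_qstate.c) process an incoming packet by switching on pkt->type and calling a per-type handler. A compact sketch of that dispatch shape, using made-up types in place of the real vmci structures:

#include <stdio.h>

enum pkt_type { PKT_WROTE, PKT_READ };	/* made-up subset of packet types */

struct pkt {				/* hypothetical stand-in for vmci_transport_packet */
	enum pkt_type type;
};

static void handle_wrote(const struct pkt *p)
{
	(void)p;
	puts("peer wrote data into the queue pair");
}

static void handle_read(const struct pkt *p)
{
	(void)p;
	puts("peer consumed data from the queue pair");
}

/* Dispatch one notification packet on its type, as both notify
 * variants do in their switch (pkt->type) blocks. */
static void handle_notify_pkt(const struct pkt *p)
{
	switch (p->type) {
	case PKT_WROTE:
		handle_wrote(p);
		break;
	case PKT_READ:
		handle_read(p);
		break;
	default:
		break;			/* unknown notifications are ignored */
	}
}

int main(void)
{
	struct pkt p = { .type = PKT_READ };

	handle_notify_pkt(&p);
	return 0;
}
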
pkt               241 samples/bpf/xdpsock_user.c static void hex_dump(void *pkt, size_t length, u64 addr)
pkt               243 samples/bpf/xdpsock_user.c 	const unsigned char *address = (unsigned char *)pkt;
pkt               576 samples/bpf/xdpsock_user.c 		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
pkt               578 samples/bpf/xdpsock_user.c 		hex_dump(pkt, len, addr);
pkt               691 samples/bpf/xdpsock_user.c 		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
pkt               693 samples/bpf/xdpsock_user.c 		swap_mac_addresses(pkt);
pkt               695 samples/bpf/xdpsock_user.c 		hex_dump(pkt, len, addr);
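
In samples/bpf/xdpsock_user.c the received or forwarded frame is looked up in the UMEM and handed to hex_dump(pkt, len, addr). The helper below is an illustrative hex dump with the same signature shape; the exact formatting of the sample's output is not reproduced here.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Print a packet as 16 hex bytes per row, tagged with its UMEM
 * address; formatting is illustrative, not the sample's exact output. */
static void hex_dump(const void *pkt, size_t length, uint64_t addr)
{
	const unsigned char *p = pkt;
	size_t i;

	printf("addr=0x%llx len=%zu\n", (unsigned long long)addr, length);
	for (i = 0; i < length; i++)
		printf("%02x%c", p[i], (i + 1) % 16 ? ' ' : '\n');
	if (length % 16)
		putchar('\n');
}

int main(void)
{
	unsigned char frame[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* dst MAC (broadcast) */
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,	/* src MAC */
		0x08, 0x00,				/* EtherType IPv4 */
	};

	hex_dump(frame, sizeof(frame), 0x1000);
	return 0;
}
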
pkt               238 sound/soc/qcom/qdsp6/q6adm.c 				   struct apr_pkt *pkt, uint32_t rsp_opcode)
pkt               241 sound/soc/qcom/qdsp6/q6adm.c 	uint32_t opcode = pkt->hdr.opcode;
pkt               247 sound/soc/qcom/qdsp6/q6adm.c 	ret = apr_send_pkt(adm->apr, pkt);
pkt               327 sound/soc/qcom/qdsp6/q6adm.c 	struct apr_pkt *pkt;
pkt               336 sound/soc/qcom/qdsp6/q6adm.c 	pkt = p;
pkt               338 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
pkt               341 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.pkt_size = pkt_size;
pkt               342 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.src_port = afe_port;
pkt               343 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.dest_port = afe_port;
pkt               344 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.token = port_id << 16 | copp->copp_idx;
pkt               345 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
pkt               359 sound/soc/qcom/qdsp6/q6adm.c 	ret = q6adm_apr_send_copp_pkt(adm, copp, pkt,
pkt               363 sound/soc/qcom/qdsp6/q6adm.c 	kfree(pkt);
pkt               466 sound/soc/qcom/qdsp6/q6adm.c 	struct apr_pkt *pkt;
pkt               480 sound/soc/qcom/qdsp6/q6adm.c 	pkt = matrix_map;
pkt               485 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
pkt               488 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.pkt_size = pkt_size;
pkt               489 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.token = 0;
pkt               490 sound/soc/qcom/qdsp6/q6adm.c 	pkt->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
pkt               514 sound/soc/qcom/qdsp6/q6adm.c 			kfree(pkt);
pkt               521 sound/soc/qcom/qdsp6/q6adm.c 			kfree(pkt);
pkt               533 sound/soc/qcom/qdsp6/q6adm.c 	ret = apr_send_pkt(adm->apr, pkt);
pkt               540 sound/soc/qcom/qdsp6/q6adm.c 				 adm->result.opcode == pkt->hdr.opcode,
pkt               556 sound/soc/qcom/qdsp6/q6adm.c 	kfree(pkt);
pkt               803 sound/soc/qcom/qdsp6/q6afe.c static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
pkt               807 sound/soc/qcom/qdsp6/q6afe.c 	struct apr_hdr *hdr = &pkt->hdr;
pkt               814 sound/soc/qcom/qdsp6/q6afe.c 	ret = apr_send_pkt(afe->apr, pkt);
pkt               845 sound/soc/qcom/qdsp6/q6afe.c 	struct apr_pkt *pkt;
pkt               855 sound/soc/qcom/qdsp6/q6afe.c 	pkt = p;
pkt               861 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
pkt               864 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.pkt_size = pkt_size;
pkt               865 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.src_port = 0;
pkt               866 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.dest_port = 0;
pkt               867 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.token = port->token;
pkt               868 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.opcode = AFE_SVC_CMD_SET_PARAM;
pkt               878 sound/soc/qcom/qdsp6/q6afe.c 	ret = afe_apr_send_pkt(afe, pkt, port);
pkt               883 sound/soc/qcom/qdsp6/q6afe.c 	kfree(pkt);
pkt               893 sound/soc/qcom/qdsp6/q6afe.c 	struct apr_pkt *pkt;
pkt               903 sound/soc/qcom/qdsp6/q6afe.c 	pkt = p;
pkt               909 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
pkt               912 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.pkt_size = pkt_size;
pkt               913 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.src_port = 0;
pkt               914 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.dest_port = 0;
pkt               915 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.token = port->token;
pkt               916 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
pkt               927 sound/soc/qcom/qdsp6/q6afe.c 	ret = afe_apr_send_pkt(afe, pkt, port);
pkt               932 sound/soc/qcom/qdsp6/q6afe.c 	kfree(pkt);
pkt              1025 sound/soc/qcom/qdsp6/q6afe.c 	struct apr_pkt *pkt;
pkt              1043 sound/soc/qcom/qdsp6/q6afe.c 	pkt = p;
pkt              1046 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
pkt              1049 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.pkt_size = pkt_size;
pkt              1050 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.src_port = 0;
pkt              1051 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.dest_port = 0;
pkt              1052 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.token = index;
pkt              1053 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
pkt              1057 sound/soc/qcom/qdsp6/q6afe.c 	ret = afe_apr_send_pkt(afe, pkt, port);
pkt              1061 sound/soc/qcom/qdsp6/q6afe.c 	kfree(pkt);
pkt              1304 sound/soc/qcom/qdsp6/q6afe.c 	struct apr_pkt *pkt;
pkt              1333 sound/soc/qcom/qdsp6/q6afe.c 	pkt = p;
pkt              1336 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
pkt              1339 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.pkt_size = pkt_size;
pkt              1340 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.src_port = 0;
pkt              1341 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.dest_port = 0;
pkt              1342 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.token = port->token;
pkt              1343 sound/soc/qcom/qdsp6/q6afe.c 	pkt->hdr.opcode = AFE_PORT_CMD_DEVICE_START;
pkt              1347 sound/soc/qcom/qdsp6/q6afe.c 	ret = afe_apr_send_pkt(afe, pkt, port);
pkt              1352 sound/soc/qcom/qdsp6/q6afe.c 	kfree(pkt);
pkt               214 sound/soc/qcom/qdsp6/q6asm.c 				      struct apr_pkt *pkt, uint32_t rsp_opcode)
pkt               216 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_hdr *hdr = &pkt->hdr;
pkt               222 sound/soc/qcom/qdsp6/q6asm.c 	rc = apr_send_pkt(a->adev, pkt);
pkt               255 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt *pkt;
pkt               269 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
pkt               272 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
pkt               273 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.src_port = 0;
pkt               274 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.dest_port = 0;
pkt               275 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.pkt_size = pkt_size;
pkt               276 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.token = ((ac->session << 8) | dir);
pkt               278 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
pkt               281 sound/soc/qcom/qdsp6/q6asm.c 	rc = q6asm_apr_send_session_pkt(a, ac, pkt, 0);
pkt               283 sound/soc/qcom/qdsp6/q6asm.c 		kfree(pkt);
pkt               289 sound/soc/qcom/qdsp6/q6asm.c 	kfree(pkt);
pkt               352 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt *pkt;
pkt               376 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
pkt               380 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.hdr_field = APR_SEQ_CMD_HDR_FIELD;
pkt               381 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.src_port = 0;
pkt               382 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.dest_port = 0;
pkt               383 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.pkt_size = pkt_size;
pkt               384 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.token = ((ac->session << 8) | dir);
pkt               385 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
pkt               403 sound/soc/qcom/qdsp6/q6asm.c 	rc = q6asm_apr_send_session_pkt(a, ac, pkt,
pkt               406 sound/soc/qcom/qdsp6/q6asm.c 	kfree(pkt);
pkt               801 sound/soc/qcom/qdsp6/q6asm.c static int q6asm_ac_send_cmd_sync(struct audio_client *ac, struct apr_pkt *pkt)
pkt               803 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_hdr *hdr = &pkt->hdr;
pkt               810 sound/soc/qcom/qdsp6/q6asm.c 	rc = apr_send_pkt(ac->adev, pkt);
pkt               849 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt *pkt;
pkt               859 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
pkt               861 sound/soc/qcom/qdsp6/q6asm.c 	q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
pkt               863 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V3;
pkt               885 sound/soc/qcom/qdsp6/q6asm.c 	rc = q6asm_ac_send_cmd_sync(ac, pkt);
pkt               892 sound/soc/qcom/qdsp6/q6asm.c 	kfree(pkt);
pkt               901 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt *pkt;
pkt               910 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
pkt               913 sound/soc/qcom/qdsp6/q6asm.c 	q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
pkt               915 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.opcode = ASM_SESSION_CMD_RUN_V2;
pkt               920 sound/soc/qcom/qdsp6/q6asm.c 		rc = q6asm_ac_send_cmd_sync(ac, pkt);
pkt               922 sound/soc/qcom/qdsp6/q6asm.c 		rc = apr_send_pkt(ac->adev, pkt);
pkt               927 sound/soc/qcom/qdsp6/q6asm.c 	kfree(pkt);
pkt               982 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt *pkt;
pkt               992 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
pkt               995 sound/soc/qcom/qdsp6/q6asm.c 	q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
pkt               997 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
pkt              1016 sound/soc/qcom/qdsp6/q6asm.c 	rc = q6asm_ac_send_cmd_sync(ac, pkt);
pkt              1019 sound/soc/qcom/qdsp6/q6asm.c 	kfree(pkt);
pkt              1038 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt *pkt;
pkt              1049 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
pkt              1051 sound/soc/qcom/qdsp6/q6asm.c 	q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
pkt              1053 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
pkt              1071 sound/soc/qcom/qdsp6/q6asm.c 	rc = q6asm_ac_send_cmd_sync(ac, pkt);
pkt              1073 sound/soc/qcom/qdsp6/q6asm.c 	kfree(pkt);
pkt              1090 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt *pkt;
pkt              1101 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
pkt              1106 sound/soc/qcom/qdsp6/q6asm.c 	q6asm_add_hdr(ac, &pkt->hdr, pkt_size, false, ac->stream_id);
pkt              1108 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.opcode = ASM_DATA_CMD_READ_V2;
pkt              1115 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.token = port->dsp_buf;
pkt              1123 sound/soc/qcom/qdsp6/q6asm.c 	rc = apr_send_pkt(ac->adev, pkt);
pkt              1127 sound/soc/qcom/qdsp6/q6asm.c 		pr_err("read op[0x%x]rc[%d]\n", pkt->hdr.opcode, rc);
pkt              1129 sound/soc/qcom/qdsp6/q6asm.c 	kfree(pkt);
pkt              1138 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt *pkt;
pkt              1147 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
pkt              1150 sound/soc/qcom/qdsp6/q6asm.c 	q6asm_add_hdr(ac, &pkt->hdr,  pkt_size, true, ac->stream_id);
pkt              1151 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V3;
pkt              1171 sound/soc/qcom/qdsp6/q6asm.c 	rc = q6asm_ac_send_cmd_sync(ac, pkt);
pkt              1173 sound/soc/qcom/qdsp6/q6asm.c 	kfree(pkt);
pkt              1211 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt *pkt;
pkt              1221 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
pkt              1226 sound/soc/qcom/qdsp6/q6asm.c 	q6asm_add_hdr(ac, &pkt->hdr, pkt_size, false, ac->stream_id);
pkt              1229 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.token = port->dsp_buf;
pkt              1230 sound/soc/qcom/qdsp6/q6asm.c 	pkt->hdr.opcode = ASM_DATA_CMD_WRITE_V2;
pkt              1251 sound/soc/qcom/qdsp6/q6asm.c 	rc = apr_send_pkt(ac->adev, pkt);
pkt              1255 sound/soc/qcom/qdsp6/q6asm.c 	kfree(pkt);
pkt              1276 sound/soc/qcom/qdsp6/q6asm.c 	struct apr_pkt pkt;
pkt              1279 sound/soc/qcom/qdsp6/q6asm.c 	q6asm_add_hdr(ac, &pkt.hdr, APR_HDR_SIZE, true, stream_id);
pkt              1283 sound/soc/qcom/qdsp6/q6asm.c 		pkt.hdr.opcode = ASM_SESSION_CMD_PAUSE;
pkt              1286 sound/soc/qcom/qdsp6/q6asm.c 		pkt.hdr.opcode = ASM_SESSION_CMD_SUSPEND;
pkt              1289 sound/soc/qcom/qdsp6/q6asm.c 		pkt.hdr.opcode = ASM_STREAM_CMD_FLUSH;
pkt              1292 sound/soc/qcom/qdsp6/q6asm.c 		pkt.hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
pkt              1295 sound/soc/qcom/qdsp6/q6asm.c 		pkt.hdr.opcode = ASM_DATA_CMD_EOS;
pkt              1298 sound/soc/qcom/qdsp6/q6asm.c 		pkt.hdr.opcode = ASM_STREAM_CMD_CLOSE;
pkt              1305 sound/soc/qcom/qdsp6/q6asm.c 		rc = q6asm_ac_send_cmd_sync(ac, &pkt);
pkt              1307 sound/soc/qcom/qdsp6/q6asm.c 		return apr_send_pkt(ac->adev, &pkt);
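
A detail worth noting in the q6asm.c hits: the APR token is built as ((ac->session << 8) | dir), so a response can be matched back to both the session and the stream direction. A small sketch of that packing; the decode side is an assumption, since the listing only shows the encode.

#include <stdint.h>
#include <stdio.h>

/* Encode session id and stream direction into one token,
 * mirroring the ((ac->session << 8) | dir) expression above. */
static uint32_t pack_token(uint8_t session, uint8_t dir)
{
	return ((uint32_t)session << 8) | dir;
}

/* Recover both fields from a token echoed back in a response
 * (assumed decode; not shown in the listing). */
static void unpack_token(uint32_t token, uint8_t *session, uint8_t *dir)
{
	*session = (token >> 8) & 0xff;
	*dir = token & 0xff;
}

int main(void)
{
	uint8_t session, dir;

	unpack_token(pack_token(3, 1), &session, &dir);
	printf("session=%u dir=%u\n", (unsigned)session, (unsigned)dir);
	return 0;
}
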
pkt               154 sound/soc/qcom/qdsp6/q6core.c 	struct apr_pkt pkt;
pkt               157 sound/soc/qcom/qdsp6/q6core.c 	pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
pkt               159 sound/soc/qcom/qdsp6/q6core.c 	pkt.hdr.pkt_size = APR_HDR_SIZE;
pkt               160 sound/soc/qcom/qdsp6/q6core.c 	pkt.hdr.opcode = AVCS_CMD_GET_FWK_VERSION;
pkt               162 sound/soc/qcom/qdsp6/q6core.c 	rc = apr_send_pkt(adev, &pkt);
pkt               184 sound/soc/qcom/qdsp6/q6core.c 	struct apr_pkt pkt;
pkt               187 sound/soc/qcom/qdsp6/q6core.c 	pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
pkt               189 sound/soc/qcom/qdsp6/q6core.c 	pkt.hdr.pkt_size = APR_HDR_SIZE;
pkt               190 sound/soc/qcom/qdsp6/q6core.c 	pkt.hdr.opcode = AVCS_GET_VERSIONS;
pkt               192 sound/soc/qcom/qdsp6/q6core.c 	rc = apr_send_pkt(adev, &pkt);
pkt               209 sound/soc/qcom/qdsp6/q6core.c 	struct apr_pkt pkt;
pkt               214 sound/soc/qcom/qdsp6/q6core.c 	pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
pkt               216 sound/soc/qcom/qdsp6/q6core.c 	pkt.hdr.pkt_size = APR_HDR_SIZE;
pkt               217 sound/soc/qcom/qdsp6/q6core.c 	pkt.hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;
pkt               219 sound/soc/qcom/qdsp6/q6core.c 	rc = apr_send_pkt(adev, &pkt);
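
Across q6adm.c, q6afe.c, q6asm.c and q6core.c the same pattern repeats: allocate or stack-declare an apr_pkt, fill the header (pkt_size, ports, token, opcode), hand it to apr_send_pkt() and, for synchronous commands, wait for the matching response. The sketch below mirrors only that fill-then-send shape with hypothetical stand-in types; struct apr_hdr, struct apr_pkt and apr_send_pkt() are kernel APR driver APIs and are not reimplemented here.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel's APR packet types. */
struct hdr {
	uint16_t pkt_size;
	uint16_t src_port;
	uint16_t dest_port;
	uint32_t token;
	uint32_t opcode;
};

struct pkt {
	struct hdr hdr;
	/* opcode-specific payload would follow the header */
};

static int send_pkt(const struct pkt *p)
{
	/* The real helpers hand the packet to the DSP and then wait on a
	 * completion for a response carrying the same opcode/token. */
	printf("sending opcode 0x%x, %u bytes, token 0x%x\n",
	       (unsigned)p->hdr.opcode, (unsigned)p->hdr.pkt_size,
	       (unsigned)p->hdr.token);
	return 0;
}

int main(void)
{
	struct pkt p;

	memset(&p, 0, sizeof(p));		/* as the drivers do via kzalloc/memset */
	p.hdr.pkt_size = (uint16_t)sizeof(p);
	p.hdr.src_port = 0;
	p.hdr.dest_port = 0;
	p.hdr.token = 42;			/* echoed back in the response */
	p.hdr.opcode = 0x10001;			/* made-up command id */

	return send_pkt(&p);
}
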
pkt               470 tools/bpf/bpf_dbg.c static void bpf_dump_pkt(uint8_t *pkt, uint32_t pkt_caplen, uint32_t pkt_len)
pkt               477 tools/bpf/bpf_dbg.c 	hex_dump(pkt, pkt_caplen);
pkt               603 tools/bpf/bpf_dbg.c static uint32_t extract_u32(uint8_t *pkt, uint32_t off)
pkt               607 tools/bpf/bpf_dbg.c 	memcpy(&r, &pkt[off], sizeof(r));
pkt               612 tools/bpf/bpf_dbg.c static uint16_t extract_u16(uint8_t *pkt, uint32_t off)
pkt               616 tools/bpf/bpf_dbg.c 	memcpy(&r, &pkt[off], sizeof(r));
pkt               621 tools/bpf/bpf_dbg.c static uint8_t extract_u8(uint8_t *pkt, uint32_t off)
pkt               623 tools/bpf/bpf_dbg.c 	return pkt[off];
pkt               633 tools/bpf/bpf_dbg.c 			    uint8_t *pkt, uint32_t pkt_caplen,
pkt               667 tools/bpf/bpf_dbg.c 			r->A = extract_u32(pkt, K);
pkt               674 tools/bpf/bpf_dbg.c 			r->A = extract_u16(pkt, K);
pkt               681 tools/bpf/bpf_dbg.c 			r->A = extract_u8(pkt, K);
pkt               688 tools/bpf/bpf_dbg.c 			r->A = extract_u32(pkt, r->X + K);
pkt               693 tools/bpf/bpf_dbg.c 			r->A = extract_u16(pkt, r->X + K);
pkt               700 tools/bpf/bpf_dbg.c 			r->A = extract_u8(pkt, r->X + K);
pkt               707 tools/bpf/bpf_dbg.c 			r->X = extract_u8(pkt, K);
pkt               855 tools/bpf/bpf_dbg.c 				  uint8_t *pkt, uint32_t pkt_caplen,
pkt               861 tools/bpf/bpf_dbg.c 	bpf_dump_pkt(pkt, pkt_caplen, pkt_len);
pkt               866 tools/bpf/bpf_dbg.c static int bpf_run_all(struct sock_filter *f, uint16_t bpf_len, uint8_t *pkt,
pkt               875 tools/bpf/bpf_dbg.c 			stop = bpf_handle_breakpoint(&bpf_curr, f, pkt,
pkt               878 tools/bpf/bpf_dbg.c 		bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen,
pkt               887 tools/bpf/bpf_dbg.c 			    uint8_t *pkt, uint32_t pkt_caplen,
pkt               897 tools/bpf/bpf_dbg.c 			stop = bpf_handle_breakpoint(&bpf_curr, f, pkt,
pkt               900 tools/bpf/bpf_dbg.c 		bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen,
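
The bpf_dbg.c extract_u32/u16/u8 helpers above load packet bytes through memcpy() so any offset is alignment-safe; classic BPF absolute loads are big-endian, so the byte-order conversion on the result is assumed below (the listing shows only the memcpy lines).

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static uint32_t extract_u32(const uint8_t *pkt, uint32_t off)
{
	uint32_t r;

	memcpy(&r, &pkt[off], sizeof(r));	/* no alignment assumptions */
	return ntohl(r);
}

static uint16_t extract_u16(const uint8_t *pkt, uint32_t off)
{
	uint16_t r;

	memcpy(&r, &pkt[off], sizeof(r));
	return ntohs(r);
}

static uint8_t extract_u8(const uint8_t *pkt, uint32_t off)
{
	return pkt[off];
}

int main(void)
{
	/* 14-byte Ethernet header with EtherType 0x0800 at offset 12 */
	uint8_t frame[14] = { [12] = 0x08, [13] = 0x00 };

	printf("ethertype=0x%04x first_byte=%u first_word=0x%08x\n",
	       (unsigned)extract_u16(frame, 12),
	       (unsigned)extract_u8(frame, 0),
	       (unsigned)extract_u32(frame, 0));
	return 0;
}
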
pkt                96 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 	} pkt;
pkt               106 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipv4 = {
pkt               127 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipv6 = {
pkt               147 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.svlan_ipv4 = {
pkt               169 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.dvlan_ipv6 = {
pkt               192 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipv4 = {
pkt               218 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipv4 = {
pkt               240 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipv6_frag = {
pkt               266 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipv6_frag = {
pkt               288 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipv6 = {
pkt               310 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipv6 = {
pkt               332 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipip = {
pkt               360 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		.pkt.ipip = {
pkt               407 tools/testing/selftests/bpf/prog_tests/flow_dissector.c static int tx_tap(int fd, void *pkt, size_t len)
pkt               412 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 			.iov_base = pkt,
pkt               461 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 			.data_in = &tests[i].pkt,
pkt               462 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 			.data_size_in = sizeof(tests[i].pkt),
pkt               513 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
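
The flow_dissector selftest injects each test packet with tx_tap(), which wraps the frame in a single iovec and writev()s it to a tap file descriptor. A standalone sketch of that path follows, including a plausible tap attach; the device name, flag set and error handling are assumptions, not copied from the test.

#include <fcntl.h>
#include <stddef.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Attach to an existing tap device; IFF_TAP | IFF_NO_PI gives raw
 * Ethernet frames with no packet-information header (the selftest's
 * actual flags may differ). */
static int open_tap(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

/* Send one frame: the tap driver treats each writev() as one packet,
 * so a single iovec is enough. */
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_base = pkt,
			.iov_len = len,
		},
	};

	return writev(fd, iov, 1);
}

int main(void)
{
	unsigned char frame[60] = { 0 };	/* zero-padded minimal frame */
	int fd = open_tap("tap0");		/* device name is an assumption */

	if (fd < 0)
		return 1;
	tx_tap(fd, frame, sizeof(frame));
	close(fd);
	return 0;
}
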
pkt                57 tools/testing/selftests/bpf/progs/test_xdp_vlan.c bool parse_eth_frame(struct ethhdr *eth, void *data_end, struct parse_pkt *pkt)
pkt                75 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 		pkt->vlan_outer_offset = offset;
pkt                76 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 		pkt->vlan_outer = bpf_ntohs(vlan_hdr->h_vlan_TCI)
pkt                88 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 		pkt->vlan_inner_offset = offset;
pkt                89 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 		pkt->vlan_inner = bpf_ntohs(vlan_hdr->h_vlan_TCI)
pkt                95 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	pkt->l3_proto = bpf_ntohs(eth_type); /* Convert to host-byte-order */
pkt                96 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	pkt->l3_offset = offset;
pkt               110 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	struct parse_pkt pkt = { 0 };
pkt               112 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	if (!parse_eth_frame(data, data_end, &pkt))
pkt               116 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	if (pkt.vlan_outer == TESTVLAN)
pkt               152 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	struct parse_pkt pkt = { 0 };
pkt               154 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	if (!parse_eth_frame(data, data_end, &pkt))
pkt               158 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	if (pkt.vlan_outer == TESTVLAN) {
pkt               159 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 		struct _vlan_hdr *vlan_hdr = data + pkt.vlan_outer_offset;
pkt               186 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	struct parse_pkt pkt = { 0 };
pkt               189 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	if (!parse_eth_frame(data, data_end, &pkt))
pkt               193 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	if (pkt.vlan_outer_offset == 0)
pkt               246 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	struct parse_pkt pkt = { 0 };
pkt               248 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	if (!parse_eth_frame(orig_eth, data_end, &pkt))
pkt               252 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	if (pkt.vlan_outer_offset == 0)
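
In test_xdp_vlan.c, parse_eth_frame() records the outer and inner 802.1Q tags by converting h_vlan_TCI with bpf_ntohs() (the mask applied to the result is split onto the next source line in the listing). The userspace sketch below shows the same TCI-to-VLAN-ID extraction; the 0x0fff mask and the example VID are the usual 802.1Q values, assumed rather than quoted from the test.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define VLAN_VID_MASK	0x0fff		/* low 12 bits of the TCI carry the VID */

struct vlan_hdr {			/* local definition, mirrors the 802.1Q tag */
	uint16_t h_vlan_TCI;		/* PCP | DEI | VID, network byte order */
	uint16_t h_vlan_encapsulated_proto;
};

/* Convert the TCI to host order and mask off the priority/DEI bits,
 * the operation behind pkt->vlan_outer / pkt->vlan_inner above. */
static uint16_t vlan_id(const struct vlan_hdr *vh)
{
	return ntohs(vh->h_vlan_TCI) & VLAN_VID_MASK;
}

int main(void)
{
	struct vlan_hdr vh = {
		.h_vlan_TCI = htons((3 << 13) | 4011),	/* PCP 3, VID 4011 */
		.h_vlan_encapsulated_proto = htons(0x0800),
	};

	printf("vid=%u\n", (unsigned)vlan_id(&vh));
	return 0;
}
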
pkt               462 tools/testing/selftests/bpf/test_flow_dissector.c static int do_tx(int fd, const char *pkt, int len)
pkt               466 tools/testing/selftests/bpf/test_flow_dissector.c 	ret = write(fd, pkt, len);