xo                730 drivers/clk/qcom/clk-rcg2.c 	struct clk_hw *p2, *p8, *p9, *xo;
xo                734 drivers/clk/qcom/clk-rcg2.c 	xo = clk_hw_get_parent_by_index(hw, 0);
xo                735 drivers/clk/qcom/clk-rcg2.c 	if (req->rate == clk_hw_get_rate(xo)) {
xo                736 drivers/clk/qcom/clk-rcg2.c 		req->best_parent_hw = xo;
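
The clk-rcg2.c hits are a determine_rate callback that short-circuits to the crystal oscillator parent (index 0) whenever the XO rate itself is requested, avoiding any PLL. A minimal sketch of that pattern; the function name and the -EINVAL fallback are illustrative, not the driver's actual code:

#include <linux/clk-provider.h>

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_hw *xo = clk_hw_get_parent_by_index(hw, 0);

	if (xo && req->rate == clk_hw_get_rate(xo)) {
		/* run straight off the crystal, no PLL involved */
		req->best_parent_hw = xo;
		req->best_parent_rate = req->rate;
		return 0;
	}

	/* otherwise pick a PLL parent and divider for req->rate */
	return -EINVAL;
}
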
xo                 52 drivers/clk/qcom/gcc-msm8994.c static struct clk_fixed_factor xo = {
xo               2283 drivers/clk/qcom/gcc-msm8994.c 	clk = devm_clk_register(dev, &xo.hw);
xo                171 drivers/clk/qcom/gcc-msm8996.c static struct clk_fixed_factor xo = {
xo               3197 drivers/clk/qcom/gcc-msm8996.c 	&xo.hw,
xo                120 drivers/clk/qcom/gcc-msm8998.c static struct clk_fixed_factor xo = {
xo               2963 drivers/clk/qcom/gcc-msm8998.c 	&xo.hw,
xo                142 drivers/clk/qcom/gcc-sdm660.c static struct clk_fixed_factor xo = {
xo               2256 drivers/clk/qcom/gcc-sdm660.c 	&xo.hw,
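
Across gcc-msm8994/8996/8998/sdm660 the board crystal is modelled as a 1:1 fixed-factor clock named "xo" and registered at probe time (gcc-msm8994.c line 2283 above; the newer drivers list &xo.hw in their hw-clock arrays instead). A sketch of the definition, assuming the usual "xo_board" parent name:

#include <linux/clk-provider.h>

static struct clk_fixed_factor xo = {
	.mult = 1,
	.div = 1,
	.hw.init = &(struct clk_init_data){
		.name = "xo",
		.parent_names = (const char *[]){ "xo_board" }, /* assumed */
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
	},
};

/* in probe, as in gcc-msm8994.c: */
	clk = devm_clk_register(dev, &xo.hw);
	if (IS_ERR(clk))
		return PTR_ERR(clk);
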
xo                382 drivers/crypto/chelsio/chcr_ipsec.c 	struct xfrm_offload *xo;
xo                408 drivers/crypto/chelsio/chcr_ipsec.c 	xo = xfrm_offload(skb);
xo                412 drivers/crypto/chelsio/chcr_ipsec.c 	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
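
The chcr_ipsec.c hit shows the recurring idiom for rebuilding the 64-bit extended sequence number from two 32-bit halves in the offload area (chcr takes the low half, seqlo, from the ESP header; the mlx5 and esp4/esp6 offload hits below use xo->seq.low directly). In essence:

	struct xfrm_offload *xo = xfrm_offload(skb);
	__be64 seqno;

	/* low 32 bits of the ESN plus the high 32 bits kept in xo->seq.hi */
	seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
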
xo               1160 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 	struct xfrm_offload *xo = NULL;
xo               1210 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 	xo = xfrm_offload(skb);
xo               1211 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 	xo->flags = CRYPTO_DONE;
xo               1212 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c 	xo->status = CRYPTO_SUCCESS;
xo                547 drivers/net/ethernet/intel/ixgbevf/ipsec.c 	struct xfrm_offload *xo = NULL;
xo                597 drivers/net/ethernet/intel/ixgbevf/ipsec.c 	xo = xfrm_offload(skb);
xo                598 drivers/net/ethernet/intel/ixgbevf/ipsec.c 	xo->flags = CRYPTO_DONE;
xo                599 drivers/net/ethernet/intel/ixgbevf/ipsec.c 	xo->status = CRYPTO_SUCCESS;
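
The ixgbe/ixgbevf receive paths mark a packet the NIC already decrypted and authenticated so that xfrm_input() skips the software crypto. A hedged sketch of that driver-side step; the secpath/state lookup that must precede it is omitted:

#include <net/xfrm.h>

	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo) {
		xo->flags = CRYPTO_DONE;	/* hardware did the ESP processing */
		xo->status = CRYPTO_SUCCESS;	/* and the ICV checked out */
	}
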
xo                104 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h 			  struct xfrm_offload *xo);
xo                137 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 				struct xfrm_offload *xo)
xo                153 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		if (xo->proto == IPPROTO_IPV6) {
xo                162 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		swp_spec.tun_l4_proto = xo->proto;
xo                169 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			    struct xfrm_offload *xo)
xo                179 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		seq_hi = xo->seq.hi - 1;
xo                181 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		seq_hi = xo->seq.hi;
xo                185 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
xo                191 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			struct xfrm_offload *xo)
xo                197 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
xo                204 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 				     struct xfrm_offload *xo)
xo                228 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	mdata->content.tx.esp_next_proto = xo->proto;
xo                241 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                247 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	if (!xo)
xo                279 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
xo                281 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	sa_entry->set_iv_op(skb, x, xo);
xo                282 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	mlx5e_ipsec_set_metadata(skb, mdata, xo);
xo                296 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	struct xfrm_offload *xo;
xo                318 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	xo = xfrm_offload(skb);
xo                319 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 	xo->flags = CRYPTO_DONE;
xo                322 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		xo->status = CRYPTO_SUCCESS;
xo                324 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			xo->flags |= XFRM_ESP_NO_TRAILER;
xo                325 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 			xo->proto = mdata->content.rx.nexthdr;
xo                329 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
xo                332 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c 		xo->status = CRYPTO_INVALID_PROTOCOL;
xo                 52 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h 			    struct xfrm_offload *xo);
xo                 54 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h 			struct xfrm_offload *xo);
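
The mlx5 transmit hits split the work into programming the software parser offsets (set_swp), writing the IV derived from the ESN, and stamping the metadata segment. The IV step, approximated from the ipsec_rxtx.c hits around lines 191-197 (not verbatim; the offset assumes the IV sits immediately after the ESP header):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/xfrm.h>

static void example_set_iv(struct sk_buff *skb, struct xfrm_offload *xo)
{
	int iv_offset;
	__be64 seqno;

	/* place the sequence number in the IV field behind the ESP header */
	seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, sizeof(seqno));
}
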
xo                 71 drivers/remoteproc/qcom_q6v5_adsp.c 	struct clk *xo;
xo                181 drivers/remoteproc/qcom_q6v5_adsp.c 	ret = clk_prepare_enable(adsp->xo);
xo                236 drivers/remoteproc/qcom_q6v5_adsp.c 	clk_disable_unprepare(adsp->xo);
xo                247 drivers/remoteproc/qcom_q6v5_adsp.c 	clk_disable_unprepare(adsp->xo);
xo                298 drivers/remoteproc/qcom_q6v5_adsp.c 	adsp->xo = devm_clk_get(adsp->dev, "xo");
xo                299 drivers/remoteproc/qcom_q6v5_adsp.c 	if (IS_ERR(adsp->xo)) {
xo                300 drivers/remoteproc/qcom_q6v5_adsp.c 		ret = PTR_ERR(adsp->xo);
xo                 46 drivers/remoteproc/qcom_q6v5_pas.c 	struct clk *xo;
xo                 87 drivers/remoteproc/qcom_q6v5_pas.c 	ret = clk_prepare_enable(adsp->xo);
xo                126 drivers/remoteproc/qcom_q6v5_pas.c 	clk_disable_unprepare(adsp->xo);
xo                138 drivers/remoteproc/qcom_q6v5_pas.c 	clk_disable_unprepare(adsp->xo);
xo                186 drivers/remoteproc/qcom_q6v5_pas.c 	adsp->xo = devm_clk_get(adsp->dev, "xo");
xo                187 drivers/remoteproc/qcom_q6v5_pas.c 	if (IS_ERR(adsp->xo)) {
xo                188 drivers/remoteproc/qcom_q6v5_pas.c 		ret = PTR_ERR(adsp->xo);
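
Both q6v5 remoteproc drivers treat the crystal as an ordinary named clock: looked up once at probe, enabled before bringing the DSP up, and dropped again on stop and on the error path. Condensed from the adsp/pas hits above:

#include <linux/clk.h>

	/* probe */
	adsp->xo = devm_clk_get(adsp->dev, "xo");
	if (IS_ERR(adsp->xo))
		return PTR_ERR(adsp->xo);

	/* start */
	ret = clk_prepare_enable(adsp->xo);
	if (ret)
		return ret;

	/* stop, and the start error path */
	clk_disable_unprepare(adsp->xo);
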
xo                123 net/ipv4/esp4.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                127 net/ipv4/esp4.c 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
xo                139 net/ipv4/esp4.c 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
xo                185 net/ipv4/esp4.c 		struct xfrm_offload *xo = xfrm_offload(skb);
xo                187 net/ipv4/esp4.c 		if (xo)
xo                188 net/ipv4/esp4.c 			seqhi = xo->seq.hi;
xo                537 net/ipv4/esp4.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                549 net/ipv4/esp4.c 	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
xo                550 net/ipv4/esp4.c 		ret = xo->proto;
xo                583 net/ipv4/esp4.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                588 net/ipv4/esp4.c 	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
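
In esp4.c the post-decrypt path trusts a device that already stripped and verified the ESP trailer: XFRM_ESP_NO_TRAILER means the inner protocol comes from xo->proto instead of being read from the packet tail. A rough sketch of that branch; alen stands for the ICV length (crypto_aead_authsize() in the real code):

	struct xfrm_offload *xo = xfrm_offload(skb);
	u8 trailer[2];		/* { pad length, next header } */
	int nexthdr;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		nexthdr = xo->proto;	/* trailer already consumed by the NIC */
	} else {
		if (skb_copy_bits(skb, skb->len - alen - 2, trailer, 2))
			BUG();
		nexthdr = trailer[1];
		/* validate the pad length and trim the trailer here */
	}
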
xo                 32 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo;
xo                 44 net/ipv4/esp4_offload.c 	xo = xfrm_offload(skb);
xo                 45 net/ipv4/esp4_offload.c 	if (!xo || !(xo->flags & CRYPTO_DONE)) {
xo                 65 net/ipv4/esp4_offload.c 		xo = xfrm_offload(skb);
xo                 66 net/ipv4/esp4_offload.c 		if (!xo)
xo                 70 net/ipv4/esp4_offload.c 	xo->flags |= XFRM_GRO;
xo                 96 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                106 net/ipv4/esp4_offload.c 	xo->proto = proto;
xo                123 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                126 net/ipv4/esp4_offload.c 	ops = rcu_dereference(inet_offloads[xo->proto]);
xo                154 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                157 net/ipv4/esp4_offload.c 	if (!xo)
xo                185 net/ipv4/esp4_offload.c 	xo->flags |= XFRM_GSO_SEGMENT;
xo                193 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                198 net/ipv4/esp4_offload.c 	if (!(xo->flags & CRYPTO_DONE))
xo                209 net/ipv4/esp4_offload.c 	struct xfrm_offload *xo;
xo                218 net/ipv4/esp4_offload.c 	xo = xfrm_offload(skb);
xo                220 net/ipv4/esp4_offload.c 	if (!xo)
xo                226 net/ipv4/esp4_offload.c 		xo->flags |= CRYPTO_FALLBACK;
xo                230 net/ipv4/esp4_offload.c 	esp.proto = xo->proto;
xo                254 net/ipv4/esp4_offload.c 	seq = xo->seq.low;
xo                261 net/ipv4/esp4_offload.c 	if (xo->flags & XFRM_GSO_SEGMENT) {
xo                265 net/ipv4/esp4_offload.c 			xo->seq.low++;
xo                267 net/ipv4/esp4_offload.c 			xo->seq.low += skb_shinfo(skb)->gso_segs;
xo                270 net/ipv4/esp4_offload.c 	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
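
esp_xmit() in esp4_offload.c keeps the sequence number that belongs to this packet in a local, then moves xo->seq.low past every GSO segment so the next packet continues the ESN correctly; esp here is the struct esp_info handed to esp_output_tail(). Approximately:

	__u32 seq = xo->seq.low;

	/* ... fill in the ESP header for this (possibly GSO) skb ... */

	if (xo->flags & XFRM_GSO_SEGMENT) {
		if (skb_is_gso(skb))
			xo->seq.low += skb_shinfo(skb)->gso_segs;
		else
			xo->seq.low++;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
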
xo                 54 net/ipv4/xfrm4_input.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                 68 net/ipv4/xfrm4_input.c 	if (xo && (xo->flags & XFRM_GRO)) {
xo                130 net/ipv6/esp6.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                134 net/ipv6/esp6.c 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
xo                146 net/ipv6/esp6.c 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
xo                187 net/ipv6/esp6.c 		struct xfrm_offload *xo = xfrm_offload(skb);
xo                191 net/ipv6/esp6.c 		if (xo)
xo                192 net/ipv6/esp6.c 			esph->seq_no = htonl(xo->seq.hi);
xo                476 net/ipv6/esp6.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                488 net/ipv6/esp6.c 	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
xo                489 net/ipv6/esp6.c 		ret = xo->proto;
xo                521 net/ipv6/esp6.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                526 net/ipv6/esp6.c 	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
xo                 53 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo;
xo                 66 net/ipv6/esp6_offload.c 	xo = xfrm_offload(skb);
xo                 67 net/ipv6/esp6_offload.c 	if (!xo || !(xo->flags & CRYPTO_DONE)) {
xo                 87 net/ipv6/esp6_offload.c 		xo = xfrm_offload(skb);
xo                 88 net/ipv6/esp6_offload.c 		if (!xo)
xo                 92 net/ipv6/esp6_offload.c 	xo->flags |= XFRM_GRO;
xo                123 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                140 net/ipv6/esp6_offload.c 	xo->proto = proto;
xo                157 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                160 net/ipv6/esp6_offload.c 	ops = rcu_dereference(inet6_offloads[xo->proto]);
xo                188 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                191 net/ipv6/esp6_offload.c 	if (!xo)
xo                217 net/ipv6/esp6_offload.c 	xo->flags |= XFRM_GSO_SEGMENT;
xo                225 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                230 net/ipv6/esp6_offload.c 	if (!(xo->flags & CRYPTO_DONE))
xo                242 net/ipv6/esp6_offload.c 	struct xfrm_offload *xo;
xo                251 net/ipv6/esp6_offload.c 	xo = xfrm_offload(skb);
xo                253 net/ipv6/esp6_offload.c 	if (!xo)
xo                257 net/ipv6/esp6_offload.c 		xo->flags |= CRYPTO_FALLBACK;
xo                261 net/ipv6/esp6_offload.c 	esp.proto = xo->proto;
xo                282 net/ipv6/esp6_offload.c 	seq = xo->seq.low;
xo                289 net/ipv6/esp6_offload.c 	if (xo->flags & XFRM_GSO_SEGMENT) {
xo                293 net/ipv6/esp6_offload.c 			xo->seq.low++;
xo                295 net/ipv6/esp6_offload.c 			xo->seq.low += skb_shinfo(skb)->gso_segs;
xo                298 net/ipv6/esp6_offload.c 	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
xo                 45 net/ipv6/xfrm6_input.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                 60 net/ipv6/xfrm6_input.c 	if (xo && (xo->flags & XFRM_GRO)) {
xo                 25 net/xfrm/xfrm_device.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                 28 net/xfrm/xfrm_device.c 	if (xo->flags & XFRM_GSO_SEGMENT)
xo                 38 net/xfrm/xfrm_device.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                 40 net/xfrm/xfrm_device.c 	if (xo->flags & XFRM_GSO_SEGMENT)
xo                 82 net/xfrm/xfrm_device.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                 85 net/xfrm/xfrm_device.c 	if (!xo)
xo                 93 net/xfrm/xfrm_device.c 	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
xo                132 net/xfrm/xfrm_device.c 		xo->flags |= XFRM_DEV_RESUME;
xo                157 net/xfrm/xfrm_device.c 		xo = xfrm_offload(skb2);
xo                158 net/xfrm/xfrm_device.c 		xo->flags |= XFRM_DEV_RESUME;
xo                471 net/xfrm/xfrm_input.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                502 net/xfrm/xfrm_input.c 		if (xo && (xo->flags & CRYPTO_DONE)) {
xo                506 net/xfrm/xfrm_input.c 			if (!(xo->status & CRYPTO_SUCCESS)) {
xo                507 net/xfrm/xfrm_input.c 				if (xo->status &
xo                520 net/xfrm/xfrm_input.c 				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
xo                722 net/xfrm/xfrm_input.c 		xo = xfrm_offload(skb);
xo                723 net/xfrm/xfrm_input.c 		if (xo)
xo                724 net/xfrm/xfrm_input.c 			xfrm_gro = xo->flags & XFRM_GRO;
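
xfrm_input() is the consumer of the driver markings shown earlier: CRYPTO_DONE bypasses the software decrypt, and anything other than CRYPTO_SUCCESS in xo->status is mapped onto the matching statistics counter before the packet is dropped. A simplified sketch of that branch (the real code distinguishes several status bits; drop is the function's existing error label):

	struct xfrm_offload *xo = xfrm_offload(skb);
	bool crypto_done = false;

	if (xo && (xo->flags & CRYPTO_DONE)) {
		crypto_done = true;

		if (!(xo->status & CRYPTO_SUCCESS)) {
			/* e.g. CRYPTO_TUNNEL_ESP_AUTH_FAILED or
			 * CRYPTO_INVALID_PROTOCOL: bump the matching
			 * LINUX_MIB_XFRMIN* counter, then drop
			 */
			goto drop;
		}
	}
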
xo                557 net/xfrm/xfrm_replay.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                560 net/xfrm/xfrm_replay.c 	if (!xo)
xo                566 net/xfrm/xfrm_replay.c 			xo->seq.low = oseq;
xo                569 net/xfrm/xfrm_replay.c 			xo->seq.low = oseq + 1;
xo                574 net/xfrm/xfrm_replay.c 		xo->seq.hi = 0;
xo                594 net/xfrm/xfrm_replay.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                599 net/xfrm/xfrm_replay.c 	if (!xo)
xo                605 net/xfrm/xfrm_replay.c 			xo->seq.low = oseq;
xo                608 net/xfrm/xfrm_replay.c 			xo->seq.low = oseq + 1;
xo                613 net/xfrm/xfrm_replay.c 		xo->seq.hi = 0;
xo                633 net/xfrm/xfrm_replay.c 	struct xfrm_offload *xo = xfrm_offload(skb);
xo                639 net/xfrm/xfrm_replay.c 	if (!xo)
xo                646 net/xfrm/xfrm_replay.c 			xo->seq.low = oseq;
xo                647 net/xfrm/xfrm_replay.c 			xo->seq.hi = oseq_hi;
xo                651 net/xfrm/xfrm_replay.c 			xo->seq.low = oseq + 1;
xo                652 net/xfrm/xfrm_replay.c 			xo->seq.hi = oseq_hi;
xo                658 net/xfrm/xfrm_replay.c 			xo->seq.hi = oseq_hi;
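
The replay_overflow offload variants in xfrm_replay.c are where xo->seq gets populated on output: the packet's first sequence number lands in xo->seq.low (plus xo->seq.hi for ESN), a GSO skb reserves gso_segs numbers at once, and oseq_hi is bumped if the low half wraps while doing so. A condensed ESN-flavoured sketch; x is the xfrm_state and the XFRM_SKB_CB bookkeeping is omitted:

	struct xfrm_offload *xo = xfrm_offload(skb);
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	__u32 oseq = replay_esn->oseq;
	__u32 oseq_hi = replay_esn->oseq_hi;

	if (!skb_is_gso(skb)) {
		xo->seq.low = ++oseq;
		xo->seq.hi = oseq_hi;
	} else {
		/* first segment gets oseq + 1, the rest follow in order */
		xo->seq.low = oseq + 1;
		xo->seq.hi = oseq_hi;
		oseq += skb_shinfo(skb)->gso_segs;
	}

	if (unlikely(oseq < replay_esn->oseq)) {
		/* the low half wrapped while reserving this packet's range */
		xo->seq.hi = ++oseq_hi;
		replay_esn->oseq_hi = oseq_hi;
	}

	replay_esn->oseq = oseq;
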