fl 244 arch/mips/kernel/rtlx.c size_t lx_write, fl = 0L;
fl 263 arch/mips/kernel/rtlx.c fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
fl 265 arch/mips/kernel/rtlx.c failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
fl 270 arch/mips/kernel/rtlx.c if (count - fl)
fl 271 arch/mips/kernel/rtlx.c failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);
fl 289 arch/mips/kernel/rtlx.c size_t fl;
fl 305 arch/mips/kernel/rtlx.c fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
fl 307 arch/mips/kernel/rtlx.c failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
fl 312 arch/mips/kernel/rtlx.c if (count - fl)
fl 313 arch/mips/kernel/rtlx.c failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
fl 316 arch/powerpc/kernel/rtas_flash.c struct flash_block_list *fl;
fl 333 arch/powerpc/kernel/rtas_flash.c fl = uf->flist;
fl 334 arch/powerpc/kernel/rtas_flash.c while (fl->next)
fl 335 arch/powerpc/kernel/rtas_flash.c fl = fl->next; /* seek to last block_list for append */
fl 336 arch/powerpc/kernel/rtas_flash.c next_free = fl->num_blocks;
fl 339 arch/powerpc/kernel/rtas_flash.c fl->next = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
fl 340 arch/powerpc/kernel/rtas_flash.c if (!fl->next)
fl 342 arch/powerpc/kernel/rtas_flash.c fl = fl->next;
fl 357 arch/powerpc/kernel/rtas_flash.c fl->blocks[next_free].data = p;
fl 358 arch/powerpc/kernel/rtas_flash.c fl->blocks[next_free].length = count;
fl 359 arch/powerpc/kernel/rtas_flash.c fl->num_blocks++;
fl 1327 arch/x86/events/intel/ds.c int fl = event->hw.flags;
fl 1328 arch/x86/events/intel/ds.c bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
fl 1330 arch/x86/events/intel/ds.c if (fl & PERF_X86_EVENT_PEBS_LDLAT)
fl 1332 arch/x86/events/intel/ds.c else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
fl 3358 drivers/gpu/drm/amd/amdgpu/si_dpm.c static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
fl 3363 drivers/gpu/drm/amd/amdgpu/si_dpm.c if ((fl == 0) || (fh == 0) || (fl > fh))
fl 3366 drivers/gpu/drm/amd/amdgpu/si_dpm.c k = (100 * fh) / fl;
fl 189 drivers/gpu/drm/i915/i915_cmd_parser.c #define CMD(op, opm, f, lm, fl, ...) \
fl 191 drivers/gpu/drm/i915/i915_cmd_parser.c .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
fl 256 drivers/gpu/drm/i915/selftests/i915_vma.c #define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
fl 257 drivers/gpu/drm/i915/selftests/i915_vma.c #define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
fl 258 drivers/gpu/drm/i915/selftests/i915_vma.c #define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL)
fl 259 drivers/gpu/drm/i915/selftests/i915_vma.c #define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC)
fl 329 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 4, .lut = 0x357},
fl 330 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 5, .lut = 0x3357},
fl 331 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 6, .lut = 0x23357},
fl 332 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 7, .lut = 0x223357},
fl 333 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 8, .lut = 0x2223357},
fl 334 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 9, .lut = 0x22223357},
fl 335 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 10, .lut = 0x222223357},
fl 336 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 11, .lut = 0x2222223357},
fl 337 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 12, .lut = 0x22222223357},
fl 338 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 13, .lut = 0x222222223357},
fl 339 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 14, .lut = 0x1222222223357},
fl 340 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 0, .lut = 0x11222222223357}
fl 344 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 10, .lut = 0x344556677},
fl 345 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 11, .lut = 0x3344556677},
fl 346 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 12, .lut = 0x23344556677},
fl 347 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 13, .lut = 0x223344556677},
fl 348 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 14, .lut = 0x1223344556677},
fl 349 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 0, .lut = 0x112233344556677},
fl 353 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c {.fl = 0, .lut = 0x0},
fl 261 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h u32 fl;
fl 207 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c if (total_fl <= tbl->entries[i].fl)
fl 211 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c if (!tbl->entries[i-1].fl)
fl 25 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
fl 27 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
fl 32 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __field(u32, fl)
fl 40 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->fl = fl;
fl 46 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->rt, __entry->fl,
fl 221 drivers/gpu/drm/radeon/r600_dpm.c int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
fl 226 drivers/gpu/drm/radeon/r600_dpm.c if ((fl == 0) || (fh == 0) || (fl > fh))
fl 229 drivers/gpu/drm/radeon/r600_dpm.c k = (100 * fh) / fl;
fl 142 drivers/gpu/drm/radeon/r600_dpm.h int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
fl 191 drivers/iio/adc/stm32-dfsdm-adc.c static int stm32_dfsdm_compute_osrs(struct stm32_dfsdm_filter *fl,
fl 198 drivers/iio/adc/stm32-dfsdm-adc.c unsigned int p = fl->ford; /* filter order (ford) */
fl 199 drivers/iio/adc/stm32-dfsdm-adc.c struct stm32_dfsdm_filter_osr *flo = &fl->flo[fast];
fl 211 drivers/iio/adc/stm32-dfsdm-adc.c if (fl->ford == DFSDM_FASTSINC_ORDER) {
fl 224 drivers/iio/adc/stm32-dfsdm-adc.c else if (fl->ford == DFSDM_FASTSINC_ORDER)
fl 315 drivers/iio/adc/stm32-dfsdm-adc.c struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
fl 318 drivers/iio/adc/stm32-dfsdm-adc.c memset(&fl->flo[0], 0, sizeof(fl->flo[0]));
fl 319 drivers/iio/adc/stm32-dfsdm-adc.c memset(&fl->flo[1], 0, sizeof(fl->flo[1]));
fl 321 drivers/iio/adc/stm32-dfsdm-adc.c ret0 = stm32_dfsdm_compute_osrs(fl, 0, oversamp);
fl 322 drivers/iio/adc/stm32-dfsdm-adc.c ret1 = stm32_dfsdm_compute_osrs(fl, 1, oversamp);
fl 456 drivers/iio/adc/stm32-dfsdm-adc.c struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[fl_id];
fl 457 drivers/iio/adc/stm32-dfsdm-adc.c struct stm32_dfsdm_filter_osr *flo = &fl->flo[0];
fl 462 drivers/iio/adc/stm32-dfsdm-adc.c fl->fast = 0;
fl 470 drivers/iio/adc/stm32-dfsdm-adc.c if (fl->flo[1].res >= fl->flo[0].res) {
fl 471 drivers/iio/adc/stm32-dfsdm-adc.c fl->fast = 1;
fl 472 drivers/iio/adc/stm32-dfsdm-adc.c flo = &fl->flo[1];
fl 500 drivers/iio/adc/stm32-dfsdm-adc.c struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[fl_id];
fl 501 drivers/iio/adc/stm32-dfsdm-adc.c struct stm32_dfsdm_filter_osr *flo = &fl->flo[fl->fast];
fl 520 drivers/iio/adc/stm32-dfsdm-adc.c DFSDM_FCR_FORD(fl->ford));
fl 530 drivers/iio/adc/stm32-dfsdm-adc.c DFSDM_CR1_FAST(fl->fast));
fl 564 drivers/iio/adc/stm32-dfsdm-adc.c cr1 |= DFSDM_CR1_RSYNC(fl->sync_mode);
fl 585 drivers/iio/adc/stm32-dfsdm-adc.c if (!fl->sync_mode && !trig)
fl 587 drivers/iio/adc/stm32-dfsdm-adc.c cr1 |= DFSDM_CR1_JSYNC(fl->sync_mode);
fl 824 drivers/iio/adc/stm32-dfsdm-adc.c struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
fl 825 drivers/iio/adc/stm32-dfsdm-adc.c struct stm32_dfsdm_filter_osr *flo = &fl->flo[fl->fast];
fl 1873 drivers/infiniband/hw/mlx4/qp.c path->fl = 1 << 6;
fl 2284 drivers/infiniband/hw/mlx4/qp.c context->pri_path.fl |=
fl 2473 drivers/infiniband/hw/mlx4/qp.c context->pri_path.fl = 0x80;
fl 2476 drivers/infiniband/hw/mlx4/qp.c context->pri_path.fl = 0x80;
fl 89 drivers/infiniband/sw/rxe/rxe_net.c struct flowi4 fl = { { 0 } };
fl 91 drivers/infiniband/sw/rxe/rxe_net.c memset(&fl, 0, sizeof(fl));
fl 92 drivers/infiniband/sw/rxe/rxe_net.c fl.flowi4_oif = ndev->ifindex;
fl 93 drivers/infiniband/sw/rxe/rxe_net.c memcpy(&fl.saddr, saddr, sizeof(*saddr));
fl 94 drivers/infiniband/sw/rxe/rxe_net.c memcpy(&fl.daddr, daddr, sizeof(*daddr));
fl 95 drivers/infiniband/sw/rxe/rxe_net.c fl.flowi4_proto = IPPROTO_UDP;
fl 97 drivers/infiniband/sw/rxe/rxe_net.c rt = ip_route_output_key(&init_net, &fl);
fl 86 drivers/media/i2c/smiapp/smiapp.h #define SMIAPP_IDENT_FQ(manufacturer, model, rev, fl, _name, _quirk) \
fl 90 drivers/media/i2c/smiapp/smiapp.h .flags = fl, \
fl 407 drivers/media/mc/mc-device.c #define MEDIA_IOC_ARG(__cmd, func, fl, from_user, to_user) \
fl 411 drivers/media/mc/mc-device.c .flags = fl, \
fl 416 drivers/media/mc/mc-device.c #define MEDIA_IOC(__cmd, func, fl) \
fl 417 drivers/media/mc/mc-device.c MEDIA_IOC_ARG(__cmd, func, fl, copy_arg_from_user, copy_arg_to_user)
fl 775 drivers/media/pci/bt8xx/bttv-driver.c unsigned char fl, fh, fi;
fl 788 drivers/media/pci/bt8xx/bttv-driver.c fl=fout/fin;
fl 790 drivers/media/pci/bt8xx/bttv-driver.c btwrite(fl, BT848_PLL_F_LO);
fl 30 drivers/media/usb/pvrusb2/pvrusb2-context.c static void pvr2_context_set_notify(struct pvr2_context *mp, int fl)
fl 34 drivers/media/usb/pvrusb2/pvrusb2-context.c if (fl) {
fl 1702 drivers/media/usb/pvrusb2/pvrusb2-hdw.c int fl;
fl 1704 drivers/media/usb/pvrusb2/pvrusb2-hdw.c fl = pvr2_hdw_untrip_unlocked(hdw);
fl 1706 drivers/media/usb/pvrusb2/pvrusb2-hdw.c if (fl) pvr2_hdw_state_sched(hdw);
fl 1745 drivers/media/usb/pvrusb2/pvrusb2-hdw.c int fl;
fl 1747 drivers/media/usb/pvrusb2/pvrusb2-hdw.c if ((fl = (hdw->desired_stream_type != config)) != 0) {
fl 1755 drivers/media/usb/pvrusb2/pvrusb2-hdw.c if (fl) return 0;
fl 3181 drivers/media/usb/pvrusb2/pvrusb2-hdw.c int fl;
fl 3183 drivers/media/usb/pvrusb2/pvrusb2-hdw.c fl = pvr2_hdw_commit_setup(hdw);
fl 3185 drivers/media/usb/pvrusb2/pvrusb2-hdw.c if (!fl) return 0;
fl 3192 drivers/media/usb/pvrusb2/pvrusb2-hdw.c int fl = 0;
fl 3195 drivers/media/usb/pvrusb2/pvrusb2-hdw.c fl = pvr2_hdw_state_eval(hdw);
fl 3197 drivers/media/usb/pvrusb2/pvrusb2-hdw.c if (fl && hdw->state_func) {
fl 4538 drivers/media/usb/pvrusb2/pvrusb2-hdw.c int fl = !0;
fl 4540 drivers/media/usb/pvrusb2/pvrusb2-hdw.c fl = (hdw->state_encoder_ok &&
fl 4544 drivers/media/usb/pvrusb2/pvrusb2-hdw.c fl = hdw->state_encoder_ok;
fl 4546 drivers/media/usb/pvrusb2/pvrusb2-hdw.c if (fl &&
fl 167 drivers/media/usb/pvrusb2/pvrusb2-io.c int fl;
fl 178 drivers/media/usb/pvrusb2/pvrusb2-io.c fl = (sp->r_count == 0);
fl 189 drivers/media/usb/pvrusb2/pvrusb2-io.c return fl;
fl 229 drivers/media/usb/pvrusb2/pvrusb2-ioread.c int pvr2_ioread_set_enabled(struct pvr2_ioread *cp,int fl)
fl 232 drivers/media/usb/pvrusb2/pvrusb2-ioread.c if ((!fl) == (!(cp->enabled))) return ret;
fl 236 drivers/media/usb/pvrusb2/pvrusb2-ioread.c if (fl) {
fl 20 drivers/media/usb/pvrusb2/pvrusb2-ioread.h int pvr2_ioread_set_enabled(struct pvr2_ioread *,int fl);
fl 585 drivers/message/fusion/lsi/mpi.h #define MPI_SGE_GET_FLAGS(fl) (((fl) & ~MPI_SGE_LENGTH_MASK) >> MPI_SGE_FLAGS_SHIFT)
fl 586 drivers/message/fusion/lsi/mpi.h #define MPI_SGE_LENGTH(fl) ((fl) & MPI_SGE_LENGTH_MASK)
fl 587 drivers/message/fusion/lsi/mpi.h #define MPI_SGE_CHAIN_LENGTH(fl) ((fl) & MPI_SGE_CHAIN_LENGTH_MASK)
fl 117 drivers/misc/fastrpc.c struct fastrpc_user *fl;
fl 136 drivers/misc/fastrpc.c struct fastrpc_user *fl;
fl 163 drivers/misc/fastrpc.c struct fastrpc_user *fl;
fl 236 drivers/misc/fastrpc.c static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
fl 241 drivers/misc/fastrpc.c mutex_lock(&fl->mutex);
fl 242 drivers/misc/fastrpc.c list_for_each_entry(map, &fl->maps, node) {
fl 246 drivers/misc/fastrpc.c mutex_unlock(&fl->mutex);
fl 250 drivers/misc/fastrpc.c mutex_unlock(&fl->mutex);
fl 262 drivers/misc/fastrpc.c static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
fl 274 drivers/misc/fastrpc.c buf->fl = fl;
fl 288 drivers/misc/fastrpc.c if (fl->sctx && fl->sctx->sid)
fl 289 drivers/misc/fastrpc.c buf->phys += ((u64)fl->sctx->sid << 32);
fl 423 drivers/misc/fastrpc.c ctx->fl = user;
fl 593 drivers/misc/fastrpc.c static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
fl 596 drivers/misc/fastrpc.c struct fastrpc_session_ctx *sess = fl->sctx;
fl 600 drivers/misc/fastrpc.c if (!fastrpc_map_find(fl, fd, ppmap))
fl 608 drivers/misc/fastrpc.c map->fl = fl;
fl 630 drivers/misc/fastrpc.c map->phys += ((u64)fl->sctx->sid << 32);
fl 636 drivers/misc/fastrpc.c spin_lock(&fl->lock);
fl 637 drivers/misc/fastrpc.c list_add_tail(&map->node, &fl->maps);
fl 638 drivers/misc/fastrpc.c spin_unlock(&fl->lock);
fl 714 drivers/misc/fastrpc.c struct device *dev = ctx->fl->sctx->dev;
fl 726 drivers/misc/fastrpc.c err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
fl 739 drivers/misc/fastrpc.c struct device *dev = ctx->fl->sctx->dev;
fl 759 drivers/misc/fastrpc.c err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
fl 887 drivers/misc/fastrpc.c struct fastrpc_user *fl = ctx->fl;
fl 890 drivers/misc/fastrpc.c cctx = fl->cctx;
fl 891 drivers/misc/fastrpc.c msg->pid = fl->tgid;
fl 897 drivers/misc/fastrpc.c msg->ctx = ctx->ctxid | fl->pd;
fl 907 drivers/misc/fastrpc.c static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
fl 914 drivers/misc/fastrpc.c if (!fl->sctx)
fl 917 drivers/misc/fastrpc.c if (!fl->cctx->rpdev)
fl 920 drivers/misc/fastrpc.c ctx = fastrpc_context_alloc(fl, kernel, sc, args);
fl 933 drivers/misc/fastrpc.c err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
fl 958 drivers/misc/fastrpc.c spin_lock(&fl->lock);
fl 960 drivers/misc/fastrpc.c spin_unlock(&fl->lock);
fl 964 drivers/misc/fastrpc.c dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
fl 969 drivers/misc/fastrpc.c static int fastrpc_init_create_process(struct fastrpc_user *fl,
fl 1003 drivers/misc/fastrpc.c inbuf.pgid = fl->tgid;
fl 1009 drivers/misc/fastrpc.c fl->pd = 1;
fl 1012 drivers/misc/fastrpc.c err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
fl 1019 drivers/misc/fastrpc.c err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
fl 1024 drivers/misc/fastrpc.c fl->init_mem = imem;
fl 1056 drivers/misc/fastrpc.c err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
fl 1066 drivers/misc/fastrpc.c fl->init_mem = NULL;
fl 1070 drivers/misc/fastrpc.c spin_lock(&fl->lock);
fl 1072 drivers/misc/fastrpc.c spin_unlock(&fl->lock);
fl 1111 drivers/misc/fastrpc.c static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
fl 1117 drivers/misc/fastrpc.c tgid = fl->tgid;
fl 1124 drivers/misc/fastrpc.c return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
fl 1130 drivers/misc/fastrpc.c struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
fl 1131 drivers/misc/fastrpc.c struct fastrpc_channel_ctx *cctx = fl->cctx;
fl 1136 drivers/misc/fastrpc.c fastrpc_release_current_dsp_process(fl);
fl 1139 drivers/misc/fastrpc.c list_del(&fl->user);
fl 1142 drivers/misc/fastrpc.c if (fl->init_mem)
fl 1143 drivers/misc/fastrpc.c fastrpc_buf_free(fl->init_mem);
fl 1145 drivers/misc/fastrpc.c list_for_each_entry_safe(ctx, n, &fl->pending, node) {
fl 1150 drivers/misc/fastrpc.c list_for_each_entry_safe(map, m, &fl->maps, node) {
fl 1155 drivers/misc/fastrpc.c fastrpc_session_free(cctx, fl->sctx);
fl 1158 drivers/misc/fastrpc.c mutex_destroy(&fl->mutex);
fl 1159 drivers/misc/fastrpc.c kfree(fl);
fl 1168 drivers/misc/fastrpc.c struct fastrpc_user *fl = NULL;
fl 1171 drivers/misc/fastrpc.c fl = kzalloc(sizeof(*fl), GFP_KERNEL);
fl 1172 drivers/misc/fastrpc.c if (!fl)
fl 1178 drivers/misc/fastrpc.c filp->private_data = fl;
fl 1179 drivers/misc/fastrpc.c spin_lock_init(&fl->lock);
fl 1180 drivers/misc/fastrpc.c mutex_init(&fl->mutex);
fl 1181 drivers/misc/fastrpc.c INIT_LIST_HEAD(&fl->pending);
fl 1182 drivers/misc/fastrpc.c INIT_LIST_HEAD(&fl->maps);
fl 1183 drivers/misc/fastrpc.c INIT_LIST_HEAD(&fl->user);
fl 1184 drivers/misc/fastrpc.c fl->tgid = current->tgid;
fl 1185 drivers/misc/fastrpc.c fl->cctx = cctx;
fl 1187 drivers/misc/fastrpc.c fl->sctx = fastrpc_session_alloc(cctx);
fl 1188 drivers/misc/fastrpc.c if (!fl->sctx) {
fl 1190 drivers/misc/fastrpc.c mutex_destroy(&fl->mutex);
fl 1191 drivers/misc/fastrpc.c kfree(fl);
fl 1197 drivers/misc/fastrpc.c list_add_tail(&fl->user, &cctx->users);
fl 1203 drivers/misc/fastrpc.c static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
fl 1213 drivers/misc/fastrpc.c err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
fl 1241 drivers/misc/fastrpc.c static int fastrpc_init_attach(struct fastrpc_user *fl)
fl 1244 drivers/misc/fastrpc.c int tgid = fl->tgid;
fl 1252 drivers/misc/fastrpc.c fl->pd = 0;
fl 1254 drivers/misc/fastrpc.c return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
fl 1258 drivers/misc/fastrpc.c static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
fl 1282 drivers/misc/fastrpc.c err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
fl 1291 drivers/misc/fastrpc.c struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
fl 1297 drivers/misc/fastrpc.c err = fastrpc_invoke(fl, argp);
fl 1300 drivers/misc/fastrpc.c err = fastrpc_init_attach(fl);
fl 1303 drivers/misc/fastrpc.c err = fastrpc_init_create_process(fl, argp);
fl 1306 drivers/misc/fastrpc.c err = fastrpc_dmabuf_alloc(fl, argp);
fl 73 drivers/mtd/parsers/redboot.c struct fis_list *fl = NULL, *tmp_fl;
fl 216 drivers/mtd/parsers/redboot.c prev = &fl;
fl 225 drivers/mtd/parsers/redboot.c if (fl->img->flash_base) {
fl 230 drivers/mtd/parsers/redboot.c for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) {
fl 255 drivers/mtd/parsers/redboot.c if (fl->img->flash_base) {
fl 257 drivers/mtd/parsers/redboot.c parts[0].size = fl->img->flash_base;
fl 263 drivers/mtd/parsers/redboot.c parts[i].size = fl->img->size;
fl 264 drivers/mtd/parsers/redboot.c parts[i].offset = fl->img->flash_base;
fl 267 drivers/mtd/parsers/redboot.c strcpy(names, fl->img->name);
fl 278 drivers/mtd/parsers/redboot.c if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
fl 281 drivers/mtd/parsers/redboot.c parts[i].size = fl->next->img->flash_base - parts[i].offset;
fl 285 drivers/mtd/parsers/redboot.c tmp_fl = fl;
fl 286 drivers/mtd/parsers/redboot.c fl = fl->next;
fl 292 drivers/mtd/parsers/redboot.c while (fl) {
fl 293 drivers/mtd/parsers/redboot.c struct fis_list *old = fl;
fl 294 drivers/mtd/parsers/redboot.c fl = fl->next;
fl 1653 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c struct bnx2x_vf_mac_vlan_filters *fl = NULL;
fl 1655 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
fl 1657 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c if (!fl)
fl 1665 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
fl 1667 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c fl->filters[j].mac = msg_filter->mac;
fl 1668 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
fl 1671 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c fl->filters[j].vid = msg_filter->vlan_tag;
fl 1672 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
fl 1674 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
fl 1675 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c fl->count++;
fl 1678 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c if (!fl->count)
fl 1679 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c kfree(fl);
fl 1681 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c *pfl = fl;
fl 1740 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c struct bnx2x_vf_mac_vlan_filters *fl = NULL;
fl 1743 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
fl 1748 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c if (fl) {
fl 1751 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
fl 1759 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c fl = NULL;
fl 1761 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
fl 1766 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c if (fl) {
fl 1768 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
fl 1776 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c fl = NULL;
fl 1778 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
fl 1783 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c if (fl) {
fl 1785 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
fl 1002 drivers/net/ethernet/chelsio/cxgb/sge.c static void recycle_fl_buf(struct freelQ *fl, int idx)
fl 1004 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_e *from = &fl->entries[idx];
fl 1005 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_e *to = &fl->entries[fl->pidx];
fl 1007 drivers/net/ethernet/chelsio/cxgb/sge.c fl->centries[fl->pidx] = fl->centries[idx];
fl 1010 drivers/net/ethernet/chelsio/cxgb/sge.c to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
fl 1012 drivers/net/ethernet/chelsio/cxgb/sge.c to->gen2 = V_CMD_GEN2(fl->genbit);
fl 1013 drivers/net/ethernet/chelsio/cxgb/sge.c fl->credits++;
fl 1015 drivers/net/ethernet/chelsio/cxgb/sge.c if (++fl->pidx == fl->size) {
fl 1016 drivers/net/ethernet/chelsio/cxgb/sge.c fl->pidx = 0;
fl 1017 drivers/net/ethernet/chelsio/cxgb/sge.c fl->genbit ^= 1;
fl 1040 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ *fl, unsigned int len)
fl 1042 drivers/net/ethernet/chelsio/cxgb/sge.c const struct freelQ_ce *ce = &fl->centries[fl->cidx];
fl 1061 drivers/net/ethernet/chelsio/cxgb/sge.c recycle_fl_buf(fl, fl->cidx);
fl 1066 drivers/net/ethernet/chelsio/cxgb/sge.c if (fl->credits < 2) {
fl 1067 drivers/net/ethernet/chelsio/cxgb/sge.c recycle_fl_buf(fl, fl->cidx);
fl 1089 drivers/net/ethernet/chelsio/cxgb/sge.c static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
fl 1091 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_ce *ce = &fl->centries[fl->cidx];
fl 1098 drivers/net/ethernet/chelsio/cxgb/sge.c recycle_fl_buf(fl, fl->cidx);
fl 1352 drivers/net/ethernet/chelsio/cxgb/sge.c static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
fl 1360 drivers/net/ethernet/chelsio/cxgb/sge.c skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
fl 1496 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ *fl = &sge->freelQ[e->FreelistQid];
fl 1500 drivers/net/ethernet/chelsio/cxgb/sge.c unexpected_offload(adapter, fl);
fl 1502 drivers/net/ethernet/chelsio/cxgb/sge.c sge_rx(sge, fl, e->BufferLength);
fl 1510 drivers/net/ethernet/chelsio/cxgb/sge.c if (++fl->cidx == fl->size)
fl 1511 drivers/net/ethernet/chelsio/cxgb/sge.c fl->cidx = 0;
fl 1512 drivers/net/ethernet/chelsio/cxgb/sge.c prefetch(fl->centries[fl->cidx].skb);
fl 1514 drivers/net/ethernet/chelsio/cxgb/sge.c if (unlikely(--fl->credits <
fl 1515 drivers/net/ethernet/chelsio/cxgb/sge.c fl->size - SGE_FREEL_REFILL_THRESH))
fl 1516 drivers/net/ethernet/chelsio/cxgb/sge.c refill_free_list(sge, fl);
fl 1561 drivers/net/ethernet/chelsio/cxgb/sge.c const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
fl 1565 drivers/net/ethernet/chelsio/cxgb/sge.c prefetch(fl->centries[fl->cidx].skb);
fl 203 drivers/net/ethernet/chelsio/cxgb3/adapter.h struct sge_fl fl[SGE_RXQ_PER_SET];
fl 2780 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c qs->fl[i].empty += (v & 1);
fl 169 drivers/net/ethernet/chelsio/cxgb3/sge.c return container_of(q, struct sge_qset, fl[qidx]);
fl 563 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
fl 565 drivers/net/ethernet/chelsio/cxgb3/sge.c refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
fl 657 drivers/net/ethernet/chelsio/cxgb3/sge.c memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
fl 682 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->fl[i].desc) {
fl 684 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
fl 686 drivers/net/ethernet/chelsio/cxgb3/sge.c free_rx_bufs(pdev, &q->fl[i]);
fl 687 drivers/net/ethernet/chelsio/cxgb3/sge.c kfree(q->fl[i].sdesc);
fl 689 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[i].size *
fl 690 drivers/net/ethernet/chelsio/cxgb3/sge.c sizeof(struct rx_desc), q->fl[i].desc,
fl 691 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[i].phys_addr);
fl 733 drivers/net/ethernet/chelsio/cxgb3/sge.c qs->fl[0].cntxt_id = 2 * id;
fl 734 drivers/net/ethernet/chelsio/cxgb3/sge.c qs->fl[1].cntxt_id = 2 * id + 1;
fl 783 drivers/net/ethernet/chelsio/cxgb3/sge.c static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
fl 787 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
fl 790 drivers/net/ethernet/chelsio/cxgb3/sge.c fl->credits--;
fl 806 drivers/net/ethernet/chelsio/cxgb3/sge.c recycle_rx_buf(adap, fl, fl->cidx);
fl 810 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(fl->credits < drop_thres) &&
fl 811 drivers/net/ethernet/chelsio/cxgb3/sge.c refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
fl 817 drivers/net/ethernet/chelsio/cxgb3/sge.c fl->buf_size, PCI_DMA_FROMDEVICE);
fl 820 drivers/net/ethernet/chelsio/cxgb3/sge.c __refill_fl(adap, fl);
fl 842 drivers/net/ethernet/chelsio/cxgb3/sge.c static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
fl 847 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
fl 865 drivers/net/ethernet/chelsio/cxgb3/sge.c fl->credits--;
fl 866 drivers/net/ethernet/chelsio/cxgb3/sge.c recycle_rx_buf(adap, fl, fl->cidx);
fl 871 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
fl 888 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
fl 891 drivers/net/ethernet/chelsio/cxgb3/sge.c fl->alloc_size,
fl 911 drivers/net/ethernet/chelsio/cxgb3/sge.c fl->credits--;
fl 2129 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_fl *fl, int len, int complete)
fl 2131 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
fl 2144 drivers/net/ethernet/chelsio/cxgb3/sge.c fl->credits--;
fl 2148 drivers/net/ethernet/chelsio/cxgb3/sge.c fl->buf_size - SGE_PG_RSVD,
fl 2152 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
fl 2155 drivers/net/ethernet/chelsio/cxgb3/sge.c fl->alloc_size,
fl 2367 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_fl *fl;
fl 2371 drivers/net/ethernet/chelsio/cxgb3/sge.c fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
fl 2372 drivers/net/ethernet/chelsio/cxgb3/sge.c if (fl->use_pages) {
fl 2373 drivers/net/ethernet/chelsio/cxgb3/sge.c void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
fl 2379 drivers/net/ethernet/chelsio/cxgb3/sge.c __refill_fl(adap, fl);
fl 2381 drivers/net/ethernet/chelsio/cxgb3/sge.c lro_add_page(adap, qs, fl,
fl 2387 drivers/net/ethernet/chelsio/cxgb3/sge.c skb = get_packet_pg(adap, fl, q,
fl 2393 drivers/net/ethernet/chelsio/cxgb3/sge.c skb = get_packet(adap, fl, G_RSPD_LEN(len),
fl 2402 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++fl->cidx == fl->size)
fl 2403 drivers/net/ethernet/chelsio/cxgb3/sge.c fl->cidx = 0;
fl 2993 drivers/net/ethernet/chelsio/cxgb3/sge.c if (qs->fl[0].credits < qs->fl[0].size)
fl 2994 drivers/net/ethernet/chelsio/cxgb3/sge.c __refill_fl(adap, &qs->fl[0]);
fl 2995 drivers/net/ethernet/chelsio/cxgb3/sge.c if (qs->fl[1].credits < qs->fl[1].size)
fl 2996 drivers/net/ethernet/chelsio/cxgb3/sge.c __refill_fl(adap, &qs->fl[1]);
fl 3047 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
fl 3050 drivers/net/ethernet/chelsio/cxgb3/sge.c &q->fl[0].phys_addr, &q->fl[0].sdesc);
fl 3051 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!q->fl[0].desc)
fl 3054 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
fl 3057 drivers/net/ethernet/chelsio/cxgb3/sge.c &q->fl[1].phys_addr, &q->fl[1].sdesc);
fl 3058 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!q->fl[1].desc)
fl 3092 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].gen = q->fl[1].gen = 1;
fl 3093 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].size = p->fl_size;
fl 3094 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].size = p->jumbo_size;
fl 3105 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
fl 3107 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
fl 3110 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
fl 3112 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].buf_size = is_offload(adapter) ?
fl 3117 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
fl 3118 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
fl 3119 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].order = FL0_PG_ORDER;
fl 3120 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].order = FL1_PG_ORDER;
fl 3121 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
fl 3122 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
fl 3129 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
fl 3134 drivers/net/ethernet/chelsio/cxgb3/sge.c ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
fl 3135 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[i].phys_addr, q->fl[i].size,
fl 3136 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[i].buf_size - SGE_PG_RSVD,
fl 3175 drivers/net/ethernet/chelsio/cxgb3/sge.c avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
fl 3181 drivers/net/ethernet/chelsio/cxgb3/sge.c if (avail < q->fl[0].size)
fl 3185 drivers/net/ethernet/chelsio/cxgb3/sge.c avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
fl 3187 drivers/net/ethernet/chelsio/cxgb3/sge.c if (avail < q->fl[1].size)
fl 3027 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);
fl 3084 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
fl 713 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h struct sge_fl fl;
fl 726 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h struct sge_fl fl;
fl 1417 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h struct sge_fl *fl, rspq_handler_t hnd,
fl 1891 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
fl 2734 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL ID:", fl.cntxt_id);
fl 2735 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL size:", fl.size - 8);
fl 2736 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL pend:", fl.pend_cred);
fl 2737 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL avail:", fl.avail);
fl 2738 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL PIDX:", fl.pidx);
fl 2739 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL CIDX:", fl.cidx);
fl 2753 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c RL("FLAllocErr:", fl.alloc_failed);
fl 2754 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c RL("FLLrgAlcErr:", fl.large_alloc_failed);
fl 2755 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c RL("FLMapErr:", fl.mapping_err);
fl 2756 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c RL("FLLow:", fl.low);
fl 2757 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c RL("FLStarving:", fl.starving);
fl 2798 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL ID:", fl.cntxt_id);
fl 2799 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL size:", fl.size - 8);
fl 2800 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL pend:", fl.pend_cred);
fl 2801 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL avail:", fl.avail);
fl 2802 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL PIDX:", fl.pidx);
fl 2803 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL CIDX:", fl.cidx);
fl 2848 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL ID:", fl.cntxt_id);
fl 2849 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL size:", fl.size - 8);
fl 2850 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL pend:", fl.pend_cred);
fl 2851 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL avail:", fl.avail);
fl 2852 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL PIDX:", fl.pidx);
fl 2853 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL CIDX:", fl.cidx);
fl 2874 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL ID:", fl.cntxt_id);
fl 2875 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL size:", fl.size - 8);
fl 2876 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL pend:", fl.pend_cred);
fl 2877 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL avail:", fl.avail);
fl 2878 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL PIDX:", fl.pidx);
fl 2879 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL CIDX:", fl.cidx);
fl 2900 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL ID:", fl.cntxt_id);
fl 2901 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL size:", fl.size - 8);
fl 2902 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL pend:", fl.pend_cred);
fl 2903 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL avail:", fl.avail);
fl 2904 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL PIDX:", fl.pidx);
fl 2905 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL CIDX:", fl.cidx);
fl 2934 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL ID:", fl.cntxt_id);
fl 2935 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL size:", fl.size - 8);
fl 2936 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL pend:", fl.pend_cred);
fl 2937 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL avail:", fl.avail);
fl 2938 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL PIDX:", fl.pidx);
fl 2939 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c R("FL CIDX:", fl.cidx);
fl 833 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
fl 858 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
fl 1006 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->msi_idx, &q->fl,
fl 5186 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c r->fl.size = 72;
fl 172 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c q->fl.size ? &q->fl : NULL,
fl 190 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c q->fl.size ? &q->fl : NULL);
fl 237 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c q->fl.size ? &q->fl : NULL);
fl 322 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c r->fl.size = 72;
fl 219 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int fl_cap(const struct sge_fl *fl)
fl 221 drivers/net/ethernet/chelsio/cxgb4/sge.c return fl->size - 8; /* 1 descriptor = 8 buffers */
fl 234 drivers/net/ethernet/chelsio/cxgb4/sge.c const struct sge_fl *fl)
fl 238 drivers/net/ethernet/chelsio/cxgb4/sge.c return fl->avail - fl->pend_cred <= s->fl_starve_thres;
fl 688 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
fl 690 drivers/net/ethernet/chelsio/cxgb4/sge.c refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
fl 3161 drivers/net/ethernet/chelsio/cxgb4/sge.c free_rx_bufs(q->adap, &rxq->fl, 1);
fl 3170 drivers/net/ethernet/chelsio/cxgb4/sge.c rsd = &rxq->fl.sdesc[rxq->fl.cidx];
fl 3178 drivers/net/ethernet/chelsio/cxgb4/sge.c unmap_rx_buf(q->adap, &rxq->fl);
fl 3200 drivers/net/ethernet/chelsio/cxgb4/sge.c restore_rx_bufs(&si, &rxq->fl, frags);
fl 3217 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
fl 3218 drivers/net/ethernet/chelsio/cxgb4/sge.c __refill_fl(q->adap, &rxq->fl);
fl 3395 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_fl *fl = s->egr_map[id];
fl 3400 drivers/net/ethernet/chelsio/cxgb4/sge.c if (fl_starving(adap, fl)) {
fl 3401 drivers/net/ethernet/chelsio/cxgb4/sge.c rxq = container_of(fl, struct sge_eth_rxq, fl);
fl 3403 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->starving++;
fl 3513 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_fl *fl, rspq_handler_t hnd,
fl 3554 drivers/net/ethernet/chelsio/cxgb4/sge.c if (fl) {
fl 3565 drivers/net/ethernet/chelsio/cxgb4/sge.c if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
fl 3566 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->size = s->fl_starve_thres - 1 + 2 * 8;
fl 3567 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->size = roundup(fl->size, 8);
fl 3568 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
fl 3569 drivers/net/ethernet/chelsio/cxgb4/sge.c sizeof(struct rx_sw_desc), &fl->addr,
fl 3570 drivers/net/ethernet/chelsio/cxgb4/sge.c &fl->sdesc, s->stat_len,
fl 3572 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!fl->desc)
fl 3575 drivers/net/ethernet/chelsio/cxgb4/sge.c flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
fl 3601 drivers/net/ethernet/chelsio/cxgb4/sge.c c.fl0addr = cpu_to_be64(fl->addr);
fl 3628 drivers/net/ethernet/chelsio/cxgb4/sge.c iq->offset = fl ? 0 : -1;
fl 3632 drivers/net/ethernet/chelsio/cxgb4/sge.c if (fl) {
fl 3633 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->cntxt_id = ntohs(c.fl0id);
fl 3634 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->avail = fl->pend_cred = 0;
fl 3635 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->pidx = fl->cidx = 0;
fl 3636 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
fl 3637 drivers/net/ethernet/chelsio/cxgb4/sge.c adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
fl 3642 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->bar2_addr = bar2_address(adap,
fl 3643 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->cntxt_id,
fl 3645 drivers/net/ethernet/chelsio/cxgb4/sge.c &fl->bar2_qid);
fl 3646 drivers/net/ethernet/chelsio/cxgb4/sge.c refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
fl 3694 drivers/net/ethernet/chelsio/cxgb4/sge.c if (fl && fl->desc) {
fl 3695 drivers/net/ethernet/chelsio/cxgb4/sge.c kfree(fl->sdesc);
fl 3696 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->sdesc = NULL;
fl 3698 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->desc, fl->addr);
fl 3699 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->desc = NULL;
fl 3960 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_fl *fl)
fl 3963 drivers/net/ethernet/chelsio/cxgb4/sge.c unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
fl 3975 drivers/net/ethernet/chelsio/cxgb4/sge.c if (fl) {
fl 3976 drivers/net/ethernet/chelsio/cxgb4/sge.c free_rx_bufs(adap, fl, fl->avail);
fl 3977 drivers/net/ethernet/chelsio/cxgb4/sge.c dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
fl 3978 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->desc, fl->addr);
fl 3979 drivers/net/ethernet/chelsio/cxgb4/sge.c kfree(fl->sdesc);
fl 3980 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->sdesc = NULL;
fl 3981 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->cntxt_id = 0;
fl 3982 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->desc = NULL;
fl 3999 drivers/net/ethernet/chelsio/cxgb4/sge.c q->fl.size ? &q->fl : NULL);
fl 4021 drivers/net/ethernet/chelsio/cxgb4/sge.c eq->fl.size ? eq->fl.cntxt_id : 0xffff,
fl 4030 drivers/net/ethernet/chelsio/cxgb4/sge.c eq->fl.size ? &eq->fl : NULL);
fl 213 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h struct sge_fl fl; /* Free List */
fl 641 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c &rxq->fl, t4vf_ethrx_handler);
fl 682 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
fl 683 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
fl 1607 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
fl 1640 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
fl 2101 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c R("FL ID:", fl.abs_id);
fl 2102 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c R("FL size:", fl.size - MIN_FL_RESID);
fl 2103 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c R("FL avail:", fl.avail);
fl 2104 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c R("FL PIdx:", fl.pidx);
fl 2105 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c R("FL CIdx:", fl.cidx);
fl 2253 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c R("FLAllocErr:", fl.alloc_failed);
fl 2254 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c R("FLLrgAlcErr:", fl.large_alloc_failed);
fl 2255 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c R("FLStarving:", fl.starving);
fl 2796 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c rxq->fl.size = 72;
fl 247 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline unsigned int fl_cap(const struct sge_fl *fl)
fl 249 drivers/net/ethernet/chelsio/cxgb4vf/sge.c return fl->size - FL_PER_EQ_UNIT;
fl 262 drivers/net/ethernet/chelsio/cxgb4vf/sge.c const struct sge_fl *fl)
fl 266 drivers/net/ethernet/chelsio/cxgb4vf/sge.c return fl->avail - fl->pend_cred <= s->fl_starve_thres;
fl 473 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
fl 476 drivers/net/ethernet/chelsio/cxgb4vf/sge.c struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
fl 484 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (++fl->cidx == fl->size)
fl 485 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->cidx = 0;
fl 486 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->avail--;
fl 503 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
fl 505 drivers/net/ethernet/chelsio/cxgb4vf/sge.c struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
fl 512 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (++fl->cidx == fl->size)
fl 513 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->cidx = 0;
fl 514 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->avail--;
fl 525 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
fl 533 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (fl->pend_cred >= FL_PER_EQ_UNIT) {
fl 535 drivers/net/ethernet/chelsio/cxgb4vf/sge.c val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
fl 537 drivers/net/ethernet/chelsio/cxgb4vf/sge.c val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
fl 548 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (unlikely(fl->bar2_addr == NULL)) {
fl 551 drivers/net/ethernet/chelsio/cxgb4vf/sge.c QID_V(fl->cntxt_id) | val);
fl 553 drivers/net/ethernet/chelsio/cxgb4vf/sge.c writel(val | QID_V(fl->bar2_qid),
fl 554 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->bar2_addr + SGE_UDB_KDOORBELL);
fl 561 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->pend_cred %= FL_PER_EQ_UNIT;
fl 604 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
fl 610 drivers/net/ethernet/chelsio/cxgb4vf/sge.c unsigned int cred = fl->avail;
fl 611 drivers/net/ethernet/chelsio/cxgb4vf/sge.c __be64 *d = &fl->desc[fl->pidx];
fl 612 drivers/net/ethernet/chelsio/cxgb4vf/sge.c struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
fl 619 drivers/net/ethernet/chelsio/cxgb4vf/sge.c BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
fl 640 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->large_alloc_failed++;
fl 666 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->avail++;
fl 667 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (++fl->pidx == fl->size) {
fl 668 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->pidx = 0;
fl 669 drivers/net/ethernet/chelsio/cxgb4vf/sge.c sdesc = fl->sdesc;
fl 670 drivers/net/ethernet/chelsio/cxgb4vf/sge.c d = fl->desc;
fl 679 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->alloc_failed++;
fl 695 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->avail++;
fl 696 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (++fl->pidx == fl->size) {
fl 697 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->pidx = 0;
fl 698 drivers/net/ethernet/chelsio/cxgb4vf/sge.c sdesc = fl->sdesc;
fl 699 drivers/net/ethernet/chelsio/cxgb4vf/sge.c d = fl->desc;
fl 709 drivers/net/ethernet/chelsio/cxgb4vf/sge.c cred = fl->avail - cred;
fl 710 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->pend_cred += cred;
fl 711 drivers/net/ethernet/chelsio/cxgb4vf/sge.c ring_fl_db(adapter, fl);
fl 713 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (unlikely(fl_starving(adapter, fl))) {
fl 715 drivers/net/ethernet/chelsio/cxgb4vf/sge.c set_bit(fl->cntxt_id, adapter->sge.starving_fl);
fl 725 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
fl 727 drivers/net/ethernet/chelsio/cxgb4vf/sge.c refill_fl(adapter, fl,
fl 728 drivers/net/ethernet/chelsio/cxgb4vf/sge.c min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
fl 1711 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
fl 1717 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (fl->cidx == 0)
fl 1718 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->cidx = fl->size - 1;
fl 1720 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->cidx--;
fl 1721 drivers/net/ethernet/chelsio/cxgb4vf/sge.c sdesc = &fl->sdesc[fl->cidx];
fl 1724 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->avail++;
fl 1796 drivers/net/ethernet/chelsio/cxgb4vf/sge.c free_rx_bufs(rspq->adapter, &rxq->fl,
fl 1809 drivers/net/ethernet/chelsio/cxgb4vf/sge.c BUG_ON(rxq->fl.avail == 0);
fl 1810 drivers/net/ethernet/chelsio/cxgb4vf/sge.c sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
fl 1818 drivers/net/ethernet/chelsio/cxgb4vf/sge.c unmap_rx_buf(rspq->adapter, &rxq->fl);
fl 1842 drivers/net/ethernet/chelsio/cxgb4vf/sge.c restore_rx_bufs(&gl, &rxq->fl, frag);
fl 1872 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
fl 1873 drivers/net/ethernet/chelsio/cxgb4vf/sge.c __refill_fl(rspq->adapter, &rxq->fl);
fl 2085 drivers/net/ethernet/chelsio/cxgb4vf/sge.c struct sge_fl *fl = s->egr_map[id];
fl 2096 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (fl_starving(adapter, fl)) {
fl 2099 drivers/net/ethernet/chelsio/cxgb4vf/sge.c rxq = container_of(fl, struct sge_eth_rxq, fl);
fl 2101 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->starving++;
fl 2207 drivers/net/ethernet/chelsio/cxgb4vf/sge.c struct sge_fl *fl, rspq_handler_t hnd)
fl 2272 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (fl) {
fl 2283 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
fl 2284 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
fl 2285 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
fl 2286 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
fl 2288 drivers/net/ethernet/chelsio/cxgb4vf/sge.c &fl->addr, &fl->sdesc, s->stat_len);
fl 2289 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (!fl->desc) {
fl 2299 drivers/net/ethernet/chelsio/cxgb4vf/sge.c flsz = (fl->size / FL_PER_EQ_UNIT +
fl 2331 drivers/net/ethernet/chelsio/cxgb4vf/sge.c cmd.fl0addr = cpu_to_be64(fl->addr);
fl 2359 drivers/net/ethernet/chelsio/cxgb4vf/sge.c rspq->offset = fl ? 0 : -1;
fl 2361 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (fl) {
fl 2362 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->cntxt_id = be16_to_cpu(rpl.fl0id);
fl 2363 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->avail = 0;
fl 2364 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->pend_cred = 0;
fl 2365 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->pidx = 0;
fl 2366 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->cidx = 0;
fl 2367 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->alloc_failed = 0;
fl 2368 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->large_alloc_failed = 0;
fl 2369 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->starving = 0;
fl 2374 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->bar2_addr = bar2_address(adapter,
fl 2375 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->cntxt_id,
fl 2377 drivers/net/ethernet/chelsio/cxgb4vf/sge.c &fl->bar2_qid);
fl 2379 drivers/net/ethernet/chelsio/cxgb4vf/sge.c refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
fl 2394 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (fl && fl->desc) {
fl 2395 drivers/net/ethernet/chelsio/cxgb4vf/sge.c kfree(fl->sdesc);
fl 2396 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->sdesc = NULL;
fl 2398 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->desc, fl->addr);
fl 2399 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->desc = NULL;
fl 2530 drivers/net/ethernet/chelsio/cxgb4vf/sge.c struct sge_fl *fl)
fl 2533 drivers/net/ethernet/chelsio/cxgb4vf/sge.c unsigned int flid = fl ? fl->cntxt_id : 0xffff;
fl 2545 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (fl) {
fl 2546 drivers/net/ethernet/chelsio/cxgb4vf/sge.c free_rx_bufs(adapter, fl, fl->avail);
fl 2548 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->size * sizeof(*fl->desc) + s->stat_len,
fl 2549 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->desc, fl->addr);
fl 2550 drivers/net/ethernet/chelsio/cxgb4vf/sge.c kfree(fl->sdesc);
fl 2551 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->sdesc = NULL;
fl 2552 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->cntxt_id = 0;
fl 2553 drivers/net/ethernet/chelsio/cxgb4vf/sge.c fl->desc = NULL;
fl 2574 drivers/net/ethernet/chelsio/cxgb4vf/sge.c free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
fl 905 drivers/net/ethernet/cortina/gemini.c unsigned int fl = (pn - epn) & m_pn;
fl 907 drivers/net/ethernet/cortina/gemini.c if (fl > 64 >> fpp_order)
fl 738 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c int lid, lt, ld, fl;
fl 764 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c for (fl = 0; fl < NPC_MAX_LFL; fl++) {
fl 765 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
fl 767 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c [ld][fl]);
fl 769 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
fl 771 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c [ld][fl]);
fl 2154 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c #define GET_KEX_LDFLAGS(intf, ld, fl) \
fl 2156 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
fl 2161 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c int lid, lt, ld, fl;
fl 2179 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c for (fl = 0; fl < NPC_MAX_LFL; fl++) {
fl 2180 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
fl 2181 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
fl 2182 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
fl 2183 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
fl 80 drivers/net/ethernet/mellanox/mlx4/en_resources.c context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
fl 474 drivers/net/ethernet/mellanox/mlx4/qp.c cmd->qp_context.pri_path.fl |=
fl 829 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
fl 831 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c qpc->pri_path.fl |= MLX4_FL_SV;
fl 833 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c qpc->pri_path.fl |= MLX4_FL_CV;
fl 3832 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c u8 orig_pri_path_fl = qpc->pri_path.fl;
fl 5364 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
fl 5374 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c upd_context->qp_context.pri_path.fl =
fl 5377 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
fl 5379 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
fl 404 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct flowi6 fl6 = t->fl.u.ip6;
fl 88 drivers/scsi/csiostor/csio_wr.c csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
fl 120 drivers/scsi/csiostor/csio_wr.c struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
fl 122 drivers/scsi/csiostor/csio_wr.c int sreg = flq->un.fl.sreg;
fl 281 drivers/scsi/csiostor/csio_wr.c flq->un.fl.bufs = kcalloc(flq->credits,
fl 284 drivers/scsi/csiostor/csio_wr.c if (!flq->un.fl.bufs) {
fl 291 drivers/scsi/csiostor/csio_wr.c flq->un.fl.packen = 0;
fl 292 drivers/scsi/csiostor/csio_wr.c flq->un.fl.offset = 0;
fl 293 drivers/scsi/csiostor/csio_wr.c flq->un.fl.sreg = sreg;
fl 489 drivers/scsi/csiostor/csio_wr.c iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
fl 1061 drivers/scsi/csiostor/csio_wr.c if (flq->un.fl.offset > 0) {
fl 1063 drivers/scsi/csiostor/csio_wr.c flq->un.fl.offset = 0;
fl 1074 drivers/scsi/csiostor/csio_wr.c buf = &flq->un.fl.bufs[flq->cidx];
fl 1080 drivers/scsi/csiostor/csio_wr.c flb.offset = flq->un.fl.offset;
fl 1090 drivers/scsi/csiostor/csio_wr.c flb.defer_free = flq->un.fl.packen ? 0 : 1;
fl 1095 drivers/scsi/csiostor/csio_wr.c if (flq->un.fl.packen)
fl 1096 drivers/scsi/csiostor/csio_wr.c flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
fl 1698 drivers/scsi/csiostor/csio_wr.c if (!q->un.fl.bufs)
fl 1701 drivers/scsi/csiostor/csio_wr.c buf = &q->un.fl.bufs[j];
fl 1708 drivers/scsi/csiostor/csio_wr.c kfree(q->un.fl.bufs);
fl 412 drivers/scsi/csiostor/csio_wr.h struct csio_fl fl;
fl 469 drivers/scsi/csiostor/csio_wr.h #define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)
fl 706 drivers/scsi/cxgbi/libcxgbi.c struct flowi6 fl;
fl 708 drivers/scsi/cxgbi/libcxgbi.c memset(&fl, 0, sizeof(fl));
fl 709 drivers/scsi/cxgbi/libcxgbi.c fl.flowi6_oif = ifindex;
fl 711 drivers/scsi/cxgbi/libcxgbi.c memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
fl 713 drivers/scsi/cxgbi/libcxgbi.c memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
fl 714 drivers/scsi/cxgbi/libcxgbi.c return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
fl 1332 drivers/scsi/lpfc/lpfc_els.c icmd->un.elsreq64.fl = 1;
fl 9394 drivers/scsi/lpfc/lpfc_els.c icmd->un.elsreq64.fl = 1;
fl 3748 drivers/scsi/lpfc/lpfc_hw.h uint32_t fl:1;
fl 3754 drivers/scsi/lpfc/lpfc_hw.h uint32_t fl:1;
fl 3852 drivers/scsi/lpfc/lpfc_hw.h uint32_t fl:1;
fl 3858 drivers/scsi/lpfc/lpfc_hw.h uint32_t fl:1;
fl 157 drivers/virtio/virtio_input.c u32 mi, ma, re, fu, fl;
fl 164 drivers/virtio/virtio_input.c virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
fl 165 drivers/virtio/virtio_input.c input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
fl 116 fs/9p/vfs_file.c static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
fl 121 fs/9p/vfs_file.c p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
fl 124 fs/9p/vfs_file.c if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
fl 127 fs/9p/vfs_file.c if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
fl 135 fs/9p/vfs_file.c static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
fl 147 fs/9p/vfs_file.c if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
fl 150 fs/9p/vfs_file.c res = locks_lock_file_wait(filp, fl);
fl 157 fs/9p/vfs_file.c switch (fl->fl_type) {
fl 168 fs/9p/vfs_file.c flock.start = fl->fl_start;
fl 169 fs/9p/vfs_file.c if (fl->fl_end == OFFSET_MAX)
fl 172 fs/9p/vfs_file.c flock.length = fl->fl_end - fl->fl_start + 1;
fl 173 fs/9p/vfs_file.c flock.proc_id = fl->fl_pid;
fl 228 fs/9p/vfs_file.c if (res < 0 && fl->fl_type != F_UNLCK) {
fl 229 fs/9p/vfs_file.c fl_type = fl->fl_type;
fl 230 fs/9p/vfs_file.c fl->fl_type = F_UNLCK;
fl 232 fs/9p/vfs_file.c locks_lock_file_wait(filp, fl);
fl 233 fs/9p/vfs_file.c fl->fl_type = fl_type;
fl 241 fs/9p/vfs_file.c static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
fl 250 fs/9p/vfs_file.c posix_test_lock(filp, fl);
fl 255 fs/9p/vfs_file.c if (fl->fl_type != F_UNLCK)
fl 261 fs/9p/vfs_file.c glock.start = fl->fl_start;
fl 262 fs/9p/vfs_file.c if (fl->fl_end == OFFSET_MAX)
fl 265 fs/9p/vfs_file.c glock.length = fl->fl_end - fl->fl_start + 1;
fl 266 fs/9p/vfs_file.c glock.proc_id = fl->fl_pid;
fl 275 fs/9p/vfs_file.c fl->fl_type = F_RDLCK;
fl 278 fs/9p/vfs_file.c fl->fl_type = F_WRLCK;
fl 281 fs/9p/vfs_file.c fl->fl_type = F_UNLCK;
fl 285 fs/9p/vfs_file.c fl->fl_start = glock.start;
fl 287 fs/9p/vfs_file.c fl->fl_end = OFFSET_MAX;
fl 289 fs/9p/vfs_file.c fl->fl_end = glock.start + glock.length - 1;
fl 290 fs/9p/vfs_file.c fl->fl_pid = -glock.proc_id;
fl 306 fs/9p/vfs_file.c static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
fl 312 fs/9p/vfs_file.c filp, cmd, fl, filp);
fl 315 fs/9p/vfs_file.c if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
fl 318 fs/9p/vfs_file.c if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
fl 324 fs/9p/vfs_file.c ret = v9fs_file_do_lock(filp, cmd, fl);
fl 326 fs/9p/vfs_file.c ret = v9fs_file_getlock(filp, fl);
fl 342 fs/9p/vfs_file.c struct file_lock *fl)
fl 348 fs/9p/vfs_file.c filp, cmd, fl, filp);
fl 351 fs/9p/vfs_file.c if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
fl 354 fs/9p/vfs_file.c if (!(fl->fl_flags & FL_FLOCK))
fl 357 fs/9p/vfs_file.c if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
fl 362 fs/9p/vfs_file.c fl->fl_flags |= FL_POSIX;
fl 363 fs/9p/vfs_file.c fl->fl_flags ^= FL_FLOCK;
fl 366 fs/9p/vfs_file.c ret = v9fs_file_do_lock(filp, cmd, fl);
fl 17 fs/afs/flock.c static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
fl 18 fs/afs/flock.c static void afs_fl_release_private(struct file_lock *fl);
fl 463 fs/afs/flock.c static int afs_do_setlk(struct file *file, struct file_lock *fl)
fl 478 fs/afs/flock.c fl->fl_start, fl->fl_end, fl->fl_type, mode);
fl 480 fs/afs/flock.c fl->fl_ops = &afs_lock_ops;
fl 481 fs/afs/flock.c INIT_LIST_HEAD(&fl->fl_u.afs.link);
fl 482 fs/afs/flock.c fl->fl_u.afs.state = AFS_LOCK_PENDING;
fl 484 fs/afs/flock.c partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
fl 485 fs/afs/flock.c type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
fl 493 fs/afs/flock.c trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);
fl 510 fs/afs/flock.c list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
fl 524 fs/afs/flock.c list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
fl 525 fs/afs/flock.c fl->fl_u.afs.state = AFS_LOCK_GRANTED;
fl 531 fs/afs/flock.c list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
fl 532 fs/afs/flock.c fl->fl_u.afs.state = AFS_LOCK_GRANTED;
fl 538 fs/afs/flock.c !(fl->fl_flags & FL_SLEEP)) {
fl 561 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
fl 576 fs/afs/flock.c fl->fl_u.afs.state = ret;
fl 577 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
fl 578 fs/afs/flock.c list_del_init(&fl->fl_u.afs.link);
fl 583 fs/afs/flock.c fl->fl_u.afs.state = ret;
fl 584 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
fl 585 fs/afs/flock.c list_del_init(&fl->fl_u.afs.link);
fl 590 fs/afs/flock.c fl->fl_u.afs.state = ret;
fl 591 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
fl 592 fs/afs/flock.c list_del_init(&fl->fl_u.afs.link);
fl 602 fs/afs/flock.c ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
fl 607 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
fl 616 fs/afs/flock.c ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);
fl 620 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
fl 621 fs/afs/flock.c ret = locks_lock_file_wait(file, fl);
fl 622 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
fl 635 fs/afs/flock.c if (!(fl->fl_flags & FL_SLEEP)) {
fl 636 fs/afs/flock.c list_del_init(&fl->fl_u.afs.link);
fl 643 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
fl 654 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
fl 655 fs/afs/flock.c ret = wait_event_interruptible(fl->fl_wait,
fl 656 fs/afs/flock.c fl->fl_u.afs.state != AFS_LOCK_PENDING);
fl 657 fs/afs/flock.c trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);
fl 659 fs/afs/flock.c if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
fl 662 fs/afs/flock.c switch (fl->fl_u.afs.state) {
fl 664 fs/afs/flock.c fl->fl_u.afs.state = AFS_LOCK_PENDING;
fl 674 fs/afs/flock.c fl->fl_u.afs.state = AFS_LOCK_PENDING;
fl 686 fs/afs/flock.c if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
fl 688 fs/afs/flock.c ret = fl->fl_u.afs.state;
fl 700 fs/afs/flock.c list_del_init(&fl->fl_u.afs.link);
fl 713 fs/afs/flock.c static int afs_do_unlk(struct file *file, struct file_lock *fl)
fl 718 fs/afs/flock.c _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
fl 720 fs/afs/flock.c trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);
fl 725 fs/afs/flock.c ret = locks_lock_file_wait(file, fl);
fl 733 fs/afs/flock.c static int afs_do_getlk(struct file *file, struct file_lock *fl)
fl 744 fs/afs/flock.c fl->fl_type = F_UNLCK;
fl 747 fs/afs/flock.c posix_test_lock(file, fl);
fl 748 fs/afs/flock.c if (fl->fl_type == F_UNLCK) {
fl 757 fs/afs/flock.c fl->fl_type = F_RDLCK;
fl 759 fs/afs/flock.c fl->fl_type = F_WRLCK;
fl 760 fs/afs/flock.c fl->fl_start = 0;
fl 761 fs/afs/flock.c fl->fl_end = OFFSET_MAX;
fl 762 fs/afs/flock.c fl->fl_pid = 0;
fl 768 fs/afs/flock.c _leave(" = %d [%hd]", ret, fl->fl_type);
fl 775 fs/afs/flock.c int afs_lock(struct file *file, int cmd, struct file_lock *fl)
fl 783 fs/afs/flock.c fl->fl_type, fl->fl_flags,
fl 784 fs/afs/flock.c (long long) fl->fl_start, (long long) fl->fl_end);
fl 787 fs/afs/flock.c if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
fl 791 fs/afs/flock.c return afs_do_getlk(file, fl);
fl 793 fs/afs/flock.c fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
fl 794 fs/afs/flock.c trace_afs_flock_op(vnode, fl, afs_flock_op_lock);
fl 796 fs/afs/flock.c if (fl->fl_type == F_UNLCK)
fl 797 fs/afs/flock.c ret = afs_do_unlk(file, fl);
fl 799 fs/afs/flock.c ret = afs_do_setlk(file, fl);
fl 807 fs/afs/flock.c trace_afs_flock_op(vnode, fl, op);
fl 814 fs/afs/flock.c int afs_flock(struct file *file, int cmd, struct file_lock *fl)
fl 822 fs/afs/flock.c fl->fl_type, fl->fl_flags);
fl 831 fs/afs/flock.c if (!(fl->fl_flags & FL_FLOCK))
fl 834 fs/afs/flock.c fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
fl 835 fs/afs/flock.c trace_afs_flock_op(vnode, fl, afs_flock_op_flock);
fl 838 fs/afs/flock.c if (fl->fl_type == F_UNLCK)
fl 839 fs/afs/flock.c ret = afs_do_unlk(file, fl);
fl 841 fs/afs/flock.c ret = afs_do_setlk(file, fl);
fl 849 fs/afs/flock.c trace_afs_flock_op(vnode, fl, op);
fl 859 fs/afs/flock.c static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
fl 861 fs/afs/flock.c struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
fl 869 fs/afs/flock.c list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
fl 877 fs/afs/flock.c static void afs_fl_release_private(struct file_lock *fl)
fl 879 fs/afs/flock.c struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
fl 885 fs/afs/flock.c trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
fl 886 fs/afs/flock.c list_del_init(&fl->fl_u.afs.link);
fl 41 fs/ceph/locks.c static void ceph_fl_release_lock(struct file_lock *fl)
fl 43 fs/ceph/locks.c struct ceph_file_info *fi = fl->fl_file->private_data;
fl 44 fs/ceph/locks.c struct inode *inode = file_inode(fl->fl_file);
fl 64 fs/ceph/locks.c int cmd, u8 wait, struct file_lock *fl)
fl 79 fs/ceph/locks.c fl->fl_ops = &ceph_fl_lock_ops;
fl 80 fs/ceph/locks.c fl->fl_ops->fl_copy_lock(fl, NULL);
fl 94 fs/ceph/locks.c if (LLONG_MAX == fl->fl_end)
fl 97 fs/ceph/locks.c length = fl->fl_end - fl->fl_start + 1;
fl 99 fs/ceph/locks.c owner = secure_addr(fl->fl_owner);
fl 103 fs/ceph/locks.c (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
fl 104 fs/ceph/locks.c wait, fl->fl_type);
fl 109 fs/ceph/locks.c req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
fl 110 fs/ceph/locks.c req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
fl 119 fs/ceph/locks.c fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
fl 121 fs/ceph/locks.c fl->fl_type = F_RDLCK;
fl 123 fs/ceph/locks.c fl->fl_type = F_WRLCK;
fl 125 fs/ceph/locks.c fl->fl_type = F_UNLCK;
fl 127 fs/ceph/locks.c fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
fl 131 fs/ceph/locks.c fl->fl_end = length -1;
fl 133 fs/ceph/locks.c fl->fl_end = 0;
fl 139 fs/ceph/locks.c (int)operation, (u64)fl->fl_pid, fl->fl_start,
fl 140 fs/ceph/locks.c length, wait, fl->fl_type, err);
fl 217 fs/ceph/locks.c int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
fl 226 fs/ceph/locks.c if (!(fl->fl_flags & FL_POSIX))
fl 229 fs/ceph/locks.c if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
fl 232 fs/ceph/locks.c dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
fl 246 fs/ceph/locks.c if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
fl 247 fs/ceph/locks.c posix_lock_file(file, fl, NULL);
fl 251 fs/ceph/locks.c if (F_RDLCK == fl->fl_type)
fl 253
fs/ceph/locks.c else if (F_WRLCK == fl->fl_type) fl 258 fs/ceph/locks.c err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl); fl 262 fs/ceph/locks.c err = posix_lock_file(file, fl, NULL); fl 268 fs/ceph/locks.c CEPH_LOCK_UNLOCK, 0, fl); fl 277 fs/ceph/locks.c int ceph_flock(struct file *file, int cmd, struct file_lock *fl) fl 285 fs/ceph/locks.c if (!(fl->fl_flags & FL_FLOCK)) fl 288 fs/ceph/locks.c if (fl->fl_type & LOCK_MAND) fl 291 fs/ceph/locks.c dout("ceph_flock, fl_file: %p\n", fl->fl_file); fl 299 fs/ceph/locks.c if (F_UNLCK == fl->fl_type) fl 300 fs/ceph/locks.c locks_lock_file_wait(file, fl); fl 307 fs/ceph/locks.c if (F_RDLCK == fl->fl_type) fl 309 fs/ceph/locks.c else if (F_WRLCK == fl->fl_type) fl 315 fs/ceph/locks.c inode, lock_cmd, wait, fl); fl 317 fs/ceph/locks.c err = locks_lock_file_wait(file, fl); fl 321 fs/ceph/locks.c inode, CEPH_LOCK_UNLOCK, 0, fl); fl 1133 fs/ceph/super.h extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); fl 1134 fs/ceph/super.h extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); fl 50 fs/ceph/xattr.c struct ceph_file_layout *fl = &ci->i_layout; fl 51 fs/ceph/xattr.c return (fl->stripe_unit > 0 || fl->stripe_count > 0 || fl 52 fs/ceph/xattr.c fl->object_size > 0 || fl->pool_id >= 0 || fl 53 fs/ceph/xattr.c rcu_dereference_raw(fl->pool_ns) != NULL); fl 30 fs/dlm/plock.c int (*callback)(struct file_lock *fl, int result); fl 31 fs/dlm/plock.c void *fl; fl 78 fs/dlm/plock.c struct file *file, struct file_lock *fl) fl 87 fs/dlm/plock.c op->info.pid = fl->fl_pid; fl 92 fs/dlm/plock.c if (fl->fl_lmops && fl->fl_lmops->lm_grant) fl 93 fs/dlm/plock.c op->info.owner = (__u64) fl->fl_pid; fl 95 fs/dlm/plock.c op->info.owner = (__u64)(long) fl->fl_owner; fl 102 fs/dlm/plock.c int cmd, struct file_lock *fl) fl 121 fs/dlm/plock.c op->info.pid = fl->fl_pid; fl 122 fs/dlm/plock.c op->info.ex = (fl->fl_type == F_WRLCK); fl 126 fs/dlm/plock.c op->info.start = fl->fl_start; fl 127 fs/dlm/plock.c op->info.end = fl->fl_end; fl 128 fs/dlm/plock.c if (fl->fl_lmops && fl->fl_lmops->lm_grant) { fl 131 fs/dlm/plock.c op->info.owner = (__u64) fl->fl_pid; fl 132 fs/dlm/plock.c xop->callback = fl->fl_lmops->lm_grant; fl 134 fs/dlm/plock.c locks_copy_lock(&xop->flc, fl); fl 135 fs/dlm/plock.c xop->fl = fl; fl 138 fs/dlm/plock.c op->info.owner = (__u64)(long) fl->fl_owner; fl 153 fs/dlm/plock.c do_unlock_close(ls, number, file, fl); fl 172 fs/dlm/plock.c if (locks_lock_file_wait(file, fl) < 0) fl 188 fs/dlm/plock.c struct file_lock *fl; fl 190 fs/dlm/plock.c int (*notify)(struct file_lock *fl, int result) = NULL; fl 205 fs/dlm/plock.c fl = xop->fl; fl 209 fs/dlm/plock.c notify(fl, op->info.rv); fl 225 fs/dlm/plock.c (unsigned long long)op->info.number, file, fl); fl 228 fs/dlm/plock.c rv = notify(fl, 0); fl 242 fs/dlm/plock.c struct file_lock *fl) fl 247 fs/dlm/plock.c unsigned char fl_flags = fl->fl_flags; fl 260 fs/dlm/plock.c fl->fl_flags |= FL_EXISTS; fl 262 fs/dlm/plock.c rv = locks_lock_file_wait(file, fl); fl 273 fs/dlm/plock.c op->info.pid = fl->fl_pid; fl 276 fs/dlm/plock.c op->info.start = fl->fl_start; fl 277 fs/dlm/plock.c op->info.end = fl->fl_end; fl 278 fs/dlm/plock.c if (fl->fl_lmops && fl->fl_lmops->lm_grant) fl 279 fs/dlm/plock.c op->info.owner = (__u64) fl->fl_pid; fl 281 fs/dlm/plock.c op->info.owner = (__u64)(long) fl->fl_owner; fl 283 fs/dlm/plock.c if (fl->fl_flags & FL_CLOSE) { fl 310 fs/dlm/plock.c fl->fl_flags = fl_flags; fl 316 fs/dlm/plock.c struct file_lock *fl) fl 333 fs/dlm/plock.c 
op->info.pid = fl->fl_pid; fl 334 fs/dlm/plock.c op->info.ex = (fl->fl_type == F_WRLCK); fl 337 fs/dlm/plock.c op->info.start = fl->fl_start; fl 338 fs/dlm/plock.c op->info.end = fl->fl_end; fl 339 fs/dlm/plock.c if (fl->fl_lmops && fl->fl_lmops->lm_grant) fl 340 fs/dlm/plock.c op->info.owner = (__u64) fl->fl_pid; fl 342 fs/dlm/plock.c op->info.owner = (__u64)(long) fl->fl_owner; fl 360 fs/dlm/plock.c fl->fl_type = F_UNLCK; fl 364 fs/dlm/plock.c locks_init_lock(fl); fl 365 fs/dlm/plock.c fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK; fl 366 fs/dlm/plock.c fl->fl_flags = FL_POSIX; fl 367 fs/dlm/plock.c fl->fl_pid = -op->info.pid; fl 368 fs/dlm/plock.c fl->fl_start = op->info.start; fl 369 fs/dlm/plock.c fl->fl_end = op->info.end; fl 130 fs/ext4/move_extent.c unsigned fl = AOP_FLAG_NOFS; fl 142 fs/ext4/move_extent.c page[0] = grab_cache_page_write_begin(mapping[0], index1, fl); fl 146 fs/ext4/move_extent.c page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); fl 533 fs/fcntl.c struct compat_flock fl; fl 535 fs/fcntl.c if (copy_from_user(&fl, ufl, sizeof(struct compat_flock))) fl 537 fs/fcntl.c copy_flock_fields(kfl, &fl); fl 543 fs/fcntl.c struct compat_flock64 fl; fl 545 fs/fcntl.c if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64))) fl 547 fs/fcntl.c copy_flock_fields(kfl, &fl); fl 553 fs/fcntl.c struct compat_flock fl; fl 555 fs/fcntl.c memset(&fl, 0, sizeof(struct compat_flock)); fl 556 fs/fcntl.c copy_flock_fields(&fl, kfl); fl 557 fs/fcntl.c if (copy_to_user(ufl, &fl, sizeof(struct compat_flock))) fl 564 fs/fcntl.c struct compat_flock64 fl; fl 569 fs/fcntl.c memset(&fl, 0, sizeof(struct compat_flock64)); fl 570 fs/fcntl.c copy_flock_fields(&fl, kfl); fl 571 fs/fcntl.c if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64))) fl 2323 fs/fuse/file.c struct file_lock *fl) fl 2335 fs/fuse/file.c fl->fl_start = ffl->start; fl 2336 fs/fuse/file.c fl->fl_end = ffl->end; fl 2343 fs/fuse/file.c fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns); fl 2350 fs/fuse/file.c fl->fl_type = ffl->type; fl 2355 fs/fuse/file.c const struct file_lock *fl, int opcode, pid_t pid, fl 2364 fs/fuse/file.c inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner); fl 2365 fs/fuse/file.c inarg->lk.start = fl->fl_start; fl 2366 fs/fuse/file.c inarg->lk.end = fl->fl_end; fl 2367 fs/fuse/file.c inarg->lk.type = fl->fl_type; fl 2378 fs/fuse/file.c static int fuse_getlk(struct file *file, struct file_lock *fl) fl 2387 fs/fuse/file.c fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg); fl 2393 fs/fuse/file.c err = convert_fuse_file_lock(fc, &outarg.lk, fl); fl 2398 fs/fuse/file.c static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) fl 2404 fs/fuse/file.c int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; fl 2405 fs/fuse/file.c struct pid *pid = fl->fl_type != F_UNLCK ? 
task_tgid(current) : NULL; fl 2409 fs/fuse/file.c if (fl->fl_lmops && fl->fl_lmops->lm_grant) { fl 2415 fs/fuse/file.c if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX) fl 2418 fs/fuse/file.c fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg); fl 2428 fs/fuse/file.c static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) fl 2438 fs/fuse/file.c posix_test_lock(file, fl); fl 2441 fs/fuse/file.c err = fuse_getlk(file, fl); fl 2444 fs/fuse/file.c err = posix_lock_file(file, fl, NULL); fl 2446 fs/fuse/file.c err = fuse_setlk(file, fl, 0); fl 2451 fs/fuse/file.c static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) fl 2458 fs/fuse/file.c err = locks_lock_file_wait(file, fl); fl 2464 fs/fuse/file.c err = fuse_setlk(file, fl, 1); fl 1184 fs/gfs2/file.c static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) fl 1190 fs/gfs2/file.c if (!(fl->fl_flags & FL_POSIX)) fl 1192 fs/gfs2/file.c if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK) fl 1198 fs/gfs2/file.c fl->fl_type = F_UNLCK; fl 1201 fs/gfs2/file.c if (fl->fl_type == F_UNLCK) fl 1202 fs/gfs2/file.c locks_lock_file_wait(file, fl); fl 1206 fs/gfs2/file.c return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); fl 1207 fs/gfs2/file.c else if (fl->fl_type == F_UNLCK) fl 1208 fs/gfs2/file.c return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl); fl 1210 fs/gfs2/file.c return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl); fl 1213 fs/gfs2/file.c static int do_flock(struct file *file, int cmd, struct file_lock *fl) fl 1224 fs/gfs2/file.c state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; fl 1260 fs/gfs2/file.c error = locks_lock_file_wait(file, fl); fl 1269 fs/gfs2/file.c static void do_unflock(struct file *file, struct file_lock *fl) fl 1275 fs/gfs2/file.c locks_lock_file_wait(file, fl); fl 1292 fs/gfs2/file.c static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl) fl 1294 fs/gfs2/file.c if (!(fl->fl_flags & FL_FLOCK)) fl 1296 fs/gfs2/file.c if (fl->fl_type & LOCK_MAND) fl 1299 fs/gfs2/file.c if (fl->fl_type == F_UNLCK) { fl 1300 fs/gfs2/file.c do_unflock(file, fl); fl 1303 fs/gfs2/file.c return do_flock(file, cmd, fl); fl 68 fs/lockd/clnt4xdr.c const struct file_lock *fl = &lock->fl; fl 70 fs/lockd/clnt4xdr.c *l_offset = loff_t_to_s64(fl->fl_start); fl 71 fs/lockd/clnt4xdr.c if (fl->fl_end == OFFSET_MAX) fl 74 fs/lockd/clnt4xdr.c *l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); fl 246 fs/lockd/clnt4xdr.c encode_bool(xdr, lock->fl.fl_type == F_RDLCK); fl 259 fs/lockd/clnt4xdr.c struct file_lock *fl = &lock->fl; fl 267 fs/lockd/clnt4xdr.c locks_init_lock(fl); fl 274 fs/lockd/clnt4xdr.c fl->fl_pid = (pid_t)lock->svid; fl 284 fs/lockd/clnt4xdr.c fl->fl_flags = FL_POSIX; fl 285 fs/lockd/clnt4xdr.c fl->fl_type = exclusive != 0 ? 
F_WRLCK : F_RDLCK; fl 290 fs/lockd/clnt4xdr.c fl->fl_start = (loff_t)l_offset; fl 292 fs/lockd/clnt4xdr.c fl->fl_end = OFFSET_MAX; fl 294 fs/lockd/clnt4xdr.c fl->fl_end = (loff_t)end; fl 367 fs/lockd/clnt4xdr.c encode_bool(xdr, lock->fl.fl_type == F_WRLCK); fl 390 fs/lockd/clnt4xdr.c encode_bool(xdr, lock->fl.fl_type == F_WRLCK); fl 413 fs/lockd/clnt4xdr.c encode_bool(xdr, lock->fl.fl_type == F_WRLCK); fl 100 fs/lockd/clntlock.c struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl) fl 107 fs/lockd/clntlock.c block->b_lock = fl; fl 166 fs/lockd/clntlock.c const struct file_lock *fl = &lock->fl; fl 179 fs/lockd/clntlock.c if (fl_blocked->fl_start != fl->fl_start) fl 181 fs/lockd/clntlock.c if (fl_blocked->fl_end != fl->fl_end) fl 234 fs/lockd/clntlock.c struct file_lock *fl, *next; fl 260 fs/lockd/clntlock.c list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) { fl 261 fs/lockd/clntlock.c list_del_init(&fl->fl_u.nfs_fl.list); fl 271 fs/lockd/clntlock.c if (nlmclnt_reclaim(host, fl, req) != 0) fl 273 fs/lockd/clntlock.c list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted); fl 31 fs/lockd/clntproc.c static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); fl 126 fs/lockd/clntproc.c static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) fl 133 fs/lockd/clntproc.c memcpy(&lock->fh, NFS_FH(locks_inode(fl->fl_file)), sizeof(struct nfs_fh)); fl 137 fs/lockd/clntproc.c (unsigned int)fl->fl_u.nfs_fl.owner->pid, fl 139 fs/lockd/clntproc.c lock->svid = fl->fl_u.nfs_fl.owner->pid; fl 140 fs/lockd/clntproc.c lock->fl.fl_start = fl->fl_start; fl 141 fs/lockd/clntproc.c lock->fl.fl_end = fl->fl_end; fl 142 fs/lockd/clntproc.c lock->fl.fl_type = fl->fl_type; fl 147 fs/lockd/clntproc.c WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL); fl 158 fs/lockd/clntproc.c int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data) fl 171 fs/lockd/clntproc.c nlmclnt_locks_init_private(fl, host); fl 172 fs/lockd/clntproc.c if (!fl->fl_u.nfs_fl.owner) { fl 178 fs/lockd/clntproc.c nlmclnt_setlockargs(call, fl); fl 182 fs/lockd/clntproc.c if (fl->fl_type != F_UNLCK) { fl 184 fs/lockd/clntproc.c status = nlmclnt_lock(call, fl); fl 186 fs/lockd/clntproc.c status = nlmclnt_unlock(call, fl); fl 188 fs/lockd/clntproc.c status = nlmclnt_test(call, fl); fl 191 fs/lockd/clntproc.c fl->fl_ops->fl_release_private(fl); fl 192 fs/lockd/clntproc.c fl->fl_ops = NULL; fl 210 fs/lockd/clntproc.c locks_init_lock(&call->a_args.lock.fl); fl 211 fs/lockd/clntproc.c locks_init_lock(&call->a_res.lock.fl); fl 428 fs/lockd/clntproc.c nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) fl 432 fs/lockd/clntproc.c status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST); fl 438 fs/lockd/clntproc.c fl->fl_type = F_UNLCK; fl 444 fs/lockd/clntproc.c fl->fl_start = req->a_res.lock.fl.fl_start; fl 445 fs/lockd/clntproc.c fl->fl_end = req->a_res.lock.fl.fl_end; fl 446 fs/lockd/clntproc.c fl->fl_type = req->a_res.lock.fl.fl_type; fl 447 fs/lockd/clntproc.c fl->fl_pid = -req->a_res.lock.fl.fl_pid; fl 457 fs/lockd/clntproc.c static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) fl 459 fs/lockd/clntproc.c spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock); fl 460 fs/lockd/clntproc.c new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state; fl 461 fs/lockd/clntproc.c new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner); fl 462 fs/lockd/clntproc.c list_add_tail(&new->fl_u.nfs_fl.list, 
&fl->fl_u.nfs_fl.owner->host->h_granted); fl 463 fs/lockd/clntproc.c spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock); fl 466 fs/lockd/clntproc.c static void nlmclnt_locks_release_private(struct file_lock *fl) fl 468 fs/lockd/clntproc.c spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock); fl 469 fs/lockd/clntproc.c list_del(&fl->fl_u.nfs_fl.list); fl 470 fs/lockd/clntproc.c spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock); fl 471 fs/lockd/clntproc.c nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner); fl 479 fs/lockd/clntproc.c static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host) fl 481 fs/lockd/clntproc.c fl->fl_u.nfs_fl.state = 0; fl 482 fs/lockd/clntproc.c fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner); fl 483 fs/lockd/clntproc.c INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list); fl 484 fs/lockd/clntproc.c fl->fl_ops = &nlmclnt_lock_ops; fl 487 fs/lockd/clntproc.c static int do_vfs_lock(struct file_lock *fl) fl 489 fs/lockd/clntproc.c return locks_lock_file_wait(fl->fl_file, fl); fl 513 fs/lockd/clntproc.c nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) fl 515 fs/lockd/clntproc.c const struct cred *cred = nfs_file_cred(fl->fl_file); fl 519 fs/lockd/clntproc.c unsigned char fl_flags = fl->fl_flags; fl 527 fs/lockd/clntproc.c fl->fl_flags |= FL_ACCESS; fl 528 fs/lockd/clntproc.c status = do_vfs_lock(fl); fl 529 fs/lockd/clntproc.c fl->fl_flags = fl_flags; fl 533 fs/lockd/clntproc.c block = nlmclnt_prepare_block(host, fl); fl 542 fs/lockd/clntproc.c fl->fl_u.nfs_fl.state = host->h_state; fl 565 fs/lockd/clntproc.c if (nlmclnt_cancel(host, req->a_args.block, fl) == 0) fl 572 fs/lockd/clntproc.c if (fl->fl_u.nfs_fl.state != host->h_state) { fl 577 fs/lockd/clntproc.c fl->fl_flags |= FL_SLEEP; fl 578 fs/lockd/clntproc.c if (do_vfs_lock(fl) < 0) fl 581 fs/lockd/clntproc.c fl->fl_flags = fl_flags; fl 605 fs/lockd/clntproc.c fl_type = fl->fl_type; fl 606 fs/lockd/clntproc.c fl->fl_type = F_UNLCK; fl 608 fs/lockd/clntproc.c do_vfs_lock(fl); fl 610 fs/lockd/clntproc.c fl->fl_type = fl_type; fl 611 fs/lockd/clntproc.c fl->fl_flags = fl_flags; fl 620 fs/lockd/clntproc.c nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl, fl 626 fs/lockd/clntproc.c locks_init_lock(&req->a_args.lock.fl); fl 627 fs/lockd/clntproc.c locks_init_lock(&req->a_res.lock.fl); fl 631 fs/lockd/clntproc.c nlmclnt_setlockargs(req, fl); fl 634 fs/lockd/clntproc.c status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK); fl 639 fs/lockd/clntproc.c "(errno %d, status %d)\n", fl->fl_pid, fl 661 fs/lockd/clntproc.c nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) fl 666 fs/lockd/clntproc.c unsigned char fl_flags = fl->fl_flags; fl 673 fs/lockd/clntproc.c fl->fl_flags |= FL_EXISTS; fl 675 fs/lockd/clntproc.c status = do_vfs_lock(fl); fl 677 fs/lockd/clntproc.c fl->fl_flags = fl_flags; fl 684 fs/lockd/clntproc.c status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, fl 758 fs/lockd/clntproc.c static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl) fl 771 fs/lockd/clntproc.c nlmclnt_setlockargs(req, fl); fl 775 fs/lockd/clntproc.c status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, fl 64 fs/lockd/clntxdr.c const struct file_lock *fl = &lock->fl; fl 66 fs/lockd/clntxdr.c *l_offset = loff_t_to_s32(fl->fl_start); fl 67 fs/lockd/clntxdr.c if (fl->fl_end == OFFSET_MAX) fl 70 fs/lockd/clntxdr.c *l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); fl 241 fs/lockd/clntxdr.c encode_bool(xdr, lock->fl.fl_type == F_RDLCK); fl 254 
fs/lockd/clntxdr.c struct file_lock *fl = &lock->fl; fl 261 fs/lockd/clntxdr.c locks_init_lock(fl); fl 268 fs/lockd/clntxdr.c fl->fl_pid = (pid_t)lock->svid; fl 278 fs/lockd/clntxdr.c fl->fl_flags = FL_POSIX; fl 279 fs/lockd/clntxdr.c fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; fl 284 fs/lockd/clntxdr.c fl->fl_start = (loff_t)l_offset; fl 286 fs/lockd/clntxdr.c fl->fl_end = OFFSET_MAX; fl 288 fs/lockd/clntxdr.c fl->fl_end = (loff_t)end; fl 360 fs/lockd/clntxdr.c encode_bool(xdr, lock->fl.fl_type == F_WRLCK); fl 383 fs/lockd/clntxdr.c encode_bool(xdr, lock->fl.fl_type == F_WRLCK); fl 406 fs/lockd/clntxdr.c encode_bool(xdr, lock->fl.fl_type == F_WRLCK); fl 48 fs/lockd/svc4proc.c lock->fl.fl_file = file->f_file; fl 49 fs/lockd/svc4proc.c lock->fl.fl_pid = current->tgid; fl 50 fs/lockd/svc4proc.c lock->fl.fl_lmops = &nlmsvc_lock_operations; fl 51 fs/lockd/svc4proc.c nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid); fl 52 fs/lockd/svc4proc.c if (!lock->fl.fl_owner) { fl 148 fs/lockd/svclock.c struct file_lock *fl; fl 151 fs/lockd/svclock.c file, lock->fl.fl_pid, fl 152 fs/lockd/svclock.c (long long)lock->fl.fl_start, fl 153 fs/lockd/svclock.c (long long)lock->fl.fl_end, lock->fl.fl_type); fl 155 fs/lockd/svclock.c fl = &block->b_call->a_args.lock.fl; fl 157 fs/lockd/svclock.c block->b_file, fl->fl_pid, fl 158 fs/lockd/svclock.c (long long)fl->fl_start, fl 159 fs/lockd/svclock.c (long long)fl->fl_end, fl->fl_type, fl 161 fs/lockd/svclock.c if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { fl 239 fs/lockd/svclock.c call->a_args.lock.fl.fl_flags |= FL_SLEEP; fl 240 fs/lockd/svclock.c call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; fl 279 fs/lockd/svclock.c status = locks_delete_block(&block->b_call->a_args.lock.fl); fl 394 fs/lockd/svclock.c if (lock->fl.fl_owner) fl 395 fs/lockd/svclock.c nlmsvc_put_lockowner(lock->fl.fl_owner); fl 398 fs/lockd/svclock.c static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl) fl 400 fs/lockd/svclock.c struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner; fl 404 fs/lockd/svclock.c static void nlmsvc_locks_release_private(struct file_lock *fl) fl 406 fs/lockd/svclock.c nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner); fl 414 fs/lockd/svclock.c void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host, fl 417 fs/lockd/svclock.c fl->fl_owner = nlmsvc_find_lockowner(host, pid); fl 418 fs/lockd/svclock.c if (fl->fl_owner != NULL) fl 419 fs/lockd/svclock.c fl->fl_ops = &nlmsvc_lock_ops; fl 428 fs/lockd/svclock.c locks_copy_lock(&call->a_args.lock.fl, &lock->fl); fl 435 fs/lockd/svclock.c call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid; fl 453 fs/lockd/svclock.c locks_release_private(&call->a_args.lock.fl); fl 497 fs/lockd/svclock.c lock->fl.fl_type, lock->fl.fl_pid, fl 498 fs/lockd/svclock.c (long long)lock->fl.fl_start, fl 499 fs/lockd/svclock.c (long long)lock->fl.fl_end, fl 515 fs/lockd/svclock.c lock->fl.fl_flags &= ~FL_SLEEP; fl 544 fs/lockd/svclock.c lock->fl.fl_flags &= ~FL_SLEEP; fl 545 fs/lockd/svclock.c error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); fl 546 fs/lockd/svclock.c lock->fl.fl_flags &= ~FL_SLEEP; fl 604 fs/lockd/svclock.c lock->fl.fl_type, fl 605 fs/lockd/svclock.c (long long)lock->fl.fl_start, fl 606 fs/lockd/svclock.c (long long)lock->fl.fl_end); fl 614 fs/lockd/svclock.c test_owner = (struct nlm_lockowner *)lock->fl.fl_owner; fl 616 fs/lockd/svclock.c error = vfs_test_lock(file->f_file, &lock->fl); 
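The fs/lockd/clnt4xdr.c and fs/lockd/clntxdr.c hits above (around fl 64-74 and fl 259-294) all revolve around one convention: struct file_lock carries an inclusive [fl_start, fl_end] byte range with fl_end == OFFSET_MAX meaning "to end of file", while the NLM wire format carries an (offset, length) pair with length 0 meaning the same thing. The following is a minimal standalone sketch of that conversion, not kernel code: the helper names, the plain int64_t/uint64_t types, and the exact unbounded-range test on the decode side are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

#define OFFSET_MAX INT64_MAX

/* Hypothetical helper mirroring the encode side (cf. fl 70-74 above). */
static void range_to_offset_len(int64_t fl_start, int64_t fl_end,
                                uint64_t *offset, uint64_t *len)
{
	*offset = (uint64_t)fl_start;
	if (fl_end == OFFSET_MAX)
		*len = 0;                        /* lock extends to end of file */
	else
		*len = (uint64_t)(fl_end - fl_start + 1);
}

/* Hypothetical inverse for the decode side (cf. fl 290-294 above);
 * the end-of-file test here is a simplification. */
static void offset_len_to_range(uint64_t offset, uint64_t len,
                                int64_t *fl_start, int64_t *fl_end)
{
	int64_t end = (int64_t)(offset + len) - 1;

	*fl_start = (int64_t)offset;
	if (len == 0 || end < 0)
		*fl_end = OFFSET_MAX;            /* unbounded range */
	else
		*fl_end = end;
}

int main(void)
{
	uint64_t off, len;
	int64_t start, end;

	range_to_offset_len(100, 199, &off, &len);
	printf("offset=%llu len=%llu\n",
	       (unsigned long long)off, (unsigned long long)len); /* 100 100 */

	offset_len_to_range(100, 0, &start, &end);
	printf("start=%lld end=%s\n", (long long)start,
	       end == OFFSET_MAX ? "OFFSET_MAX" : "bounded");
	return 0;
}

Keeping the in-memory range inclusive is what produces the "+ 1" in the length computation and the "- 1" in the end computation seen throughout the xdr entries above.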
fl 626 fs/lockd/svclock.c if (lock->fl.fl_type == F_UNLCK) { fl 632 fs/lockd/svclock.c lock->fl.fl_type, (long long)lock->fl.fl_start, fl 633 fs/lockd/svclock.c (long long)lock->fl.fl_end); fl 637 fs/lockd/svclock.c conflock->svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid; fl 638 fs/lockd/svclock.c conflock->fl.fl_type = lock->fl.fl_type; fl 639 fs/lockd/svclock.c conflock->fl.fl_start = lock->fl.fl_start; fl 640 fs/lockd/svclock.c conflock->fl.fl_end = lock->fl.fl_end; fl 641 fs/lockd/svclock.c locks_release_private(&lock->fl); fl 644 fs/lockd/svclock.c lock->fl.fl_owner = NULL; fl 667 fs/lockd/svclock.c lock->fl.fl_pid, fl 668 fs/lockd/svclock.c (long long)lock->fl.fl_start, fl 669 fs/lockd/svclock.c (long long)lock->fl.fl_end); fl 674 fs/lockd/svclock.c lock->fl.fl_type = F_UNLCK; fl 675 fs/lockd/svclock.c error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); fl 696 fs/lockd/svclock.c lock->fl.fl_pid, fl 697 fs/lockd/svclock.c (long long)lock->fl.fl_start, fl 698 fs/lockd/svclock.c (long long)lock->fl.fl_end); fl 708 fs/lockd/svclock.c &block->b_call->a_args.lock.fl); fl 734 fs/lockd/svclock.c static int nlmsvc_grant_deferred(struct file_lock *fl, int result) fl 741 fs/lockd/svclock.c if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { fl 773 fs/lockd/svclock.c nlmsvc_notify_blocked(struct file_lock *fl) fl 777 fs/lockd/svclock.c dprintk("lockd: VFS unblock notification for block %p\n", fl); fl 780 fs/lockd/svclock.c if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { fl 834 fs/lockd/svclock.c lock->fl.fl_flags |= FL_SLEEP; fl 835 fs/lockd/svclock.c fl_start = lock->fl.fl_start; fl 836 fs/lockd/svclock.c fl_end = lock->fl.fl_end; fl 837 fs/lockd/svclock.c error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); fl 838 fs/lockd/svclock.c lock->fl.fl_flags &= ~FL_SLEEP; fl 839 fs/lockd/svclock.c lock->fl.fl_start = fl_start; fl 840 fs/lockd/svclock.c lock->fl.fl_end = fl_end; fl 78 fs/lockd/svcproc.c lock->fl.fl_file = file->f_file; fl 79 fs/lockd/svcproc.c lock->fl.fl_pid = current->tgid; fl 80 fs/lockd/svcproc.c lock->fl.fl_lmops = &nlmsvc_lock_operations; fl 81 fs/lockd/svcproc.c nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid); fl 82 fs/lockd/svcproc.c if (!lock->fl.fl_owner) { fl 167 fs/lockd/svcsubs.c struct file_lock *fl; fl 176 fs/lockd/svcsubs.c list_for_each_entry(fl, &flctx->flc_posix, fl_list) { fl 177 fs/lockd/svcsubs.c if (fl->fl_lmops != &nlmsvc_lock_operations) fl 183 fs/lockd/svcsubs.c lockhost = ((struct nlm_lockowner *)fl->fl_owner)->host; fl 185 fs/lockd/svcsubs.c struct file_lock lock = *fl; fl 229 fs/lockd/svcsubs.c struct file_lock *fl; fl 237 fs/lockd/svcsubs.c list_for_each_entry(fl, &flctx->flc_posix, fl_list) { fl 238 fs/lockd/svcsubs.c if (fl->fl_lmops == &nlmsvc_lock_operations) { fl 117 fs/lockd/xdr.c struct file_lock *fl = &lock->fl; fl 128 fs/lockd/xdr.c locks_init_lock(fl); fl 129 fs/lockd/xdr.c fl->fl_flags = FL_POSIX; fl 130 fs/lockd/xdr.c fl->fl_type = F_RDLCK; /* as good as anything else */ fl 135 fs/lockd/xdr.c fl->fl_start = s32_to_loff_t(start); fl 138 fs/lockd/xdr.c fl->fl_end = OFFSET_MAX; fl 140 fs/lockd/xdr.c fl->fl_end = s32_to_loff_t(end); fl 157 fs/lockd/xdr.c struct file_lock *fl = &resp->lock.fl; fl 159 fs/lockd/xdr.c *p++ = (fl->fl_type == F_RDLCK)? 
xdr_zero : xdr_one; fl 166 fs/lockd/xdr.c start = loff_t_to_s32(fl->fl_start); fl 167 fs/lockd/xdr.c if (fl->fl_end == OFFSET_MAX) fl 170 fs/lockd/xdr.c len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); fl 196 fs/lockd/xdr.c argp->lock.fl.fl_type = F_WRLCK; fl 224 fs/lockd/xdr.c argp->lock.fl.fl_type = F_WRLCK; fl 245 fs/lockd/xdr.c argp->lock.fl.fl_type = F_WRLCK; fl 257 fs/lockd/xdr.c argp->lock.fl.fl_type = F_UNLCK; fl 268 fs/lockd/xdr.c locks_init_lock(&lock->fl); fl 109 fs/lockd/xdr4.c struct file_lock *fl = &lock->fl; fl 120 fs/lockd/xdr4.c locks_init_lock(fl); fl 121 fs/lockd/xdr4.c fl->fl_flags = FL_POSIX; fl 122 fs/lockd/xdr4.c fl->fl_type = F_RDLCK; /* as good as anything else */ fl 127 fs/lockd/xdr4.c fl->fl_start = s64_to_loff_t(start); fl 130 fs/lockd/xdr4.c fl->fl_end = OFFSET_MAX; fl 132 fs/lockd/xdr4.c fl->fl_end = s64_to_loff_t(end); fl 150 fs/lockd/xdr4.c struct file_lock *fl = &resp->lock.fl; fl 152 fs/lockd/xdr4.c *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; fl 159 fs/lockd/xdr4.c start = loff_t_to_s64(fl->fl_start); fl 160 fs/lockd/xdr4.c if (fl->fl_end == OFFSET_MAX) fl 163 fs/lockd/xdr4.c len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); fl 168 fs/lockd/xdr4.c resp->status, (int)resp->lock.svid, fl->fl_type, fl 169 fs/lockd/xdr4.c (long long)fl->fl_start, (long long)fl->fl_end); fl 193 fs/lockd/xdr4.c argp->lock.fl.fl_type = F_WRLCK; fl 221 fs/lockd/xdr4.c argp->lock.fl.fl_type = F_WRLCK; fl 242 fs/lockd/xdr4.c argp->lock.fl.fl_type = F_WRLCK; fl 254 fs/lockd/xdr4.c argp->lock.fl.fl_type = F_UNLCK; fl 265 fs/lockd/xdr4.c locks_init_lock(&lock->fl); fl 177 fs/locks.c #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) fl 178 fs/locks.c #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) fl 179 fs/locks.c #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) fl 180 fs/locks.c #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK) fl 181 fs/locks.c #define IS_REMOTELCK(fl) (fl->fl_pid <= 0) fl 183 fs/locks.c static bool lease_breaking(struct file_lock *fl) fl 185 fs/locks.c return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING); fl 188 fs/locks.c static int target_leasetype(struct file_lock *fl) fl 190 fs/locks.c if (fl->fl_flags & FL_UNLOCK_PENDING) fl 192 fs/locks.c if (fl->fl_flags & FL_DOWNGRADE_PENDING) fl 194 fs/locks.c return fl->fl_type; fl 283 fs/locks.c struct file_lock *fl; fl 285 fs/locks.c list_for_each_entry(fl, list, fl_list) { fl 286 fs/locks.c pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid); fl 311 fs/locks.c struct file_lock *fl; fl 314 fs/locks.c list_for_each_entry(fl, list, fl_list) fl 315 fs/locks.c if (fl->fl_file == filp) fl 320 fs/locks.c fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid); fl 334 fs/locks.c static void locks_init_lock_heads(struct file_lock *fl) fl 336 fs/locks.c INIT_HLIST_NODE(&fl->fl_link); fl 337 fs/locks.c INIT_LIST_HEAD(&fl->fl_list); fl 338 fs/locks.c INIT_LIST_HEAD(&fl->fl_blocked_requests); fl 339 fs/locks.c INIT_LIST_HEAD(&fl->fl_blocked_member); fl 340 fs/locks.c init_waitqueue_head(&fl->fl_wait); fl 346 fs/locks.c struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL); fl 348 fs/locks.c if (fl) fl 349 fs/locks.c locks_init_lock_heads(fl); fl 351 fs/locks.c return fl; fl 355 fs/locks.c void locks_release_private(struct file_lock *fl) fl 357 fs/locks.c BUG_ON(waitqueue_active(&fl->fl_wait)); fl 358 fs/locks.c BUG_ON(!list_empty(&fl->fl_list)); fl 359 fs/locks.c 
BUG_ON(!list_empty(&fl->fl_blocked_requests)); fl 360 fs/locks.c BUG_ON(!list_empty(&fl->fl_blocked_member)); fl 361 fs/locks.c BUG_ON(!hlist_unhashed(&fl->fl_link)); fl 363 fs/locks.c if (fl->fl_ops) { fl 364 fs/locks.c if (fl->fl_ops->fl_release_private) fl 365 fs/locks.c fl->fl_ops->fl_release_private(fl); fl 366 fs/locks.c fl->fl_ops = NULL; fl 369 fs/locks.c if (fl->fl_lmops) { fl 370 fs/locks.c if (fl->fl_lmops->lm_put_owner) { fl 371 fs/locks.c fl->fl_lmops->lm_put_owner(fl->fl_owner); fl 372 fs/locks.c fl->fl_owner = NULL; fl 374 fs/locks.c fl->fl_lmops = NULL; fl 380 fs/locks.c void locks_free_lock(struct file_lock *fl) fl 382 fs/locks.c locks_release_private(fl); fl 383 fs/locks.c kmem_cache_free(filelock_cache, fl); fl 390 fs/locks.c struct file_lock *fl; fl 393 fs/locks.c fl = list_first_entry(dispose, struct file_lock, fl_list); fl 394 fs/locks.c list_del_init(&fl->fl_list); fl 395 fs/locks.c locks_free_lock(fl); fl 399 fs/locks.c void locks_init_lock(struct file_lock *fl) fl 401 fs/locks.c memset(fl, 0, sizeof(struct file_lock)); fl 402 fs/locks.c locks_init_lock_heads(fl); fl 409 fs/locks.c void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) fl 411 fs/locks.c new->fl_owner = fl->fl_owner; fl 412 fs/locks.c new->fl_pid = fl->fl_pid; fl 414 fs/locks.c new->fl_flags = fl->fl_flags; fl 415 fs/locks.c new->fl_type = fl->fl_type; fl 416 fs/locks.c new->fl_start = fl->fl_start; fl 417 fs/locks.c new->fl_end = fl->fl_end; fl 418 fs/locks.c new->fl_lmops = fl->fl_lmops; fl 421 fs/locks.c if (fl->fl_lmops) { fl 422 fs/locks.c if (fl->fl_lmops->lm_get_owner) fl 423 fs/locks.c fl->fl_lmops->lm_get_owner(fl->fl_owner); fl 428 fs/locks.c void locks_copy_lock(struct file_lock *new, struct file_lock *fl) fl 433 fs/locks.c locks_copy_conflock(new, fl); fl 435 fs/locks.c new->fl_file = fl->fl_file; fl 436 fs/locks.c new->fl_ops = fl->fl_ops; fl 438 fs/locks.c if (fl->fl_ops) { fl 439 fs/locks.c if (fl->fl_ops->fl_copy_lock) fl 440 fs/locks.c fl->fl_ops->fl_copy_lock(new, fl); fl 445 fs/locks.c static void locks_move_blocks(struct file_lock *new, struct file_lock *fl) fl 454 fs/locks.c if (list_empty(&fl->fl_blocked_requests)) fl 457 fs/locks.c list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests); fl 479 fs/locks.c flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl) fl 486 fs/locks.c if (fl == NULL) { fl 487 fs/locks.c fl = locks_alloc_lock(); fl 488 fs/locks.c if (fl == NULL) fl 491 fs/locks.c locks_init_lock(fl); fl 494 fs/locks.c fl->fl_file = filp; fl 495 fs/locks.c fl->fl_owner = filp; fl 496 fs/locks.c fl->fl_pid = current->tgid; fl 497 fs/locks.c fl->fl_flags = FL_FLOCK; fl 498 fs/locks.c fl->fl_type = type; fl 499 fs/locks.c fl->fl_end = OFFSET_MAX; fl 501 fs/locks.c return fl; fl 504 fs/locks.c static int assign_type(struct file_lock *fl, long type) fl 510 fs/locks.c fl->fl_type = type; fl 518 fs/locks.c static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, fl 523 fs/locks.c fl->fl_start = 0; fl 526 fs/locks.c fl->fl_start = filp->f_pos; fl 529 fs/locks.c fl->fl_start = i_size_read(file_inode(filp)); fl 534 fs/locks.c if (l->l_start > OFFSET_MAX - fl->fl_start) fl 536 fs/locks.c fl->fl_start += l->l_start; fl 537 fs/locks.c if (fl->fl_start < 0) fl 543 fs/locks.c if (l->l_len - 1 > OFFSET_MAX - fl->fl_start) fl 545 fs/locks.c fl->fl_end = fl->fl_start + l->l_len - 1; fl 548 fs/locks.c if (fl->fl_start + l->l_len < 0) fl 550 fs/locks.c fl->fl_end = fl->fl_start - 1; fl 551 fs/locks.c fl->fl_start += l->l_len; 
fl 553 fs/locks.c fl->fl_end = OFFSET_MAX; fl 555 fs/locks.c fl->fl_owner = current->files; fl 556 fs/locks.c fl->fl_pid = current->tgid; fl 557 fs/locks.c fl->fl_file = filp; fl 558 fs/locks.c fl->fl_flags = FL_POSIX; fl 559 fs/locks.c fl->fl_ops = NULL; fl 560 fs/locks.c fl->fl_lmops = NULL; fl 562 fs/locks.c return assign_type(fl, l->l_type); fl 568 fs/locks.c static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, fl 578 fs/locks.c return flock64_to_posix_lock(filp, fl, &ll); fl 583 fs/locks.c lease_break_callback(struct file_lock *fl) fl 585 fs/locks.c kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG); fl 590 fs/locks.c lease_setup(struct file_lock *fl, void **priv) fl 592 fs/locks.c struct file *filp = fl->fl_file; fl 600 fs/locks.c if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa)) fl 615 fs/locks.c static int lease_init(struct file *filp, long type, struct file_lock *fl) fl 617 fs/locks.c if (assign_type(fl, type) != 0) fl 620 fs/locks.c fl->fl_owner = filp; fl 621 fs/locks.c fl->fl_pid = current->tgid; fl 623 fs/locks.c fl->fl_file = filp; fl 624 fs/locks.c fl->fl_flags = FL_LEASE; fl 625 fs/locks.c fl->fl_start = 0; fl 626 fs/locks.c fl->fl_end = OFFSET_MAX; fl 627 fs/locks.c fl->fl_ops = NULL; fl 628 fs/locks.c fl->fl_lmops = &lease_manager_ops; fl 635 fs/locks.c struct file_lock *fl = locks_alloc_lock(); fl 638 fs/locks.c if (fl == NULL) fl 641 fs/locks.c error = lease_init(filp, type, fl); fl 643 fs/locks.c locks_free_lock(fl); fl 646 fs/locks.c return fl; fl 666 fs/locks.c static void locks_insert_global_locks(struct file_lock *fl) fl 673 fs/locks.c fl->fl_link_cpu = smp_processor_id(); fl 674 fs/locks.c hlist_add_head(&fl->fl_link, &fll->hlist); fl 679 fs/locks.c static void locks_delete_global_locks(struct file_lock *fl) fl 690 fs/locks.c if (hlist_unhashed(&fl->fl_link)) fl 693 fs/locks.c fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu); fl 695 fs/locks.c hlist_del_init(&fl->fl_link); fl 700 fs/locks.c posix_owner_key(struct file_lock *fl) fl 702 fs/locks.c return (unsigned long)fl->fl_owner; fl 823 fs/locks.c struct file_lock *fl; fl 827 fs/locks.c list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member) fl 828 fs/locks.c if (conflict(fl, waiter)) { fl 829 fs/locks.c blocker = fl; fl 878 fs/locks.c locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before) fl 880 fs/locks.c list_add_tail(&fl->fl_list, before); fl 881 fs/locks.c locks_insert_global_locks(fl); fl 885 fs/locks.c locks_unlink_lock_ctx(struct file_lock *fl) fl 887 fs/locks.c locks_delete_global_locks(fl); fl 888 fs/locks.c list_del_init(&fl->fl_list); fl 889 fs/locks.c locks_wake_up_blocks(fl); fl 893 fs/locks.c locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose) fl 895 fs/locks.c locks_unlink_lock_ctx(fl); fl 897 fs/locks.c list_add(&fl->fl_list, dispose); fl 899 fs/locks.c locks_free_lock(fl); fl 952 fs/locks.c posix_test_lock(struct file *filp, struct file_lock *fl) fl 960 fs/locks.c fl->fl_type = F_UNLCK; fl 966 fs/locks.c if (posix_locks_conflict(fl, cfl)) { fl 967 fs/locks.c locks_copy_conflock(fl, cfl); fl 971 fs/locks.c fl->fl_type = F_UNLCK; fl 1016 fs/locks.c struct file_lock *fl; fl 1018 fs/locks.c hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) { fl 1019 fs/locks.c if (posix_same_owner(fl, block_fl)) { fl 1020 fs/locks.c while (fl->fl_blocker) fl 1021 fs/locks.c fl = fl->fl_blocker; fl 1022 fs/locks.c return fl; fl 1062 fs/locks.c struct file_lock *fl; fl 1086 fs/locks.c list_for_each_entry(fl, 
&ctx->flc_flock, fl_list) { fl 1087 fs/locks.c if (request->fl_file != fl->fl_file) fl 1089 fs/locks.c if (request->fl_type == fl->fl_type) fl 1092 fs/locks.c locks_delete_lock_ctx(fl, &dispose); fl 1103 fs/locks.c list_for_each_entry(fl, &ctx->flc_flock, fl_list) { fl 1104 fs/locks.c if (!flock_locks_conflict(request, fl)) fl 1110 fs/locks.c locks_insert_block(fl, request, flock_locks_conflict); fl 1134 fs/locks.c struct file_lock *fl, *tmp; fl 1169 fs/locks.c list_for_each_entry(fl, &ctx->flc_posix, fl_list) { fl 1170 fs/locks.c if (!posix_locks_conflict(request, fl)) fl 1173 fs/locks.c locks_copy_conflock(conflock, fl); fl 1188 fs/locks.c if (likely(!posix_locks_deadlock(request, fl))) { fl 1190 fs/locks.c __locks_insert_block(fl, request, fl 1204 fs/locks.c list_for_each_entry(fl, &ctx->flc_posix, fl_list) { fl 1205 fs/locks.c if (posix_same_owner(request, fl)) fl 1210 fs/locks.c list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) { fl 1211 fs/locks.c if (!posix_same_owner(request, fl)) fl 1215 fs/locks.c if (request->fl_type == fl->fl_type) { fl 1220 fs/locks.c if (fl->fl_end < request->fl_start - 1) fl 1225 fs/locks.c if (fl->fl_start - 1 > request->fl_end) fl 1233 fs/locks.c if (fl->fl_start > request->fl_start) fl 1234 fs/locks.c fl->fl_start = request->fl_start; fl 1236 fs/locks.c request->fl_start = fl->fl_start; fl 1237 fs/locks.c if (fl->fl_end < request->fl_end) fl 1238 fs/locks.c fl->fl_end = request->fl_end; fl 1240 fs/locks.c request->fl_end = fl->fl_end; fl 1242 fs/locks.c locks_delete_lock_ctx(fl, &dispose); fl 1245 fs/locks.c request = fl; fl 1251 fs/locks.c if (fl->fl_end < request->fl_start) fl 1253 fs/locks.c if (fl->fl_start > request->fl_end) fl 1257 fs/locks.c if (fl->fl_start < request->fl_start) fl 1258 fs/locks.c left = fl; fl 1262 fs/locks.c if (fl->fl_end > request->fl_end) { fl 1263 fs/locks.c right = fl; fl 1266 fs/locks.c if (fl->fl_start >= request->fl_start) { fl 1271 fs/locks.c locks_delete_lock_ctx(fl, &dispose); fl 1287 fs/locks.c locks_insert_lock_ctx(request, &fl->fl_list); fl 1288 fs/locks.c locks_delete_lock_ctx(fl, &dispose); fl 1317 fs/locks.c locks_insert_lock_ctx(new_fl, &fl->fl_list); fl 1318 fs/locks.c fl = new_fl; fl 1329 fs/locks.c locks_insert_lock_ctx(left, &fl->fl_list); fl 1368 fs/locks.c int posix_lock_file(struct file *filp, struct file_lock *fl, fl 1371 fs/locks.c return posix_lock_inode(locks_inode(filp), fl, conflock); fl 1382 fs/locks.c static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl) fl 1387 fs/locks.c error = posix_lock_inode(inode, fl, NULL); fl 1390 fs/locks.c error = wait_event_interruptible(fl->fl_wait, fl 1391 fs/locks.c list_empty(&fl->fl_blocked_member)); fl 1395 fs/locks.c locks_delete_block(fl); fl 1412 fs/locks.c struct file_lock *fl; fl 1423 fs/locks.c list_for_each_entry(fl, &ctx->flc_posix, fl_list) { fl 1424 fs/locks.c if (fl->fl_owner != current->files && fl 1425 fs/locks.c fl->fl_owner != file) { fl 1447 fs/locks.c struct file_lock fl; fl 1451 fs/locks.c locks_init_lock(&fl); fl 1452 fs/locks.c fl.fl_pid = current->tgid; fl 1453 fs/locks.c fl.fl_file = filp; fl 1454 fs/locks.c fl.fl_flags = FL_POSIX | FL_ACCESS; fl 1457 fs/locks.c fl.fl_type = type; fl 1458 fs/locks.c fl.fl_start = start; fl 1459 fs/locks.c fl.fl_end = end; fl 1463 fs/locks.c fl.fl_owner = filp; fl 1464 fs/locks.c fl.fl_flags &= ~FL_SLEEP; fl 1465 fs/locks.c error = posix_lock_inode(inode, &fl, NULL); fl 1471 fs/locks.c fl.fl_flags |= FL_SLEEP; fl 1472 fs/locks.c fl.fl_owner = current->files; fl 1473 
fs/locks.c error = posix_lock_inode(inode, &fl, NULL); fl 1476 fs/locks.c error = wait_event_interruptible(fl.fl_wait, fl 1477 fs/locks.c list_empty(&fl.fl_blocked_member)); fl 1489 fs/locks.c locks_delete_block(&fl); fl 1496 fs/locks.c static void lease_clear_pending(struct file_lock *fl, int arg) fl 1500 fs/locks.c fl->fl_flags &= ~FL_UNLOCK_PENDING; fl 1503 fs/locks.c fl->fl_flags &= ~FL_DOWNGRADE_PENDING; fl 1508 fs/locks.c int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) fl 1510 fs/locks.c int error = assign_type(fl, arg); fl 1514 fs/locks.c lease_clear_pending(fl, arg); fl 1515 fs/locks.c locks_wake_up_blocks(fl); fl 1517 fs/locks.c struct file *filp = fl->fl_file; fl 1521 fs/locks.c fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); fl 1522 fs/locks.c if (fl->fl_fasync != NULL) { fl 1523 fs/locks.c printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); fl 1524 fs/locks.c fl->fl_fasync = NULL; fl 1526 fs/locks.c locks_delete_lock_ctx(fl, dispose); fl 1543 fs/locks.c struct file_lock *fl, *tmp; fl 1547 fs/locks.c list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { fl 1548 fs/locks.c trace_time_out_leases(inode, fl); fl 1549 fs/locks.c if (past_time(fl->fl_downgrade_time)) fl 1550 fs/locks.c lease_modify(fl, F_RDLCK, dispose); fl 1551 fs/locks.c if (past_time(fl->fl_break_time)) fl 1552 fs/locks.c lease_modify(fl, F_UNLCK, dispose); fl 1579 fs/locks.c struct file_lock *fl; fl 1583 fs/locks.c list_for_each_entry(fl, &ctx->flc_lease, fl_list) { fl 1584 fs/locks.c if (leases_conflict(fl, breaker)) fl 1607 fs/locks.c struct file_lock *new_fl, *fl, *tmp; fl 1639 fs/locks.c list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { fl 1640 fs/locks.c if (!leases_conflict(fl, new_fl)) fl 1643 fs/locks.c if (fl->fl_flags & FL_UNLOCK_PENDING) fl 1645 fs/locks.c fl->fl_flags |= FL_UNLOCK_PENDING; fl 1646 fs/locks.c fl->fl_break_time = break_time; fl 1648 fs/locks.c if (lease_breaking(fl)) fl 1650 fs/locks.c fl->fl_flags |= FL_DOWNGRADE_PENDING; fl 1651 fs/locks.c fl->fl_downgrade_time = break_time; fl 1653 fs/locks.c if (fl->fl_lmops->lm_break(fl)) fl 1654 fs/locks.c locks_delete_lock_ctx(fl, &dispose); fl 1667 fs/locks.c fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list); fl 1668 fs/locks.c break_time = fl->fl_break_time; fl 1673 fs/locks.c locks_insert_block(fl, new_fl, leases_conflict); fl 1721 fs/locks.c struct file_lock *fl; fl 1726 fs/locks.c fl = list_first_entry_or_null(&ctx->flc_lease, fl 1728 fs/locks.c if (fl && (fl->fl_type == F_WRLCK)) fl 1763 fs/locks.c struct file_lock *fl; fl 1774 fs/locks.c list_for_each_entry(fl, &ctx->flc_lease, fl_list) { fl 1775 fs/locks.c if (fl->fl_file != filp) fl 1777 fs/locks.c type = target_leasetype(fl); fl 1834 fs/locks.c struct file_lock *fl, *my_fl = NULL, *lease; fl 1883 fs/locks.c list_for_each_entry(fl, &ctx->flc_lease, fl_list) { fl 1884 fs/locks.c if (fl->fl_file == filp && fl 1885 fs/locks.c fl->fl_owner == lease->fl_owner) { fl 1886 fs/locks.c my_fl = fl; fl 1900 fs/locks.c if (fl->fl_flags & FL_UNLOCK_PENDING) fl 1950 fs/locks.c struct file_lock *fl, *victim = NULL; fl 1963 fs/locks.c list_for_each_entry(fl, &ctx->flc_lease, fl_list) { fl 1964 fs/locks.c if (fl->fl_file == filp && fl 1965 fs/locks.c fl->fl_owner == owner) { fl 1966 fs/locks.c victim = fl; fl 1972 fs/locks.c error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); fl 2110 fs/locks.c struct file_lock *fl; fl 2114 fs/locks.c fl = lease_alloc(filp, arg); fl 2115 fs/locks.c if (IS_ERR(fl)) fl 2116 fs/locks.c 
return PTR_ERR(fl); fl 2120 fs/locks.c locks_free_lock(fl); fl 2125 fs/locks.c error = vfs_setlease(filp, arg, &fl, (void **)&new); fl 2126 fs/locks.c if (fl) fl 2127 fs/locks.c locks_free_lock(fl); fl 2157 fs/locks.c static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl) fl 2162 fs/locks.c error = flock_lock_inode(inode, fl); fl 2165 fs/locks.c error = wait_event_interruptible(fl->fl_wait, fl 2166 fs/locks.c list_empty(&fl->fl_blocked_member)); fl 2170 fs/locks.c locks_delete_block(fl); fl 2181 fs/locks.c int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) fl 2184 fs/locks.c switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { fl 2186 fs/locks.c res = posix_lock_inode_wait(inode, fl); fl 2189 fs/locks.c res = flock_lock_inode_wait(inode, fl); fl 2271 fs/locks.c int vfs_test_lock(struct file *filp, struct file_lock *fl) fl 2274 fs/locks.c return filp->f_op->lock(filp, F_GETLK, fl); fl 2275 fs/locks.c posix_test_lock(filp, fl); fl 2287 fs/locks.c static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) fl 2292 fs/locks.c if (IS_OFDLCK(fl)) fl 2294 fs/locks.c if (IS_REMOTELCK(fl)) fl 2295 fs/locks.c return fl->fl_pid; fl 2302 fs/locks.c return (pid_t)fl->fl_pid; fl 2305 fs/locks.c pid = find_pid_ns(fl->fl_pid, &init_pid_ns); fl 2311 fs/locks.c static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl) fl 2313 fs/locks.c flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current)); fl 2319 fs/locks.c if (fl->fl_start > OFFT_OFFSET_MAX) fl 2321 fs/locks.c if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX) fl 2324 fs/locks.c flock->l_start = fl->fl_start; fl 2325 fs/locks.c flock->l_len = fl->fl_end == OFFSET_MAX ? 0 : fl 2326 fs/locks.c fl->fl_end - fl->fl_start + 1; fl 2328 fs/locks.c flock->l_type = fl->fl_type; fl 2333 fs/locks.c static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl) fl 2335 fs/locks.c flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current)); fl 2336 fs/locks.c flock->l_start = fl->fl_start; fl 2337 fs/locks.c flock->l_len = fl->fl_end == OFFSET_MAX ? 
0 : fl 2338 fs/locks.c fl->fl_end - fl->fl_start + 1; fl 2340 fs/locks.c flock->l_type = fl->fl_type; fl 2349 fs/locks.c struct file_lock *fl; fl 2352 fs/locks.c fl = locks_alloc_lock(); fl 2353 fs/locks.c if (fl == NULL) fl 2359 fs/locks.c error = flock_to_posix_lock(filp, fl, flock); fl 2369 fs/locks.c fl->fl_flags |= FL_OFDLCK; fl 2370 fs/locks.c fl->fl_owner = filp; fl 2373 fs/locks.c error = vfs_test_lock(filp, fl); fl 2377 fs/locks.c flock->l_type = fl->fl_type; fl 2378 fs/locks.c if (fl->fl_type != F_UNLCK) { fl 2379 fs/locks.c error = posix_lock_to_flock(flock, fl); fl 2384 fs/locks.c locks_free_lock(fl); fl 2421 fs/locks.c int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) fl 2424 fs/locks.c return filp->f_op->lock(filp, cmd, fl); fl 2426 fs/locks.c return posix_lock_file(filp, fl, conf); fl 2431 fs/locks.c struct file_lock *fl) fl 2435 fs/locks.c error = security_file_lock(filp, fl->fl_type); fl 2440 fs/locks.c error = vfs_lock_file(filp, cmd, fl, NULL); fl 2443 fs/locks.c error = wait_event_interruptible(fl->fl_wait, fl 2444 fs/locks.c list_empty(&fl->fl_blocked_member)); fl 2448 fs/locks.c locks_delete_block(fl); fl 2455 fs/locks.c check_fmode_for_setlk(struct file_lock *fl) fl 2457 fs/locks.c switch (fl->fl_type) { fl 2459 fs/locks.c if (!(fl->fl_file->f_mode & FMODE_READ)) fl 2463 fs/locks.c if (!(fl->fl_file->f_mode & FMODE_WRITE)) fl 2562 fs/locks.c struct file_lock *fl; fl 2565 fs/locks.c fl = locks_alloc_lock(); fl 2566 fs/locks.c if (fl == NULL) fl 2573 fs/locks.c error = flock64_to_posix_lock(filp, fl, flock); fl 2583 fs/locks.c fl->fl_flags |= FL_OFDLCK; fl 2584 fs/locks.c fl->fl_owner = filp; fl 2587 fs/locks.c error = vfs_test_lock(filp, fl); fl 2591 fs/locks.c flock->l_type = fl->fl_type; fl 2592 fs/locks.c if (fl->fl_type != F_UNLCK) fl 2593 fs/locks.c posix_lock_to_flock64(flock, fl); fl 2596 fs/locks.c locks_free_lock(fl); fl 2731 fs/locks.c struct file_lock fl; fl 2737 fs/locks.c flock_make_lock(filp, LOCK_UN, &fl); fl 2738 fs/locks.c fl.fl_flags |= FL_CLOSE; fl 2741 fs/locks.c filp->f_op->flock(filp, F_SETLKW, &fl); fl 2743 fs/locks.c flock_lock_inode(inode, &fl); fl 2745 fs/locks.c if (fl.fl_ops && fl.fl_ops->fl_release_private) fl 2746 fs/locks.c fl.fl_ops->fl_release_private(&fl); fl 2753 fs/locks.c struct file_lock *fl, *tmp; fl 2761 fs/locks.c list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) fl 2762 fs/locks.c if (filp == fl->fl_file) fl 2763 fs/locks.c lease_modify(fl, F_UNLCK, &dispose); fl 2804 fs/locks.c int vfs_cancel_lock(struct file *filp, struct file_lock *fl) fl 2807 fs/locks.c return filp->f_op->lock(filp, F_CANCELLK, fl); fl 2821 fs/locks.c static void lock_get_status(struct seq_file *f, struct file_lock *fl, fl 2828 fs/locks.c fl_pid = locks_translate_pid(fl, proc_pidns); fl 2835 fs/locks.c if (fl->fl_file != NULL) fl 2836 fs/locks.c inode = locks_inode(fl->fl_file); fl 2839 fs/locks.c if (IS_POSIX(fl)) { fl 2840 fs/locks.c if (fl->fl_flags & FL_ACCESS) fl 2842 fs/locks.c else if (IS_OFDLCK(fl)) fl 2850 fs/locks.c } else if (IS_FLOCK(fl)) { fl 2851 fs/locks.c if (fl->fl_type & LOCK_MAND) { fl 2856 fs/locks.c } else if (IS_LEASE(fl)) { fl 2857 fs/locks.c if (fl->fl_flags & FL_DELEG) fl 2862 fs/locks.c if (lease_breaking(fl)) fl 2864 fs/locks.c else if (fl->fl_file) fl 2871 fs/locks.c if (fl->fl_type & LOCK_MAND) { fl 2873 fs/locks.c (fl->fl_type & LOCK_READ) fl 2874 fs/locks.c ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " fl 2875 fs/locks.c : (fl->fl_type & LOCK_WRITE) ? 
"WRITE" : "NONE "); fl 2877 fs/locks.c int type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type; fl 2890 fs/locks.c if (IS_POSIX(fl)) { fl 2891 fs/locks.c if (fl->fl_end == OFFSET_MAX) fl 2892 fs/locks.c seq_printf(f, "%Ld EOF\n", fl->fl_start); fl 2894 fs/locks.c seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); fl 2903 fs/locks.c struct file_lock *fl, *bfl; fl 2906 fs/locks.c fl = hlist_entry(v, struct file_lock, fl_link); fl 2908 fs/locks.c if (locks_translate_pid(fl, proc_pidns) == 0) fl 2911 fs/locks.c lock_get_status(f, fl, iter->li_pos, ""); fl 2913 fs/locks.c list_for_each_entry(bfl, &fl->fl_blocked_requests, fl_blocked_member) fl 2923 fs/locks.c struct file_lock *fl; fl 2925 fs/locks.c list_for_each_entry(fl, head, fl_list) { fl 2927 fs/locks.c if (filp != fl->fl_file) fl 2929 fs/locks.c if (fl->fl_owner != files && fl 2930 fs/locks.c fl->fl_owner != filp) fl 2935 fs/locks.c lock_get_status(f, fl, *id, ""); fl 2418 fs/namespace.c unsigned int fl = mnt->mnt.mnt_flags; fl 2420 fs/namespace.c if ((fl & MNT_LOCK_READONLY) && fl 2424 fs/namespace.c if ((fl & MNT_LOCK_NODEV) && fl 2428 fs/namespace.c if ((fl & MNT_LOCK_NOSUID) && fl 2432 fs/namespace.c if ((fl & MNT_LOCK_NOEXEC) && fl 2436 fs/namespace.c if ((fl & MNT_LOCK_ATIME) && fl 2437 fs/namespace.c ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) fl 108 fs/nfs/delegation.c struct file_lock *fl; fl 119 fs/nfs/delegation.c list_for_each_entry(fl, list, fl_list) { fl 120 fs/nfs/delegation.c if (nfs_file_open_context(fl->fl_file)->state != state) fl 123 fs/nfs/delegation.c status = nfs4_lock_delegation_recall(fl, state, stateid); fl 67 fs/nfs/delegation.h int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid); fl 657 fs/nfs/file.c do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) fl 661 fs/nfs/file.c unsigned int saved_type = fl->fl_type; fl 664 fs/nfs/file.c posix_test_lock(filp, fl); fl 665 fs/nfs/file.c if (fl->fl_type != F_UNLCK) { fl 669 fs/nfs/file.c fl->fl_type = saved_type; fl 677 fs/nfs/file.c status = NFS_PROTO(inode)->lock(filp, cmd, fl); fl 681 fs/nfs/file.c fl->fl_type = F_UNLCK; fl 686 fs/nfs/file.c do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) fl 706 fs/nfs/file.c if (status < 0 && !(fl->fl_flags & FL_CLOSE)) fl 715 fs/nfs/file.c status = NFS_PROTO(inode)->lock(filp, cmd, fl); fl 717 fs/nfs/file.c status = locks_lock_file_wait(filp, fl); fl 722 fs/nfs/file.c do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) fl 740 fs/nfs/file.c status = NFS_PROTO(inode)->lock(filp, cmd, fl); fl 742 fs/nfs/file.c status = locks_lock_file_wait(filp, fl); fl 766 fs/nfs/file.c int nfs_lock(struct file *filp, int cmd, struct file_lock *fl) fl 773 fs/nfs/file.c filp, fl->fl_type, fl->fl_flags, fl 774 fs/nfs/file.c (long long)fl->fl_start, (long long)fl->fl_end); fl 779 fs/nfs/file.c if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) fl 786 fs/nfs/file.c ret = NFS_PROTO(inode)->lock_check_bounds(fl); fl 792 fs/nfs/file.c ret = do_getlk(filp, cmd, fl, is_local); fl 793 fs/nfs/file.c else if (fl->fl_type == F_UNLCK) fl 794 fs/nfs/file.c ret = do_unlk(filp, cmd, fl, is_local); fl 796 fs/nfs/file.c ret = do_setlk(filp, cmd, fl, is_local); fl 805 fs/nfs/file.c int nfs_flock(struct file *filp, int cmd, struct file_lock *fl) fl 811 fs/nfs/file.c filp, fl->fl_type, fl->fl_flags); fl 813 fs/nfs/file.c if (!(fl->fl_flags & FL_FLOCK)) fl 822 fs/nfs/file.c if (fl->fl_type & LOCK_MAND) fl 829 fs/nfs/file.c if 
(fl->fl_type == F_UNLCK) fl 830 fs/nfs/file.c return do_unlk(filp, cmd, fl, is_local); fl 831 fs/nfs/file.c return do_setlk(filp, cmd, fl, is_local); fl 538 fs/nfs/filelayout/filelayout.c struct nfs4_filelayout_segment *fl, fl 546 fs/nfs/filelayout/filelayout.c if (fl->dsaddr != NULL) fl 550 fs/nfs/filelayout/filelayout.c d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid, fl 560 fs/nfs/filelayout/filelayout.c if (fl->first_stripe_index >= dsaddr->stripe_count) { fl 562 fs/nfs/filelayout/filelayout.c __func__, fl->first_stripe_index); fl 566 fs/nfs/filelayout/filelayout.c if ((fl->stripe_type == STRIPE_SPARSE && fl 567 fs/nfs/filelayout/filelayout.c fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) || fl 568 fs/nfs/filelayout/filelayout.c (fl->stripe_type == STRIPE_DENSE && fl 569 fs/nfs/filelayout/filelayout.c fl->num_fh != dsaddr->stripe_count)) { fl 571 fs/nfs/filelayout/filelayout.c __func__, fl->num_fh); fl 580 fs/nfs/filelayout/filelayout.c if (cmpxchg(&fl->dsaddr, NULL, dsaddr) != NULL) fl 599 fs/nfs/filelayout/filelayout.c struct nfs4_filelayout_segment *fl, fl 615 fs/nfs/filelayout/filelayout.c if (fl->pattern_offset > lgr->range.offset) { fl 617 fs/nfs/filelayout/filelayout.c __func__, fl->pattern_offset); fl 621 fs/nfs/filelayout/filelayout.c if (!fl->stripe_unit) { fl 623 fs/nfs/filelayout/filelayout.c __func__, fl->stripe_unit); fl 633 fs/nfs/filelayout/filelayout.c static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl) fl 637 fs/nfs/filelayout/filelayout.c if (fl->fh_array) { fl 638 fs/nfs/filelayout/filelayout.c for (i = 0; i < fl->num_fh; i++) { fl 639 fs/nfs/filelayout/filelayout.c if (!fl->fh_array[i]) fl 641 fs/nfs/filelayout/filelayout.c kfree(fl->fh_array[i]); fl 643 fs/nfs/filelayout/filelayout.c kfree(fl->fh_array); fl 645 fs/nfs/filelayout/filelayout.c kfree(fl); fl 650 fs/nfs/filelayout/filelayout.c struct nfs4_filelayout_segment *fl, fl 676 fs/nfs/filelayout/filelayout.c memcpy(&fl->deviceid, p, sizeof(fl->deviceid)); fl 678 fs/nfs/filelayout/filelayout.c nfs4_print_deviceid(&fl->deviceid); fl 682 fs/nfs/filelayout/filelayout.c fl->commit_through_mds = 1; fl 684 fs/nfs/filelayout/filelayout.c fl->stripe_type = STRIPE_DENSE; fl 686 fs/nfs/filelayout/filelayout.c fl->stripe_type = STRIPE_SPARSE; fl 687 fs/nfs/filelayout/filelayout.c fl->stripe_unit = nfl_util & ~NFL4_UFLG_MASK; fl 689 fs/nfs/filelayout/filelayout.c fl->first_stripe_index = be32_to_cpup(p++); fl 690 fs/nfs/filelayout/filelayout.c p = xdr_decode_hyper(p, &fl->pattern_offset); fl 691 fs/nfs/filelayout/filelayout.c fl->num_fh = be32_to_cpup(p++); fl 694 fs/nfs/filelayout/filelayout.c __func__, nfl_util, fl->num_fh, fl->first_stripe_index, fl 695 fs/nfs/filelayout/filelayout.c fl->pattern_offset); fl 699 fs/nfs/filelayout/filelayout.c if (fl->num_fh > fl 703 fs/nfs/filelayout/filelayout.c if (fl->num_fh > 0) { fl 704 fs/nfs/filelayout/filelayout.c fl->fh_array = kcalloc(fl->num_fh, sizeof(fl->fh_array[0]), fl 706 fs/nfs/filelayout/filelayout.c if (!fl->fh_array) fl 710 fs/nfs/filelayout/filelayout.c for (i = 0; i < fl->num_fh; i++) { fl 712 fs/nfs/filelayout/filelayout.c fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); fl 713 fs/nfs/filelayout/filelayout.c if (!fl->fh_array[i]) fl 719 fs/nfs/filelayout/filelayout.c fl->fh_array[i]->size = be32_to_cpup(p++); fl 720 fs/nfs/filelayout/filelayout.c if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { fl 722 fs/nfs/filelayout/filelayout.c i, fl->fh_array[i]->size); fl 726 fs/nfs/filelayout/filelayout.c p = 
xdr_inline_decode(&stream, fl->fh_array[i]->size); fl 729 fs/nfs/filelayout/filelayout.c memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size); fl 731 fs/nfs/filelayout/filelayout.c fl->fh_array[i]->size); fl 745 fs/nfs/filelayout/filelayout.c struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); fl 748 fs/nfs/filelayout/filelayout.c if (fl->dsaddr != NULL) fl 749 fs/nfs/filelayout/filelayout.c nfs4_fl_put_deviceid(fl->dsaddr); fl 759 fs/nfs/filelayout/filelayout.c _filelayout_free_lseg(fl); fl 767 fs/nfs/filelayout/filelayout.c struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); fl 771 fs/nfs/filelayout/filelayout.c if (fl->commit_through_mds) fl 774 fs/nfs/filelayout/filelayout.c size = (fl->stripe_type == STRIPE_SPARSE) ? fl 775 fs/nfs/filelayout/filelayout.c fl->dsaddr->ds_num : fl->dsaddr->stripe_count; fl 824 fs/nfs/filelayout/filelayout.c struct nfs4_filelayout_segment *fl; fl 828 fs/nfs/filelayout/filelayout.c fl = kzalloc(sizeof(*fl), gfp_flags); fl 829 fs/nfs/filelayout/filelayout.c if (!fl) fl 832 fs/nfs/filelayout/filelayout.c rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags); fl 833 fs/nfs/filelayout/filelayout.c if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) { fl 834 fs/nfs/filelayout/filelayout.c _filelayout_free_lseg(fl); fl 837 fs/nfs/filelayout/filelayout.c return &fl->generic_hdr; fl 893 fs/nfs/filelayout/filelayout.c struct nfs4_filelayout_segment *fl; fl 902 fs/nfs/filelayout/filelayout.c fl = FILELAYOUT_LSEG(lseg); fl 904 fs/nfs/filelayout/filelayout.c status = filelayout_check_deviceid(lo, fl, gfp_flags); fl 989 fs/nfs/filelayout/filelayout.c static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j) fl 991 fs/nfs/filelayout/filelayout.c if (fl->stripe_type == STRIPE_SPARSE) fl 992 fs/nfs/filelayout/filelayout.c return nfs4_fl_calc_ds_index(&fl->generic_hdr, j); fl 1004 fs/nfs/filelayout/filelayout.c struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); fl 1007 fs/nfs/filelayout/filelayout.c if (fl->commit_through_mds) { fl 1017 fs/nfs/filelayout/filelayout.c i = select_bucket_index(fl, j); fl 925 fs/nfs/nfs3proc.c nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl) fl 932 fs/nfs/nfs3proc.c if (fl->fl_flags & FL_CLOSE) { fl 940 fs/nfs/nfs3proc.c status = nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl, l_ctx); fl 488 fs/nfs/nfs4_fs.h extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); fl 2184 fs/nfs/nfs4proc.c static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) fl 2228 fs/nfs/nfs4proc.c if (fl) { fl 2229 fs/nfs/nfs4proc.c struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; fl 6391 fs/nfs/nfs4proc.c .fl = request, fl 6484 fs/nfs/nfs4proc.c struct file_lock fl; fl 6489 fs/nfs/nfs4proc.c static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, fl 6502 fs/nfs/nfs4proc.c p->arg.fl = &p->fl; fl 6509 fs/nfs/nfs4proc.c locks_init_lock(&p->fl); fl 6510 fs/nfs/nfs4proc.c locks_copy_lock(&p->fl, fl); fl 6541 fs/nfs/nfs4proc.c locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); fl 6606 fs/nfs/nfs4proc.c static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, fl 6630 fs/nfs/nfs4proc.c fl->fl_type = F_UNLCK; fl 6631 fs/nfs/nfs4proc.c if (fl->fl_flags & FL_CLOSE) fl 6634 fs/nfs/nfs4proc.c data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); fl 6701 fs/nfs/nfs4proc.c struct file_lock fl; fl 6708 fs/nfs/nfs4proc.c static struct 
nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, fl 6722 fs/nfs/nfs4proc.c p->arg.fl = &p->fl; fl 6737 fs/nfs/nfs4proc.c locks_init_lock(&p->fl); fl 6738 fs/nfs/nfs4proc.c locks_copy_lock(&p->fl, fl); fl 6805 fs/nfs/nfs4proc.c data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); fl 6806 fs/nfs/nfs4proc.c if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) fl 6845 fs/nfs/nfs4proc.c task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, fl 6881 fs/nfs/nfs4proc.c static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) fl 6899 fs/nfs/nfs4proc.c data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), fl 6900 fs/nfs/nfs4proc.c fl->fl_u.nfs4_fl.owner, fl 6929 fs/nfs/nfs4proc.c trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); fl 7213 fs/nfs/nfs4proc.c int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) fl 7218 fs/nfs/nfs4proc.c err = nfs4_set_lock_state(state, fl); fl 7221 fs/nfs/nfs4proc.c err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); fl 7222 fs/nfs/nfs4proc.c return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); fl 961 fs/nfs/nfs4state.c static void nfs4_fl_release_lock(struct file_lock *fl) fl 963 fs/nfs/nfs4state.c nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner); fl 971 fs/nfs/nfs4state.c int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) fl 975 fs/nfs/nfs4state.c if (fl->fl_ops != NULL) fl 977 fs/nfs/nfs4state.c lsp = nfs4_get_lock_state(state, fl->fl_owner); fl 980 fs/nfs/nfs4state.c fl->fl_u.nfs4_fl.owner = lsp; fl 981 fs/nfs/nfs4state.c fl->fl_ops = &nfs4_fl_lock_ops; fl 1491 fs/nfs/nfs4state.c struct file_lock *fl; fl 1506 fs/nfs/nfs4state.c list_for_each_entry(fl, list, fl_list) { fl 1507 fs/nfs/nfs4state.c if (nfs_file_open_context(fl->fl_file)->state != state) fl 1510 fs/nfs/nfs4state.c status = ops->recover_lock(state, fl); fl 1535 fs/nfs/nfs4state.c lsp = fl->fl_u.nfs4_fl.owner; fl 1297 fs/nfs/nfs4xdr.c static inline int nfs4_lock_type(struct file_lock *fl, int block) fl 1299 fs/nfs/nfs4xdr.c if (fl->fl_type == F_RDLCK) fl 1304 fs/nfs/nfs4xdr.c static inline uint64_t nfs4_lock_length(struct file_lock *fl) fl 1306 fs/nfs/nfs4xdr.c if (fl->fl_end == OFFSET_MAX) fl 1308 fs/nfs/nfs4xdr.c return fl->fl_end - fl->fl_start + 1; fl 1333 fs/nfs/nfs4xdr.c *p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block)); fl 1335 fs/nfs/nfs4xdr.c p = xdr_encode_hyper(p, args->fl->fl_start); fl 1336 fs/nfs/nfs4xdr.c p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); fl 1356 fs/nfs/nfs4xdr.c *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); fl 1357 fs/nfs/nfs4xdr.c p = xdr_encode_hyper(p, args->fl->fl_start); fl 1358 fs/nfs/nfs4xdr.c p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); fl 1367 fs/nfs/nfs4xdr.c encode_uint32(xdr, nfs4_lock_type(args->fl, 0)); fl 1371 fs/nfs/nfs4xdr.c p = xdr_encode_hyper(p, args->fl->fl_start); fl 1372 fs/nfs/nfs4xdr.c xdr_encode_hyper(p, nfs4_lock_length(args->fl)); fl 4907 fs/nfs/nfs4xdr.c static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl) fl 4919 fs/nfs/nfs4xdr.c if (fl != NULL) { /* manipulate file lock */ fl 4920 fs/nfs/nfs4xdr.c fl->fl_start = (loff_t)offset; fl 4921 fs/nfs/nfs4xdr.c fl->fl_end = fl->fl_start + (loff_t)length - 1; fl 4923 fs/nfs/nfs4xdr.c fl->fl_end = OFFSET_MAX; fl 4924 fs/nfs/nfs4xdr.c fl->fl_type = F_WRLCK; fl 4926 fs/nfs/nfs4xdr.c fl->fl_type = F_RDLCK; fl 4927 fs/nfs/nfs4xdr.c fl->fl_pid = 0; fl 648 fs/nfs/proc.c 
nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl) fl 652 fs/nfs/proc.c return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl, NULL); fl 657 fs/nfs/proc.c static int nfs_lock_check_bounds(const struct file_lock *fl) fl 661 fs/nfs/proc.c start = (__s32)fl->fl_start; fl 662 fs/nfs/proc.c if ((loff_t)start != fl->fl_start) fl 665 fs/nfs/proc.c if (fl->fl_end != OFFSET_MAX) { fl 666 fs/nfs/proc.c end = (__s32)fl->fl_end; fl 667 fs/nfs/proc.c if ((loff_t)end != fl->fl_end) fl 1309 fs/nfs/write.c is_whole_file_wrlock(struct file_lock *fl) fl 1311 fs/nfs/write.c return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX && fl 1312 fs/nfs/write.c fl->fl_type == F_WRLCK; fl 1327 fs/nfs/write.c struct file_lock *fl; fl 1343 fs/nfs/write.c fl = list_first_entry(&flctx->flc_posix, struct file_lock, fl 1345 fs/nfs/write.c if (is_whole_file_wrlock(fl)) fl 1348 fs/nfs/write.c fl = list_first_entry(&flctx->flc_flock, struct file_lock, fl 1350 fs/nfs/write.c if (fl->fl_type == F_WRLCK) fl 524 fs/nfsd/filecache.c struct file_lock *fl = data; fl 527 fs/nfsd/filecache.c if (fl->fl_flags & FL_LEASE) fl 528 fs/nfsd/filecache.c nfsd_file_close_inode_sync(file_inode(fl->fl_file)); fl 30 fs/nfsd/flexfilelayout.c struct pnfs_ff_layout *fl; fl 38 fs/nfsd/flexfilelayout.c fl = kzalloc(sizeof(*fl), GFP_KERNEL); fl 39 fs/nfsd/flexfilelayout.c if (!fl) fl 41 fs/nfsd/flexfilelayout.c args->lg_content = fl; fl 48 fs/nfsd/flexfilelayout.c fl->flags = FF_FLAGS_NO_LAYOUTCOMMIT | FF_FLAGS_NO_IO_THRU_MDS | fl 54 fs/nfsd/flexfilelayout.c fl->uid = make_kuid(&init_user_ns, u); fl 56 fs/nfsd/flexfilelayout.c fl->uid = inode->i_uid; fl 57 fs/nfsd/flexfilelayout.c fl->gid = inode->i_gid; fl 59 fs/nfsd/flexfilelayout.c error = nfsd4_set_deviceid(&fl->deviceid, fhp, device_generation); fl 63 fs/nfsd/flexfilelayout.c fl->fh.size = fhp->fh_handle.fh_size; fl 64 fs/nfsd/flexfilelayout.c memcpy(fl->fh.data, &fhp->fh_handle.fh_base, fl->fh.size); fl 22 fs/nfsd/flexfilelayoutxdr.c struct pnfs_ff_layout *fl = lgp->lg_content; fl 33 fs/nfsd/flexfilelayoutxdr.c fh_len = 4 + fl->fh.size; fl 35 fs/nfsd/flexfilelayoutxdr.c uid.len = sprintf(uid.buf, "%u", from_kuid(&init_user_ns, fl->uid)); fl 36 fs/nfsd/flexfilelayoutxdr.c gid.len = sprintf(gid.buf, "%u", from_kgid(&init_user_ns, fl->gid)); fl 57 fs/nfsd/flexfilelayoutxdr.c p = xdr_encode_opaque_fixed(p, &fl->deviceid, fl 62 fs/nfsd/flexfilelayoutxdr.c *p++ = cpu_to_be32(fl->stateid.si_generation); fl 63 fs/nfsd/flexfilelayoutxdr.c p = xdr_encode_opaque_fixed(p, &fl->stateid.si_opaque, fl 67 fs/nfsd/flexfilelayoutxdr.c p = xdr_encode_opaque(p, fl->fh.data, fl->fh.size); fl 72 fs/nfsd/flexfilelayoutxdr.c *p++ = cpu_to_be32(fl->flags); fl 184 fs/nfsd/nfs4layouts.c struct file_lock *fl; fl 190 fs/nfsd/nfs4layouts.c fl = locks_alloc_lock(); fl 191 fs/nfsd/nfs4layouts.c if (!fl) fl 193 fs/nfsd/nfs4layouts.c locks_init_lock(fl); fl 194 fs/nfsd/nfs4layouts.c fl->fl_lmops = &nfsd4_layouts_lm_ops; fl 195 fs/nfsd/nfs4layouts.c fl->fl_flags = FL_LAYOUT; fl 196 fs/nfsd/nfs4layouts.c fl->fl_type = F_RDLCK; fl 197 fs/nfsd/nfs4layouts.c fl->fl_end = OFFSET_MAX; fl 198 fs/nfsd/nfs4layouts.c fl->fl_owner = ls; fl 199 fs/nfsd/nfs4layouts.c fl->fl_pid = current->tgid; fl 200 fs/nfsd/nfs4layouts.c fl->fl_file = ls->ls_file->nf_file; fl 202 fs/nfsd/nfs4layouts.c status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL); fl 204 fs/nfsd/nfs4layouts.c locks_free_lock(fl); fl 207 fs/nfsd/nfs4layouts.c BUG_ON(fl != NULL); fl 725 fs/nfsd/nfs4layouts.c nfsd4_layout_lm_break(struct file_lock *fl) fl 732 
fs/nfsd/nfs4layouts.c fl->fl_break_time = 0; fl 733 fs/nfsd/nfs4layouts.c nfsd4_recall_file_layout(fl->fl_owner); fl 4447 fs/nfsd/nfs4state.c nfsd_break_deleg_cb(struct file_lock *fl) fl 4450 fs/nfsd/nfs4state.c struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; fl 4458 fs/nfsd/nfs4state.c fl->fl_break_time = 0; fl 4789 fs/nfsd/nfs4state.c struct file_lock *fl; fl 4791 fs/nfsd/nfs4state.c fl = locks_alloc_lock(); fl 4792 fs/nfsd/nfs4state.c if (!fl) fl 4794 fs/nfsd/nfs4state.c fl->fl_lmops = &nfsd_lease_mng_ops; fl 4795 fs/nfsd/nfs4state.c fl->fl_flags = FL_DELEG; fl 4796 fs/nfsd/nfs4state.c fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; fl 4797 fs/nfsd/nfs4state.c fl->fl_end = OFFSET_MAX; fl 4798 fs/nfsd/nfs4state.c fl->fl_owner = (fl_owner_t)dp; fl 4799 fs/nfsd/nfs4state.c fl->fl_pid = current->tgid; fl 4800 fs/nfsd/nfs4state.c fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; fl 4801 fs/nfsd/nfs4state.c return fl; fl 4811 fs/nfsd/nfs4state.c struct file_lock *fl; fl 4851 fs/nfsd/nfs4state.c fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ); fl 4852 fs/nfsd/nfs4state.c if (!fl) fl 4855 fs/nfsd/nfs4state.c status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); fl 4856 fs/nfsd/nfs4state.c if (fl) fl 4857 fs/nfsd/nfs4state.c locks_free_lock(fl); fl 6113 fs/nfsd/nfs4state.c nfsd4_lm_notify(struct file_lock *fl) fl 6115 fs/nfsd/nfs4state.c struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner; fl 6118 fs/nfsd/nfs4state.c struct nfsd4_blocked_lock *nbl = container_of(fl, fl 6142 fs/nfsd/nfs4state.c nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) fl 6146 fs/nfsd/nfs4state.c if (fl->fl_lmops == &nfsd_posix_mng_ops) { fl 6147 fs/nfsd/nfs4state.c lo = (struct nfs4_lockowner *) fl->fl_owner; fl 6161 fs/nfsd/nfs4state.c deny->ld_start = fl->fl_start; fl 6163 fs/nfsd/nfs4state.c if (fl->fl_end != NFS4_MAX_UINT64) fl 6164 fs/nfsd/nfs4state.c deny->ld_length = fl->fl_end - fl->fl_start + 1; fl 6166 fs/nfsd/nfs4state.c if (fl->fl_type != F_RDLCK) fl 6797 fs/nfsd/nfs4state.c struct file_lock *fl; fl 6814 fs/nfsd/nfs4state.c list_for_each_entry(fl, &flctx->flc_posix, fl_list) { fl 6815 fs/nfsd/nfs4state.c if (fl->fl_owner == (fl_owner_t)lowner) { fl 6537 fs/ocfs2/alloc.c struct ocfs2_per_slot_free_list *fl; fl 6543 fs/ocfs2/alloc.c fl = ctxt->c_first_suballocator; fl 6545 fs/ocfs2/alloc.c if (fl->f_first) { fl 6546 fs/ocfs2/alloc.c trace_ocfs2_run_deallocs(fl->f_inode_type, fl 6547 fs/ocfs2/alloc.c fl->f_slot); fl 6549 fs/ocfs2/alloc.c fl->f_inode_type, fl 6550 fs/ocfs2/alloc.c fl->f_slot, fl 6551 fs/ocfs2/alloc.c fl->f_first); fl 6558 fs/ocfs2/alloc.c ctxt->c_first_suballocator = fl->f_next_suballocator; fl 6559 fs/ocfs2/alloc.c kfree(fl); fl 6581 fs/ocfs2/alloc.c struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator; fl 6583 fs/ocfs2/alloc.c while (fl) { fl 6584 fs/ocfs2/alloc.c if (fl->f_inode_type == type && fl->f_slot == slot) fl 6585 fs/ocfs2/alloc.c return fl; fl 6587 fs/ocfs2/alloc.c fl = fl->f_next_suballocator; fl 6590 fs/ocfs2/alloc.c fl = kmalloc(sizeof(*fl), GFP_NOFS); fl 6591 fs/ocfs2/alloc.c if (fl) { fl 6592 fs/ocfs2/alloc.c fl->f_inode_type = type; fl 6593 fs/ocfs2/alloc.c fl->f_slot = slot; fl 6594 fs/ocfs2/alloc.c fl->f_first = NULL; fl 6595 fs/ocfs2/alloc.c fl->f_next_suballocator = ctxt->c_first_suballocator; fl 6597 fs/ocfs2/alloc.c ctxt->c_first_suballocator = fl; fl 6599 fs/ocfs2/alloc.c return fl; fl 6608 fs/ocfs2/alloc.c struct ocfs2_per_slot_free_list *fl = 
ctxt->c_first_suballocator; fl 6610 fs/ocfs2/alloc.c while (fl) { fl 6611 fs/ocfs2/alloc.c if (fl->f_inode_type == type && fl->f_slot == preferred_slot) { fl 6612 fs/ocfs2/alloc.c *real_slot = fl->f_slot; fl 6613 fs/ocfs2/alloc.c return fl; fl 6616 fs/ocfs2/alloc.c fl = fl->f_next_suballocator; fl 6622 fs/ocfs2/alloc.c fl = ctxt->c_first_suballocator; fl 6623 fs/ocfs2/alloc.c *real_slot = fl->f_slot; fl 6625 fs/ocfs2/alloc.c return fl; fl 6631 fs/ocfs2/alloc.c struct ocfs2_per_slot_free_list *fl = NULL; fl 6636 fs/ocfs2/alloc.c fl = et->et_dealloc->c_first_suballocator; fl 6637 fs/ocfs2/alloc.c if (!fl) fl 6640 fs/ocfs2/alloc.c if (!fl->f_first) fl 6659 fs/ocfs2/alloc.c struct ocfs2_per_slot_free_list *fl; fl 6677 fs/ocfs2/alloc.c fl = ocfs2_find_preferred_free_list(EXTENT_ALLOC_SYSTEM_INODE, fl 6683 fs/ocfs2/alloc.c if (!fl) { fl 6688 fs/ocfs2/alloc.c bf = fl->f_first; fl 6689 fs/ocfs2/alloc.c fl->f_first = bf->free_next; fl 6732 fs/ocfs2/alloc.c if (!fl->f_first) { fl 6733 fs/ocfs2/alloc.c dealloc->c_first_suballocator = fl->f_next_suballocator; fl 6734 fs/ocfs2/alloc.c kfree(fl); fl 6755 fs/ocfs2/alloc.c struct ocfs2_per_slot_free_list *fl; fl 6758 fs/ocfs2/alloc.c fl = ocfs2_find_per_slot_free_list(type, slot, ctxt); fl 6759 fs/ocfs2/alloc.c if (fl == NULL) { fl 6779 fs/ocfs2/alloc.c item->free_next = fl->f_first; fl 6781 fs/ocfs2/alloc.c fl->f_first = item; fl 25 fs/ocfs2/locks.c int cmd, struct file_lock *fl) fl 31 fs/ocfs2/locks.c if (fl->fl_type == F_WRLCK) fl 73 fs/ocfs2/locks.c ret = locks_lock_file_wait(file, fl); fl 83 fs/ocfs2/locks.c static int ocfs2_do_funlock(struct file *file, int cmd, struct file_lock *fl) fl 90 fs/ocfs2/locks.c ret = locks_lock_file_wait(file, fl); fl 99 fs/ocfs2/locks.c int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl) fl 104 fs/ocfs2/locks.c if (!(fl->fl_flags & FL_FLOCK)) fl 111 fs/ocfs2/locks.c return locks_lock_file_wait(file, fl); fl 113 fs/ocfs2/locks.c if (fl->fl_type == F_UNLCK) fl 114 fs/ocfs2/locks.c return ocfs2_do_funlock(file, cmd, fl); fl 116 fs/ocfs2/locks.c return ocfs2_do_flock(file, inode, cmd, fl); fl 119 fs/ocfs2/locks.c int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl) fl 124 fs/ocfs2/locks.c if (!(fl->fl_flags & FL_POSIX)) fl 126 fs/ocfs2/locks.c if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) fl 129 fs/ocfs2/locks.c return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl); fl 15 fs/ocfs2/locks.h int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl); fl 16 fs/ocfs2/locks.h int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl); fl 740 fs/ocfs2/stack_user.c struct file_lock *fl) fl 756 fs/ocfs2/stack_user.c fl->fl_type = F_UNLCK; fl 760 fs/ocfs2/stack_user.c return dlm_posix_get(conn->cc_lockspace, ino, file, fl); fl 761 fs/ocfs2/stack_user.c else if (fl->fl_type == F_UNLCK) fl 762 fs/ocfs2/stack_user.c return dlm_posix_unlock(conn->cc_lockspace, ino, file, fl); fl 764 fs/ocfs2/stack_user.c return dlm_posix_lock(conn->cc_lockspace, ino, file, cmd, fl); fl 294 fs/ocfs2/stackglue.c struct file *file, int cmd, struct file_lock *fl) fl 298 fs/ocfs2/stackglue.c return active_stack->sp_ops->plock(conn, ino, file, cmd, fl); fl 212 fs/ocfs2/stackglue.h struct file_lock *fl); fl 284 fs/ocfs2/stackglue.h struct file *file, int cmd, struct file_lock *fl); fl 602 fs/orangefs/file.c static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl) fl 609 fs/orangefs/file.c posix_test_lock(filp, fl); fl 611 fs/orangefs/file.c rc = posix_lock_file(filp, fl, NULL); 
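The fs/locks.c, fs/ocfs2/locks.c and fs/orangefs/file.c entries around this point all serve the fcntl()/flock() byte-range locking path. As a hedged illustration of what reaches that code from userspace, the minimal C sketch below uses the open file description (OFD) lock commands, whose kernel side is the FL_OFDLCK / fl_owner = filp handling indexed above; the /tmp path is a placeholder and error handling is kept minimal.

```c
/*
 * Minimal userspace sketch of OFD (open file description) byte-range
 * locks, i.e. the fcntl() interface served by the fs/locks.c code
 * indexed above (FL_OFDLCK, fl_owner = filp).
 * Build: cc -D_GNU_SOURCE ofd.c -o ofd
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/ofd-lock-demo", O_RDWR | O_CREAT, 0600); /* placeholder path */
	struct flock fl;

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;     /* exclusive byte-range lock */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;            /* 0 = to EOF, i.e. fl_end = OFFSET_MAX in the kernel */
	fl.l_pid = 0;            /* must be 0 for OFD lock commands */

	/* F_OFD_SETLK: non-blocking acquire; ownership follows the open file
	 * description, matching fl->fl_owner = filp in fs/locks.c. */
	if (fcntl(fd, F_OFD_SETLK, &fl) == -1) {
		perror("F_OFD_SETLK");
		close(fd);
		return EXIT_FAILURE;
	}

	/* F_OFD_GETLK reports a conflicting lock; l_type == F_UNLCK means none. */
	fl.l_type = F_WRLCK;
	if (fcntl(fd, F_OFD_GETLK, &fl) == 0)
		printf("conflict check: l_type=%d (F_UNLCK=%d means no conflict)\n",
		       fl.l_type, F_UNLCK);

	fl.l_type = F_UNLCK;     /* release the range */
	fcntl(fd, F_OFD_SETLK, &fl);
	close(fd);
	return EXIT_SUCCESS;
}
```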
fl 1401 fs/read_write.c int fl; fl 1452 fs/read_write.c fl = 0; fl 1461 fs/read_write.c fl = SPLICE_F_NONBLOCK; fl 1464 fs/read_write.c retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl); fl 141 fs/xfs/xfs_quota.h #define xfs_qm_vop_chown_reserve(tp, ip, u, g, p, fl) (0) fl 143 fs/xfs/xfs_quota.h #define xfs_qm_dqattach_locked(ip, fl) (0) fl 71 include/linux/ceph/ceph_fs.h extern void ceph_file_layout_from_legacy(struct ceph_file_layout *fl, fl 73 include/linux/ceph/ceph_fs.h extern void ceph_file_layout_to_legacy(struct ceph_file_layout *fl, fl 11 include/linux/dlm_plock.h int cmd, struct file_lock *fl); fl 13 include/linux/dlm_plock.h struct file_lock *fl); fl 15 include/linux/dlm_plock.h struct file_lock *fl); fl 1152 include/linux/fs.h void locks_free_lock(struct file_lock *fl); fl 1165 include/linux/fs.h extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); fl 1166 include/linux/fs.h extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); fl 1221 include/linux/fs.h static inline void locks_init_lock(struct file_lock *fl) fl 1226 include/linux/fs.h static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) fl 1231 include/linux/fs.h static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl) fl 1246 include/linux/fs.h static inline void posix_test_lock(struct file *filp, struct file_lock *fl) fl 1251 include/linux/fs.h static inline int posix_lock_file(struct file *filp, struct file_lock *fl, fl 1262 include/linux/fs.h static inline int vfs_test_lock(struct file *filp, struct file_lock *fl) fl 1268 include/linux/fs.h struct file_lock *fl, struct file_lock *conf) fl 1273 include/linux/fs.h static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) fl 1278 include/linux/fs.h static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) fl 1306 include/linux/fs.h static inline int lease_modify(struct file_lock *fl, int arg, fl 1327 include/linux/fs.h static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) fl 1329 include/linux/fs.h return locks_lock_inode_wait(locks_inode(filp), fl); fl 78 include/linux/lockd/bind.h extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data); fl 214 include/linux/lockd/lockd.h struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl); fl 44 include/linux/lockd/xdr.h struct file_lock fl; fl 1746 include/linux/lsm_hooks.h struct flowi *fl); fl 1788 include/linux/lsm_hooks.h const struct flowi *fl); fl 137 include/linux/mlx4/qp.h u8 fl; fl 635 include/linux/mlx5/mlx5_ifc.h u8 fl[0x1]; fl 9008 include/linux/mlx5/mlx5_ifc.h u8 fl[0x1]; fl 349 include/linux/netfilter.h int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, fl 363 include/linux/netfilter.h void (*decode_session)(struct sk_buff *skb, struct flowi *fl); fl 372 include/linux/netfilter.h nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) fl 380 include/linux/netfilter.h nat_hook->decode_session(skb, fl); fl 420 include/linux/netfilter.h nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) fl 26 include/linux/netfilter_ipv4.h int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, fl 35 include/linux/netfilter_ipv4.h struct flowi *fl, bool strict) fl 49 include/linux/netfilter_ipv6.h int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, fl 98 include/linux/netfilter_ipv6.h struct 
flowi *fl, bool strict); fl 101 include/linux/netfilter_ipv6.h struct flowi *fl, bool strict) fl 107 include/linux/netfilter_ipv6.h return v6ops->route(net, dst, fl, strict); fl 112 include/linux/netfilter_ipv6.h return __nf_ip6_route(net, dst, fl, strict); fl 551 include/linux/nfs_xdr.h struct file_lock * fl; fl 573 include/linux/nfs_xdr.h struct file_lock * fl; fl 587 include/linux/nfs_xdr.h struct file_lock * fl; fl 1304 include/linux/security.h void security_sk_classify_flow(struct sock *sk, struct flowi *fl); fl 1305 include/linux/security.h void security_req_classify_flow(const struct request_sock *req, struct flowi *fl); fl 1456 include/linux/security.h static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl) fl 1460 include/linux/security.h static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) fl 1587 include/linux/security.h const struct flowi *fl); fl 1589 include/linux/security.h void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); fl 1641 include/linux/security.h struct xfrm_policy *xp, const struct flowi *fl) fl 1651 include/linux/security.h static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) fl 219 include/net/9p/client.h int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl); fl 152 include/net/dn_fib.h #define dn_fib_lookup(fl, res) (-ESRCH) fl 154 include/net/dn_fib.h #define dn_fib_select_multipath(fl, res) do { } while(0) fl 463 include/net/dst.h const struct flowi *fl, fl 472 include/net/dst.h const struct flowi *fl, const struct sock *sk, fl 480 include/net/dst.h const struct flowi *fl, fl 494 include/net/dst.h const struct flowi *fl, const struct sock *sk, fl 499 include/net/dst.h const struct flowi *fl, fl 504 include/net/dst.h const struct flowi *fl, const struct sock *sk, fl 24 include/net/inet6_connection_sock.h int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); fl 34 include/net/inet_connection_sock.h int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl); fl 168 include/net/inet_sock.h struct flowi fl; fl 207 include/net/ip.h int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, fl 235 include/net/ip.h struct flowi *fl) fl 237 include/net/ip.h return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); fl 51 include/net/ip6_tunnel.h struct flowi fl; /* flowi template for xmit */ fl 339 include/net/ipv6.h struct ip6_flowlabel *fl; fl 406 include/net/ipv6.h struct ip6_flowlabel *fl, fl 416 include/net/ipv6.h static inline void fl6_sock_release(struct ip6_flowlabel *fl) fl 418 include/net/ipv6.h if (fl) fl 419 include/net/ipv6.h atomic_dec(&fl->users); fl 40 include/net/l3mdev.h int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, fl 43 include/net/l3mdev.h void l3mdev_update_flow(struct net *net, struct flowi *fl); fl 284 include/net/l3mdev.h int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, fl 290 include/net/l3mdev.h void l3mdev_update_flow(struct net *net, struct flowi *fl) fl 451 include/net/sctp/structs.h struct flowi *fl, fl 455 include/net/sctp/structs.h struct flowi *fl); fl 806 include/net/sctp/structs.h struct flowi fl; fl 1993 include/net/tcp.h struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, fl 1998 include/net/tcp.h struct flowi *fl, struct request_sock *req, fl 332 include/net/xfrm.h const struct flowi *fl); fl 843 include/net/xfrm.h __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli) fl 846 
include/net/xfrm.h switch(fl->flowi_proto) { fl 870 include/net/xfrm.h __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli) fl 873 include/net/xfrm.h switch(fl->flowi_proto) { fl 894 include/net/xfrm.h const struct flowi *fl, unsigned short family); fl 1131 include/net/xfrm.h int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, fl 1134 include/net/xfrm.h static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, fl 1137 include/net/xfrm.h return __xfrm_decode_session(skb, fl, family, 0); fl 1141 include/net/xfrm.h struct flowi *fl, fl 1144 include/net/xfrm.h return __xfrm_decode_session(skb, fl, family, 1); fl 1216 include/net/xfrm.h struct flowi *fl, fl 1234 include/net/xfrm.h xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family) fl 1238 include/net/xfrm.h return (xfrm_address_t *)&fl->u.ip4.daddr; fl 1240 include/net/xfrm.h return (xfrm_address_t *)&fl->u.ip6.daddr; fl 1246 include/net/xfrm.h xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family) fl 1250 include/net/xfrm.h return (xfrm_address_t *)&fl->u.ip4.saddr; fl 1252 include/net/xfrm.h return (xfrm_address_t *)&fl->u.ip6.saddr; fl 1258 include/net/xfrm.h void xfrm_flowi_addr_get(const struct flowi *fl, fl 1264 include/net/xfrm.h memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4)); fl 1265 include/net/xfrm.h memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4)); fl 1268 include/net/xfrm.h saddr->in6 = fl->u.ip6.saddr; fl 1269 include/net/xfrm.h daddr->in6 = fl->u.ip6.daddr; fl 1311 include/net/xfrm.h xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl, fl 1317 include/net/xfrm.h (const xfrm_address_t *)&fl->u.ip4.daddr, fl 1318 include/net/xfrm.h (const xfrm_address_t *)&fl->u.ip4.saddr); fl 1321 include/net/xfrm.h (const xfrm_address_t *)&fl->u.ip6.daddr, fl 1322 include/net/xfrm.h (const xfrm_address_t *)&fl->u.ip6.saddr); fl 1480 include/net/xfrm.h const struct flowi *fl, fl 1100 include/trace/events/afs.h TP_PROTO(struct afs_vnode *vnode, struct file_lock *fl, fl 1103 include/trace/events/afs.h TP_ARGS(vnode, fl, event, error), fl 1118 include/trace/events/afs.h __entry->debug_id = fl ? fl->fl_u.afs.debug_id : 0; fl 1130 include/trace/events/afs.h TP_PROTO(struct afs_vnode *vnode, struct file_lock *fl, fl 1133 include/trace/events/afs.h TP_ARGS(vnode, fl, op), fl 1147 include/trace/events/afs.h __entry->from = fl->fl_start; fl 1148 include/trace/events/afs.h __entry->len = fl->fl_end - fl->fl_start + 1; fl 1150 include/trace/events/afs.h __entry->type = fl->fl_type; fl 1151 include/trace/events/afs.h __entry->flags = fl->fl_flags; fl 1152 include/trace/events/afs.h __entry->debug_id = fl->fl_u.afs.debug_id; fl 63 include/trace/events/filelock.h TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), fl 65 include/trace/events/filelock.h TP_ARGS(inode, fl, ret), fl 68 include/trace/events/filelock.h __field(struct file_lock *, fl) fl 82 include/trace/events/filelock.h __entry->fl = fl ? fl : NULL; fl 85 include/trace/events/filelock.h __entry->fl_blocker = fl ? fl->fl_blocker : NULL; fl 86 include/trace/events/filelock.h __entry->fl_owner = fl ? fl->fl_owner : NULL; fl 87 include/trace/events/filelock.h __entry->fl_pid = fl ? fl->fl_pid : 0; fl 88 include/trace/events/filelock.h __entry->fl_flags = fl ? fl->fl_flags : 0; fl 89 include/trace/events/filelock.h __entry->fl_type = fl ? fl->fl_type : 0; fl 90 include/trace/events/filelock.h __entry->fl_start = fl ? 
fl->fl_start : 0; fl 91 include/trace/events/filelock.h __entry->fl_end = fl ? fl->fl_end : 0; fl 96 include/trace/events/filelock.h __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev), fl 104 include/trace/events/filelock.h TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), fl 105 include/trace/events/filelock.h TP_ARGS(inode, fl, ret)); fl 108 include/trace/events/filelock.h TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), fl 109 include/trace/events/filelock.h TP_ARGS(inode, fl, ret)); fl 112 include/trace/events/filelock.h TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), fl 113 include/trace/events/filelock.h TP_ARGS(inode, fl, ret)); fl 116 include/trace/events/filelock.h TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), fl 117 include/trace/events/filelock.h TP_ARGS(inode, fl, ret)); fl 120 include/trace/events/filelock.h TP_PROTO(struct inode *inode, struct file_lock *fl), fl 122 include/trace/events/filelock.h TP_ARGS(inode, fl), fl 125 include/trace/events/filelock.h __field(struct file_lock *, fl) fl 137 include/trace/events/filelock.h __entry->fl = fl ? fl : NULL; fl 140 include/trace/events/filelock.h __entry->fl_blocker = fl ? fl->fl_blocker : NULL; fl 141 include/trace/events/filelock.h __entry->fl_owner = fl ? fl->fl_owner : NULL; fl 142 include/trace/events/filelock.h __entry->fl_flags = fl ? fl->fl_flags : 0; fl 143 include/trace/events/filelock.h __entry->fl_type = fl ? fl->fl_type : 0; fl 144 include/trace/events/filelock.h __entry->fl_break_time = fl ? fl->fl_break_time : 0; fl 145 include/trace/events/filelock.h __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0; fl 149 include/trace/events/filelock.h __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev), fl 156 include/trace/events/filelock.h DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lock *fl), fl 157 include/trace/events/filelock.h TP_ARGS(inode, fl)); fl 159 include/trace/events/filelock.h DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lock *fl), fl 160 include/trace/events/filelock.h TP_ARGS(inode, fl)); fl 162 include/trace/events/filelock.h DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl), fl 163 include/trace/events/filelock.h TP_ARGS(inode, fl)); fl 165 include/trace/events/filelock.h DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl), fl 166 include/trace/events/filelock.h TP_ARGS(inode, fl)); fl 168 include/trace/events/filelock.h DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl), fl 169 include/trace/events/filelock.h TP_ARGS(inode, fl)); fl 172 include/trace/events/filelock.h TP_PROTO(struct inode *inode, struct file_lock *fl), fl 174 include/trace/events/filelock.h TP_ARGS(inode, fl), fl 193 include/trace/events/filelock.h __entry->fl_owner = fl->fl_owner; fl 194 include/trace/events/filelock.h __entry->fl_flags = fl->fl_flags; fl 195 include/trace/events/filelock.h __entry->fl_type = fl->fl_type; fl 2536 kernel/trace/trace_events_hist.c unsigned long fl = flags & ~HIST_FIELD_FL_LOG2; fl 2538 kernel/trace/trace_events_hist.c hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); fl 31 net/ceph/ceph_fs.c void ceph_file_layout_from_legacy(struct ceph_file_layout *fl, fl 34 net/ceph/ceph_fs.c fl->stripe_unit = le32_to_cpu(legacy->fl_stripe_unit); fl 35 net/ceph/ceph_fs.c 
fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count); fl 36 net/ceph/ceph_fs.c fl->object_size = le32_to_cpu(legacy->fl_object_size); fl 37 net/ceph/ceph_fs.c fl->pool_id = le32_to_cpu(legacy->fl_pg_pool); fl 38 net/ceph/ceph_fs.c if (fl->pool_id == 0 && fl->stripe_unit == 0 && fl 39 net/ceph/ceph_fs.c fl->stripe_count == 0 && fl->object_size == 0) fl 40 net/ceph/ceph_fs.c fl->pool_id = -1; fl 44 net/ceph/ceph_fs.c void ceph_file_layout_to_legacy(struct ceph_file_layout *fl, fl 47 net/ceph/ceph_fs.c legacy->fl_stripe_unit = cpu_to_le32(fl->stripe_unit); fl 48 net/ceph/ceph_fs.c legacy->fl_stripe_count = cpu_to_le32(fl->stripe_count); fl 49 net/ceph/ceph_fs.c legacy->fl_object_size = cpu_to_le32(fl->object_size); fl 50 net/ceph/ceph_fs.c if (fl->pool_id >= 0) fl 51 net/ceph/ceph_fs.c legacy->fl_pg_pool = cpu_to_le32(fl->pool_id); fl 4121 net/core/dev.c struct sd_flow_limit *fl; fl 4131 net/core/dev.c fl = rcu_dereference(sd->flow_limit); fl 4132 net/core/dev.c if (fl) { fl 4133 net/core/dev.c new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); fl 4134 net/core/dev.c old_flow = fl->history[fl->history_head]; fl 4135 net/core/dev.c fl->history[fl->history_head] = new_flow; fl 4137 net/core/dev.c fl->history_head++; fl 4138 net/core/dev.c fl->history_head &= FLOW_LIMIT_HISTORY - 1; fl 4140 net/core/dev.c if (likely(fl->buckets[old_flow])) fl 4141 net/core/dev.c fl->buckets[old_flow]--; fl 4143 net/core/dev.c if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { fl 4144 net/core/dev.c fl->count++; fl 246 net/core/fib_rules.c struct flowi *fl, int flags, fl 251 net/core/fib_rules.c if (rule->iifindex && (rule->iifindex != fl->flowi_iif)) fl 254 net/core/fib_rules.c if (rule->oifindex && (rule->oifindex != fl->flowi_oif)) fl 257 net/core/fib_rules.c if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask) fl 260 net/core/fib_rules.c if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id)) fl 263 net/core/fib_rules.c if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg)) fl 266 net/core/fib_rules.c if (uid_lt(fl->flowi_uid, rule->uid_range.start) || fl 267 net/core/fib_rules.c uid_gt(fl->flowi_uid, rule->uid_range.end)) fl 270 net/core/fib_rules.c ret = ops->match(rule, fl, flags); fl 275 net/core/fib_rules.c int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, fl 285 net/core/fib_rules.c if (!fib_rule_match(rule, ops, fl, flags, arg)) fl 301 net/core/fib_rules.c err = ops->action(rule, fl, flags, arg); fl 153 net/core/net-procfs.c struct sd_flow_limit *fl; fl 156 net/core/net-procfs.c fl = rcu_dereference(sd->flow_limit); fl 157 net/core/net-procfs.c if (fl) fl 158 net/core/net-procfs.c flow_limit_count = fl->count; fl 70 net/dccp/ipv4.c fl4 = &inet->cork.fl.u.ip4; fl 138 net/dccp/output.c err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); fl 1284 net/decnet/dn_route.c int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int flags) fl 1288 net/decnet/dn_route.c err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD); fl 1289 net/decnet/dn_route.c if (err == 0 && fl->flowidn_proto) { fl 1291 net/decnet/dn_route.c flowidn_to_flowi(fl), sk, 0); fl 108 net/decnet/dn_rules.c static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) fl 111 net/decnet/dn_rules.c struct flowidn *fld = &fl->u.dn; fl 1204 net/ipv4/af_inet.c fl4 = &inet->cork.fl.u.ip4; fl 1256 net/ipv4/af_inet.c fl4 = &inet->cork.fl.u.ip4; fl 47 net/ipv4/datagram.c fl4 = &inet->cork.fl.u.ip4; fl 171 net/ipv4/fib_rules.c static 
int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) fl 174 net/ipv4/fib_rules.c struct flowi4 *fl4 = &fl->u.ip4; fl 614 net/ipv4/inet_connection_sock.c fl4 = &newinet->cork.fl.u.ip4; fl 1078 net/ipv4/inet_connection_sock.c static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl) fl 1090 net/ipv4/inet_connection_sock.c fl4 = &fl->u.ip4; fl 1110 net/ipv4/inet_connection_sock.c dst = inet_csk_rebuild_route(sk, &inet->cork.fl); fl 1118 net/ipv4/inet_connection_sock.c dst = inet_csk_rebuild_route(sk, &inet->cork.fl); fl 453 net/ipv4/ip_output.c int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, fl 469 net/ipv4/ip_output.c fl4 = &fl->u.ip4; fl 200 net/ipv4/ip_vti.c struct flowi *fl) fl 215 net/ipv4/ip_vti.c fl->u.ip4.flowi4_oif = dev->ifindex; fl 216 net/ipv4/ip_vti.c fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; fl 217 net/ipv4/ip_vti.c rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4); fl 228 net/ipv4/ip_vti.c fl->u.ip6.flowi6_oif = dev->ifindex; fl 229 net/ipv4/ip_vti.c fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; fl 230 net/ipv4/ip_vti.c dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6); fl 247 net/ipv4/ip_vti.c dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0); fl 308 net/ipv4/ip_vti.c struct flowi fl; fl 313 net/ipv4/ip_vti.c memset(&fl, 0, sizeof(fl)); fl 317 net/ipv4/ip_vti.c xfrm_decode_session(skb, &fl, AF_INET); fl 321 net/ipv4/ip_vti.c xfrm_decode_session(skb, &fl, AF_INET6); fl 329 net/ipv4/ip_vti.c fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key); fl 331 net/ipv4/ip_vti.c return vti_xmit(skb, dev, &fl); fl 191 net/ipv4/ipmr.c static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) fl 83 net/ipv4/netfilter.c int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, fl 86 net/ipv4/netfilter.c struct rtable *rt = ip_route_output_key(net, &fl->u.ip4); fl 403 net/ipv4/syncookies.c inet_sk(ret)->cork.fl.u.ip4 = fl4; fl 6571 net/ipv4/tcp_input.c struct flowi fl; fl 6623 net/ipv4/tcp_input.c dst = af_ops->route_req(sk, &fl, req); fl 6666 net/ipv4/tcp_input.c af_ops->send_synack(fastopen_sk, dst, &fl, req, fl 6683 net/ipv4/tcp_input.c af_ops->send_synack(sk, dst, &fl, req, &foc, fl 229 net/ipv4/tcp_ipv4.c fl4 = &inet->cork.fl.u.ip4; fl 939 net/ipv4/tcp_ipv4.c struct flowi *fl, fl 1361 net/ipv4/tcp_ipv4.c struct flowi *fl, fl 1364 net/ipv4/tcp_ipv4.c return inet_csk_route_req(sk, &fl->u.ip4, req); fl 1177 net/ipv4/tcp_output.c err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); fl 3850 net/ipv4/tcp_output.c struct flowi fl; fl 3854 net/ipv4/tcp_output.c res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL); fl 911 net/ipv4/udp.c struct flowi4 *fl4 = &inet->cork.fl.u.ip4; fl 998 net/ipv4/udp.c fl4 = &inet->cork.fl.u.ip4; fl 1195 net/ipv4/udp.c fl4 = &inet->cork.fl.u.ip4; fl 1276 net/ipv4/udp.c ret = ip_append_page(sk, &inet->cork.fl.u.ip4, fl 72 net/ipv4/xfrm4_policy.c const struct flowi *fl) fl 75 net/ipv4/xfrm4_policy.c const struct flowi4 *fl4 = &fl->u.ip4; fl 295 net/ipv6/fib6_rules.c static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) fl 298 net/ipv6/fib6_rules.c struct flowi6 *fl6 = &fl->u.ip6; fl 60 net/ipv6/ip6_flowlabel.c #define for_each_fl_rcu(hash, fl) \ fl 61 net/ipv6/ip6_flowlabel.c for (fl = rcu_dereference_bh(fl_ht[(hash)]); \ fl 62 net/ipv6/ip6_flowlabel.c fl != NULL; \ fl 63 net/ipv6/ip6_flowlabel.c fl = rcu_dereference_bh(fl->next)) fl 64 net/ipv6/ip6_flowlabel.c #define for_each_fl_continue_rcu(fl) \ fl 
65 net/ipv6/ip6_flowlabel.c for (fl = rcu_dereference_bh(fl->next); \ fl 66 net/ipv6/ip6_flowlabel.c fl != NULL; \ fl 67 net/ipv6/ip6_flowlabel.c fl = rcu_dereference_bh(fl->next)) fl 76 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl; fl 78 net/ipv6/ip6_flowlabel.c for_each_fl_rcu(FL_HASH(label), fl) { fl 79 net/ipv6/ip6_flowlabel.c if (fl->label == label && net_eq(fl->fl_net, net)) fl 80 net/ipv6/ip6_flowlabel.c return fl; fl 87 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl; fl 90 net/ipv6/ip6_flowlabel.c fl = __fl_lookup(net, label); fl 91 net/ipv6/ip6_flowlabel.c if (fl && !atomic_inc_not_zero(&fl->users)) fl 92 net/ipv6/ip6_flowlabel.c fl = NULL; fl 94 net/ipv6/ip6_flowlabel.c return fl; fl 97 net/ipv6/ip6_flowlabel.c static bool fl_shared_exclusive(struct ip6_flowlabel *fl) fl 99 net/ipv6/ip6_flowlabel.c return fl->share == IPV6_FL_S_EXCL || fl 100 net/ipv6/ip6_flowlabel.c fl->share == IPV6_FL_S_PROCESS || fl 101 net/ipv6/ip6_flowlabel.c fl->share == IPV6_FL_S_USER; fl 106 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu); fl 108 net/ipv6/ip6_flowlabel.c if (fl->share == IPV6_FL_S_PROCESS) fl 109 net/ipv6/ip6_flowlabel.c put_pid(fl->owner.pid); fl 110 net/ipv6/ip6_flowlabel.c kfree(fl->opt); fl 111 net/ipv6/ip6_flowlabel.c kfree(fl); fl 115 net/ipv6/ip6_flowlabel.c static void fl_free(struct ip6_flowlabel *fl) fl 117 net/ipv6/ip6_flowlabel.c if (!fl) fl 120 net/ipv6/ip6_flowlabel.c if (fl_shared_exclusive(fl) || fl->opt) fl 123 net/ipv6/ip6_flowlabel.c call_rcu(&fl->rcu, fl_free_rcu); fl 126 net/ipv6/ip6_flowlabel.c static void fl_release(struct ip6_flowlabel *fl) fl 130 net/ipv6/ip6_flowlabel.c fl->lastuse = jiffies; fl 131 net/ipv6/ip6_flowlabel.c if (atomic_dec_and_test(&fl->users)) { fl 132 net/ipv6/ip6_flowlabel.c unsigned long ttd = fl->lastuse + fl->linger; fl 133 net/ipv6/ip6_flowlabel.c if (time_after(ttd, fl->expires)) fl 134 net/ipv6/ip6_flowlabel.c fl->expires = ttd; fl 135 net/ipv6/ip6_flowlabel.c ttd = fl->expires; fl 136 net/ipv6/ip6_flowlabel.c if (fl->opt && fl->share == IPV6_FL_S_EXCL) { fl 137 net/ipv6/ip6_flowlabel.c struct ipv6_txoptions *opt = fl->opt; fl 138 net/ipv6/ip6_flowlabel.c fl->opt = NULL; fl 157 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl; fl 161 net/ipv6/ip6_flowlabel.c while ((fl = rcu_dereference_protected(*flp, fl 163 net/ipv6/ip6_flowlabel.c if (atomic_read(&fl->users) == 0) { fl 164 net/ipv6/ip6_flowlabel.c unsigned long ttd = fl->lastuse + fl->linger; fl 165 net/ipv6/ip6_flowlabel.c if (time_after(ttd, fl->expires)) fl 166 net/ipv6/ip6_flowlabel.c fl->expires = ttd; fl 167 net/ipv6/ip6_flowlabel.c ttd = fl->expires; fl 169 net/ipv6/ip6_flowlabel.c *flp = fl->next; fl 170 net/ipv6/ip6_flowlabel.c fl_free(fl); fl 177 net/ipv6/ip6_flowlabel.c flp = &fl->next; fl 194 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl; fl 198 net/ipv6/ip6_flowlabel.c while ((fl = rcu_dereference_protected(*flp, fl 200 net/ipv6/ip6_flowlabel.c if (net_eq(fl->fl_net, net) && fl 201 net/ipv6/ip6_flowlabel.c atomic_read(&fl->users) == 0) { fl 202 net/ipv6/ip6_flowlabel.c *flp = fl->next; fl 203 net/ipv6/ip6_flowlabel.c fl_free(fl); fl 207 net/ipv6/ip6_flowlabel.c flp = &fl->next; fl 214 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl, __be32 label) fl 218 net/ipv6/ip6_flowlabel.c fl->label = label & IPV6_FLOWLABEL_MASK; fl 223 net/ipv6/ip6_flowlabel.c fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK; fl 224 net/ipv6/ip6_flowlabel.c if (fl->label) { fl 225 net/ipv6/ip6_flowlabel.c lfl = __fl_lookup(net, 
fl->label); fl 239 net/ipv6/ip6_flowlabel.c lfl = __fl_lookup(net, fl->label); fl 247 net/ipv6/ip6_flowlabel.c fl->lastuse = jiffies; fl 248 net/ipv6/ip6_flowlabel.c fl->next = fl_ht[FL_HASH(fl->label)]; fl 249 net/ipv6/ip6_flowlabel.c rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl); fl 268 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl = sfl->fl; fl 270 net/ipv6/ip6_flowlabel.c if (fl->label == label && atomic_inc_not_zero(&fl->users)) { fl 271 net/ipv6/ip6_flowlabel.c fl->lastuse = jiffies; fl 273 net/ipv6/ip6_flowlabel.c return fl; fl 295 net/ipv6/ip6_flowlabel.c fl_release(sfl->fl); fl 313 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl, fl 316 net/ipv6/ip6_flowlabel.c struct ipv6_txoptions *fl_opt = fl->opt; fl 350 net/ipv6/ip6_flowlabel.c static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires) fl 360 net/ipv6/ip6_flowlabel.c fl->lastuse = jiffies; fl 361 net/ipv6/ip6_flowlabel.c if (time_before(fl->linger, linger)) fl 362 net/ipv6/ip6_flowlabel.c fl->linger = linger; fl 363 net/ipv6/ip6_flowlabel.c if (time_before(expires, fl->linger)) fl 364 net/ipv6/ip6_flowlabel.c expires = fl->linger; fl 365 net/ipv6/ip6_flowlabel.c if (time_before(fl->expires, fl->lastuse + expires)) fl 366 net/ipv6/ip6_flowlabel.c fl->expires = fl->lastuse + expires; fl 376 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl = NULL; fl 387 net/ipv6/ip6_flowlabel.c fl = kzalloc(sizeof(*fl), GFP_KERNEL); fl 388 net/ipv6/ip6_flowlabel.c if (!fl) fl 397 net/ipv6/ip6_flowlabel.c fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL); fl 398 net/ipv6/ip6_flowlabel.c if (!fl->opt) fl 401 net/ipv6/ip6_flowlabel.c memset(fl->opt, 0, sizeof(*fl->opt)); fl 402 net/ipv6/ip6_flowlabel.c fl->opt->tot_len = sizeof(*fl->opt) + olen; fl 404 net/ipv6/ip6_flowlabel.c if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen)) fl 408 net/ipv6/ip6_flowlabel.c msg.msg_control = (void *)(fl->opt+1); fl 411 net/ipv6/ip6_flowlabel.c ipc6.opt = fl->opt; fl 416 net/ipv6/ip6_flowlabel.c if (fl->opt->opt_flen) fl 418 net/ipv6/ip6_flowlabel.c if (fl->opt->opt_nflen == 0) { fl 419 net/ipv6/ip6_flowlabel.c kfree(fl->opt); fl 420 net/ipv6/ip6_flowlabel.c fl->opt = NULL; fl 424 net/ipv6/ip6_flowlabel.c fl->fl_net = net; fl 425 net/ipv6/ip6_flowlabel.c fl->expires = jiffies; fl 426 net/ipv6/ip6_flowlabel.c err = fl6_renew(fl, freq->flr_linger, freq->flr_expires); fl 429 net/ipv6/ip6_flowlabel.c fl->share = freq->flr_share; fl 436 net/ipv6/ip6_flowlabel.c fl->dst = freq->flr_dst; fl 437 net/ipv6/ip6_flowlabel.c atomic_set(&fl->users, 1); fl 438 net/ipv6/ip6_flowlabel.c switch (fl->share) { fl 443 net/ipv6/ip6_flowlabel.c fl->owner.pid = get_task_pid(current, PIDTYPE_PID); fl 446 net/ipv6/ip6_flowlabel.c fl->owner.uid = current_euid(); fl 452 net/ipv6/ip6_flowlabel.c if (fl_shared_exclusive(fl) || fl->opt) fl 454 net/ipv6/ip6_flowlabel.c return fl; fl 457 net/ipv6/ip6_flowlabel.c if (fl) { fl 458 net/ipv6/ip6_flowlabel.c kfree(fl->opt); fl 459 net/ipv6/ip6_flowlabel.c kfree(fl); fl 490 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl) fl 493 net/ipv6/ip6_flowlabel.c sfl->fl = fl; fl 518 net/ipv6/ip6_flowlabel.c if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) { fl 520 net/ipv6/ip6_flowlabel.c freq->flr_label = sfl->fl->label; fl 521 net/ipv6/ip6_flowlabel.c freq->flr_dst = sfl->fl->dst; fl 522 net/ipv6/ip6_flowlabel.c freq->flr_share = sfl->fl->share; fl 523 net/ipv6/ip6_flowlabel.c freq->flr_expires = (sfl->fl->expires - jiffies) / HZ; fl 524 net/ipv6/ip6_flowlabel.c 
freq->flr_linger = sfl->fl->linger / HZ; fl 545 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl, *fl1 = NULL; fl 570 net/ipv6/ip6_flowlabel.c if (sfl->fl->label == freq.flr_label) { fl 575 net/ipv6/ip6_flowlabel.c fl_release(sfl->fl); fl 586 net/ipv6/ip6_flowlabel.c if (sfl->fl->label == freq.flr_label) { fl 587 net/ipv6/ip6_flowlabel.c err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires); fl 596 net/ipv6/ip6_flowlabel.c fl = fl_lookup(net, freq.flr_label); fl 597 net/ipv6/ip6_flowlabel.c if (fl) { fl 598 net/ipv6/ip6_flowlabel.c err = fl6_renew(fl, freq.flr_linger, freq.flr_expires); fl 599 net/ipv6/ip6_flowlabel.c fl_release(fl); fl 627 net/ipv6/ip6_flowlabel.c fl = fl_create(net, sk, &freq, optval, optlen, &err); fl 628 net/ipv6/ip6_flowlabel.c if (!fl) fl 636 net/ipv6/ip6_flowlabel.c if (sfl->fl->label == freq.flr_label) { fl 641 net/ipv6/ip6_flowlabel.c fl1 = sfl->fl; fl 658 net/ipv6/ip6_flowlabel.c fl1->share != fl->share || fl 660 net/ipv6/ip6_flowlabel.c (fl1->owner.pid != fl->owner.pid)) || fl 662 net/ipv6/ip6_flowlabel.c !uid_eq(fl1->owner.uid, fl->owner.uid))) fl 668 net/ipv6/ip6_flowlabel.c if (fl->linger > fl1->linger) fl 669 net/ipv6/ip6_flowlabel.c fl1->linger = fl->linger; fl 670 net/ipv6/ip6_flowlabel.c if ((long)(fl->expires - fl1->expires) > 0) fl 671 net/ipv6/ip6_flowlabel.c fl1->expires = fl->expires; fl 673 net/ipv6/ip6_flowlabel.c fl_free(fl); fl 693 net/ipv6/ip6_flowlabel.c fl1 = fl_intern(net, fl, freq.flr_label); fl 699 net/ipv6/ip6_flowlabel.c &fl->label, sizeof(fl->label))) { fl 704 net/ipv6/ip6_flowlabel.c fl_link(np, sfl1, fl); fl 712 net/ipv6/ip6_flowlabel.c fl_free(fl); fl 729 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl = NULL; fl 734 net/ipv6/ip6_flowlabel.c for_each_fl_rcu(state->bucket, fl) { fl 735 net/ipv6/ip6_flowlabel.c if (net_eq(fl->fl_net, net)) fl 739 net/ipv6/ip6_flowlabel.c fl = NULL; fl 741 net/ipv6/ip6_flowlabel.c return fl; fl 744 net/ipv6/ip6_flowlabel.c static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl) fl 749 net/ipv6/ip6_flowlabel.c for_each_fl_continue_rcu(fl) { fl 750 net/ipv6/ip6_flowlabel.c if (net_eq(fl->fl_net, net)) fl 756 net/ipv6/ip6_flowlabel.c for_each_fl_rcu(state->bucket, fl) { fl 757 net/ipv6/ip6_flowlabel.c if (net_eq(fl->fl_net, net)) fl 762 net/ipv6/ip6_flowlabel.c fl = NULL; fl 765 net/ipv6/ip6_flowlabel.c return fl; fl 770 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl = ip6fl_get_first(seq); fl 771 net/ipv6/ip6_flowlabel.c if (fl) fl 772 net/ipv6/ip6_flowlabel.c while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL) fl 774 net/ipv6/ip6_flowlabel.c return pos ? NULL : fl; fl 790 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl; fl 793 net/ipv6/ip6_flowlabel.c fl = ip6fl_get_first(seq); fl 795 net/ipv6/ip6_flowlabel.c fl = ip6fl_get_next(seq, v); fl 797 net/ipv6/ip6_flowlabel.c return fl; fl 812 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl = v; fl 815 net/ipv6/ip6_flowlabel.c (unsigned int)ntohl(fl->label), fl 816 net/ipv6/ip6_flowlabel.c fl->share, fl 817 net/ipv6/ip6_flowlabel.c ((fl->share == IPV6_FL_S_PROCESS) ? fl 818 net/ipv6/ip6_flowlabel.c pid_nr_ns(fl->owner.pid, state->pid_ns) : fl 819 net/ipv6/ip6_flowlabel.c ((fl->share == IPV6_FL_S_USER) ? 
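The net/ipv6/ip6_flowlabel.c entries above (fl_lookup, fl_create, fl_intern, fl6_renew, and the copy_to_user of fl->label near line 699) back the IPV6_FLOWLABEL_MGR socket option. The sketch below is a minimal userspace request for a kernel-chosen flow label under that assumption; the struct layout and constants are mirrored from include/uapi/linux/in6.h behind #ifndef guards, and the destination address is a placeholder.

```c
/*
 * Userspace sketch of an IPV6_FLOWLABEL_MGR request, serviced by the
 * net/ipv6/ip6_flowlabel.c code indexed above.  With flr_label == 0 the
 * kernel picks a label and copies it back into this struct.
 */
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPV6_FLOWLABEL_MGR
#define IPV6_FLOWLABEL_MGR 32
#endif
#ifndef IPV6_FL_A_GET
#define IPV6_FL_A_GET    0
#define IPV6_FL_F_CREATE 1
#define IPV6_FL_S_EXCL   1
/* mirrors struct in6_flowlabel_req from include/uapi/linux/in6.h */
struct in6_flowlabel_req {
	struct in6_addr flr_dst;
	uint32_t flr_label;      /* network byte order, low 20 bits */
	uint8_t  flr_action;
	uint8_t  flr_share;
	uint16_t flr_flags;
	uint16_t flr_expires;
	uint16_t flr_linger;
	uint32_t __flr_pad;
};
#endif

int main(void)
{
	int s = socket(AF_INET6, SOCK_DGRAM, 0);
	struct in6_flowlabel_req freq;

	if (s < 0) {
		perror("socket");
		return 1;
	}

	memset(&freq, 0, sizeof(freq));
	inet_pton(AF_INET6, "2001:db8::1", &freq.flr_dst); /* placeholder dst */
	freq.flr_label   = 0;                 /* 0: let the kernel choose a label */
	freq.flr_action  = IPV6_FL_A_GET;     /* served via fl_create()/fl_intern() */
	freq.flr_flags   = IPV6_FL_F_CREATE;
	freq.flr_share   = IPV6_FL_S_EXCL;    /* exclusive share, fl_shared_exclusive() */
	freq.flr_linger  = 6;                 /* seconds, stored as fl->linger */
	freq.flr_expires = 30;                /* seconds, bounds fl->expires  */

	if (setsockopt(s, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
		       &freq, sizeof(freq)) < 0)
		perror("IPV6_FLOWLABEL_MGR");
	else
		printf("flow label attached: 0x%05x\n",
		       ntohl(freq.flr_label) & 0xfffff);

	close(s);
	return 0;
}
```

The attached label then shows up in /proc/net/ip6_flowlabel, which is what the seq_file code in the surrounding ip6fl_seq_show entries prints.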
fl 820 net/ipv6/ip6_flowlabel.c from_kuid_munged(seq_user_ns(seq), fl->owner.uid) : fl 822 net/ipv6/ip6_flowlabel.c atomic_read(&fl->users), fl 823 net/ipv6/ip6_flowlabel.c fl->linger/HZ, fl 824 net/ipv6/ip6_flowlabel.c (long)(fl->expires - jiffies)/HZ, fl 825 net/ipv6/ip6_flowlabel.c &fl->dst, fl 826 net/ipv6/ip6_flowlabel.c fl->opt ? fl->opt->opt_nflen : 0); fl 643 net/ipv6/ip6_gre.c memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6)); fl 685 net/ipv6/ip6_gre.c memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6)); fl 864 net/ipv6/ip6_gre.c memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); fl 1017 net/ipv6/ip6_gre.c memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); fl 1073 net/ipv6/ip6_gre.c struct flowi6 *fl6 = &t->fl.u.ip6; fl 1343 net/ipv6/ip6_gre.c t->fl.u.ip6.flowlabel, fl 1344 net/ipv6/ip6_gre.c true, &t->fl.u.ip6)); fl 1282 net/ipv6/ip6_output.c cork->fl.u.ip6 = *fl6; fl 1683 net/ipv6/ip6_output.c fl6 = &inet->cork.fl.u.ip6; fl 1710 net/ipv6/ip6_output.c memset(&cork->fl, 0, sizeof(cork->fl)); fl 1726 net/ipv6/ip6_output.c struct flowi6 *fl6 = &cork->fl.u.ip6; fl 1267 net/ipv6/ip6_tunnel.c memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); fl 1353 net/ipv6/ip6_tunnel.c memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); fl 1424 net/ipv6/ip6_tunnel.c struct flowi6 *fl6 = &t->fl.u.ip6; fl 441 net/ipv6/ip6_vti.c vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) fl 457 net/ipv6/ip6_vti.c fl->u.ip4.flowi4_oif = dev->ifindex; fl 458 net/ipv6/ip6_vti.c fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC; fl 459 net/ipv6/ip6_vti.c rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4); fl 467 net/ipv6/ip6_vti.c fl->u.ip6.flowi6_oif = dev->ifindex; fl 468 net/ipv6/ip6_vti.c fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; fl 469 net/ipv6/ip6_vti.c dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6); fl 483 net/ipv6/ip6_vti.c dst = xfrm_lookup(t->net, dst, fl, NULL, 0); fl 548 net/ipv6/ip6_vti.c struct flowi fl; fl 554 net/ipv6/ip6_vti.c memset(&fl, 0, sizeof(fl)); fl 562 net/ipv6/ip6_vti.c xfrm_decode_session(skb, &fl, AF_INET6); fl 566 net/ipv6/ip6_vti.c xfrm_decode_session(skb, &fl, AF_INET); fl 574 net/ipv6/ip6_vti.c fl.flowi_mark = be32_to_cpu(t->parms.o_key); fl 576 net/ipv6/ip6_vti.c ret = vti6_xmit(skb, dev, &fl); fl 197 net/ipv6/mip6.c const struct flowi *fl) fl 201 net/ipv6/mip6.c const struct flowi6 *fl6 = &fl->u.ip6; fl 235 net/ipv6/mip6.c sel.dport = xfrm_flowi_dport(fl, &fl6->uli); fl 238 net/ipv6/mip6.c sel.sport = xfrm_flowi_sport(fl, &fl6->uli); fl 93 net/ipv6/netfilter.c struct flowi *fl, bool strict) fl 105 net/ipv6/netfilter.c result = ip6_route_output(net, sk, &fl->u.ip6); fl 485 net/ipv6/tcp_ipv6.c struct flowi *fl, fl 493 net/ipv6/tcp_ipv6.c struct flowi6 *fl6 = &fl->u.ip6; fl 772 net/ipv6/tcp_ipv6.c struct flowi *fl, fl 775 net/ipv6/tcp_ipv6.c return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP); fl 1200 net/ipv6/udp.c fl6 = inet_sk(sk)->cork.fl.u.ip6; fl 72 net/ipv6/xfrm6_policy.c const struct flowi *fl) fl 1008 net/l2tp/l2tp_core.c struct flowi *fl, size_t data_len) fl 1038 net/l2tp/l2tp_core.c error = ip_queue_xmit(tunnel->sock, skb, fl); fl 1060 net/l2tp/l2tp_core.c struct flowi *fl; fl 1109 net/l2tp/l2tp_core.c fl = &inet->cork.fl; fl 1137 net/l2tp/l2tp_core.c l2tp_xmit_core(session, skb, fl, data_len); fl 468 net/l2tp/l2tp_ip.c fl4 = &inet->cork.fl.u.ip4; fl 508 net/l2tp/l2tp_ip.c rc = ip_queue_xmit(sk, skb, &inet->cork.fl); fl 153 net/l3mdev/l3mdev.c int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, fl 161 net/l3mdev/l3mdev.c dev = dev_get_by_index_rcu(net, fl->flowi_oif); fl 169 
net/l3mdev/l3mdev.c dev = dev_get_by_index_rcu(net, fl->flowi_iif); fl 183 net/l3mdev/l3mdev.c void l3mdev_update_flow(struct net *net, struct flowi *fl) fl 190 net/l3mdev/l3mdev.c if (fl->flowi_oif) { fl 191 net/l3mdev/l3mdev.c dev = dev_get_by_index_rcu(net, fl->flowi_oif); fl 195 net/l3mdev/l3mdev.c fl->flowi_oif = ifindex; fl 196 net/l3mdev/l3mdev.c fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF; fl 202 net/l3mdev/l3mdev.c if (fl->flowi_iif) { fl 203 net/l3mdev/l3mdev.c dev = dev_get_by_index_rcu(net, fl->flowi_iif); fl 207 net/l3mdev/l3mdev.c fl->flowi_iif = ifindex; fl 208 net/l3mdev/l3mdev.c fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF; fl 873 net/netfilter/nf_conntrack_sip.c struct flowi fl; fl 876 net/netfilter/nf_conntrack_sip.c memset(&fl, 0, sizeof(fl)); fl 880 net/netfilter/nf_conntrack_sip.c fl.u.ip4.daddr = daddr->ip; fl 881 net/netfilter/nf_conntrack_sip.c nf_ip_route(net, &dst, &fl, false); fl 885 net/netfilter/nf_conntrack_sip.c fl.u.ip6.daddr = daddr->in6; fl 886 net/netfilter/nf_conntrack_sip.c nf_ip6_route(net, &dst, &fl, false); fl 59 net/netfilter/nf_nat_core.c struct flowi *fl) fl 62 net/netfilter/nf_nat_core.c struct flowi4 *fl4 = &fl->u.ip4; fl 91 net/netfilter/nf_nat_core.c struct flowi *fl) fl 95 net/netfilter/nf_nat_core.c struct flowi6 *fl6 = &fl->u.ip6; fl 121 net/netfilter/nf_nat_core.c static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl) fl 142 net/netfilter/nf_nat_core.c nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl); fl 145 net/netfilter/nf_nat_core.c nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl); fl 152 net/netfilter/nf_nat_core.c struct flowi fl; fl 158 net/netfilter/nf_nat_core.c err = xfrm_decode_session(skb, &fl, family); fl 171 net/netfilter/nf_nat_core.c dst = xfrm_lookup(net, dst, &fl, sk, 0); fl 29 net/netfilter/nft_flow_offload.c struct flowi fl; fl 31 net/netfilter/nft_flow_offload.c memset(&fl, 0, sizeof(fl)); fl 34 net/netfilter/nft_flow_offload.c fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip; fl 35 net/netfilter/nft_flow_offload.c fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex; fl 38 net/netfilter/nft_flow_offload.c fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6; fl 39 net/netfilter/nft_flow_offload.c fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex; fl 43 net/netfilter/nft_flow_offload.c nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt)); fl 26 net/netfilter/nft_rt.c struct flowi fl; fl 28 net/netfilter/nft_rt.c memset(&fl, 0, sizeof(fl)); fl 32 net/netfilter/nft_rt.c fl.u.ip4.daddr = ip_hdr(skb)->saddr; fl 36 net/netfilter/nft_rt.c fl.u.ip6.daddr = ipv6_hdr(skb)->saddr; fl 41 net/netfilter/nft_rt.c nf_route(nft_net(pkt), &dst, &fl, false, nft_pf(pkt)); fl 163 net/netfilter/utils.c int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, fl 171 net/netfilter/utils.c ret = nf_ip_route(net, dst, fl, strict); fl 174 net/netfilter/utils.c ret = nf_ip6_route(net, dst, fl, strict); fl 47 net/netfilter/xt_TCPMSS.c struct flowi fl; fl 52 net/netfilter/xt_TCPMSS.c struct flowi4 *fl4 = &fl.u.ip4; fl 56 net/netfilter/xt_TCPMSS.c struct flowi6 *fl6 = &fl.u.ip6; fl 62 net/netfilter/xt_TCPMSS.c nf_route(net, (struct dst_entry **)&rt, &fl, false, family); fl 427 net/openvswitch/actions.c static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) fl 430 net/openvswitch/actions.c OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); fl 431 net/openvswitch/actions.c OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); fl 432 net/openvswitch/actions.c 
fl 158 net/rxrpc/peer_object.c struct flowi fl;
fl 159 net/rxrpc/peer_object.c struct flowi4 *fl4 = &fl.u.ip4;
fl 161 net/rxrpc/peer_object.c struct flowi6 *fl6 = &fl.u.ip6;
fl 166 net/rxrpc/peer_object.c memset(&fl, 0, sizeof(fl));
fl 391 net/sched/sch_atm.c struct tcf_proto *fl;
fl 394 net/sched/sch_atm.c fl = rcu_dereference_bh(flow->filter_list);
fl 395 net/sched/sch_atm.c if (fl) {
fl 396 net/sched/sch_atm.c result = tcf_classify(skb, fl, &res, true);
fl 212 net/sched/sch_cbq.c struct tcf_proto *fl;
fl 227 net/sched/sch_cbq.c fl = rcu_dereference_bh(head->filter_list);
fl 231 net/sched/sch_cbq.c result = tcf_classify(skb, fl, &res, true);
fl 232 net/sched/sch_cbq.c if (!fl || result < 0)
fl 308 net/sched/sch_drr.c struct tcf_proto *fl;
fl 318 net/sched/sch_drr.c fl = rcu_dereference_bh(q->filter_list);
fl 319 net/sched/sch_drr.c result = tcf_classify(skb, fl, &res, false);
fl 243 net/sched/sch_dsmark.c struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
fl 244 net/sched/sch_dsmark.c int result = tcf_classify(skb, fl, &res, false);
fl 35 net/sched/sch_multiq.c struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
fl 39 net/sched/sch_multiq.c err = tcf_classify(skb, fl, &res, false);
fl 36 net/sched/sch_prio.c struct tcf_proto *fl;
fl 41 net/sched/sch_prio.c fl = rcu_dereference_bh(q->filter_list);
fl 42 net/sched/sch_prio.c err = tcf_classify(skb, fl, &res, false);
fl 54 net/sched/sch_prio.c if (!fl || err < 0) {
fl 682 net/sched/sch_qfq.c struct tcf_proto *fl;
fl 693 net/sched/sch_qfq.c fl = rcu_dereference_bh(q->filter_list);
fl 694 net/sched/sch_qfq.c result = tcf_classify(skb, fl, &res, false);
fl 254 net/sched/sch_sfb.c static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
fl 260 net/sched/sch_sfb.c result = tcf_classify(skb, fl, &res, false);
fl 285 net/sched/sch_sfb.c struct tcf_proto *fl;
fl 311 net/sched/sch_sfb.c fl = rcu_dereference_bh(q->filter_list);
fl 312 net/sched/sch_sfb.c if (fl) {
fl 316 net/sched/sch_sfb.c if (!sfb_classify(skb, fl, &ret, &salt))
fl 168 net/sched/sch_sfq.c struct tcf_proto *fl;
fl 176 net/sched/sch_sfq.c fl = rcu_dereference_bh(q->filter_list);
fl 177 net/sched/sch_sfq.c if (!fl)
fl 181 net/sched/sch_sfq.c result = tcf_classify(skb, fl, &res, false);
fl 198 net/sctp/ipv6.c struct flowi6 *fl6 = &transport->fl.u.ip6;
fl 227 net/sctp/ipv6.c struct flowi *fl, struct sock *sk)
fl 282 net/sctp/ipv6.c memcpy(fl, &_fl, sizeof(_fl));
fl 308 net/sctp/ipv6.c memcpy(fl, &_fl, sizeof(_fl));
fl 348 net/sctp/ipv6.c memcpy(fl, &_fl, sizeof(_fl));
fl 363 net/sctp/ipv6.c memcpy(fl, &_fl, sizeof(_fl));
fl 375 net/sctp/ipv6.c &fl->u.ip6.saddr);
fl 396 net/sctp/ipv6.c struct flowi *fl)
fl 398 net/sctp/ipv6.c struct flowi6 *fl6 = &fl->u.ip6;
fl 408 net/sctp/protocol.c struct flowi *fl, struct sock *sk)
fl 445 net/sctp/protocol.c memcpy(fl, &_fl, sizeof(_fl));
fl 512 net/sctp/protocol.c memcpy(fl, &_fl, sizeof(_fl));
fl 522 net/sctp/protocol.c memcpy(fl, &_fl, sizeof(_fl));
fl 531 net/sctp/protocol.c &fl->u.ip4.daddr, &fl->u.ip4.saddr);
fl 543 net/sctp/protocol.c struct flowi *fl)
fl 550 net/sctp/protocol.c saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr;
fl 980 net/sctp/protocol.c skb->len, &transport->fl.u.ip4.saddr,
fl 981 net/sctp/protocol.c &transport->fl.u.ip4.daddr);
fl 991 net/sctp/protocol.c return __ip_queue_xmit(&inet->sk, skb, &transport->fl, dscp);
fl 228 net/sctp/transport.c &transport->fl, sk);
fl 273 net/sctp/transport.c t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
fl 297 net/sctp/transport.c af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));
fl 302 net/sctp/transport.c af->get_saddr(opt, transport, &transport->fl);
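Note: the net/sched/sch_*.c hits above all follow one classification shape: dereference the qdisc's filter list, run the classifier, and fall back to a default class when no filter is attached or the verdict is negative. A userspace approximation of that control flow follows; the types and the classify callback are illustrative stand-ins, not the kernel's tcf_classify() API, and omit the RCU read-side handling.

#include <stdio.h>

struct tcf_result_sketch { unsigned long classid; };
struct tcf_proto_sketch  { int (*classify)(int pkt_mark, struct tcf_result_sketch *res); };

/* Toy classifier: only packets with mark 1 match, and they go to class 42. */
static int match_mark(int pkt_mark, struct tcf_result_sketch *res)
{
        if (pkt_mark != 1)
                return -1;            /* no match */
        res->classid = 42;
        return 0;
}

/* Mirrors the "if (!fl || result < 0) use the default class" shape above. */
static unsigned long classify(struct tcf_proto_sketch *fl, int pkt_mark,
                              unsigned long default_class)
{
        struct tcf_result_sketch res;

        if (!fl)
                return default_class;
        if (fl->classify(pkt_mark, &res) < 0)
                return default_class;
        return res.classid;
}

int main(void)
{
        struct tcf_proto_sketch fl = { .classify = match_mark };

        printf("mark 1    -> class %lu\n", classify(&fl, 1, 7));
        printf("mark 2    -> class %lu\n", classify(&fl, 2, 7));
        printf("no filter -> class %lu\n", classify(NULL, 1, 7));
        return 0;
}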
fl 173 net/tipc/udp_media.c struct flowi4 fl = {
fl 179 net/tipc/udp_media.c rt = ip_route_output_key(net, &fl);
fl 184 net/tipc/udp_media.c dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
fl 260 net/xfrm/xfrm_interface.c xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
fl 272 net/xfrm/xfrm_interface.c dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
fl 344 net/xfrm/xfrm_interface.c struct flowi fl;
fl 347 net/xfrm/xfrm_interface.c memset(&fl, 0, sizeof(fl));
fl 351 net/xfrm/xfrm_interface.c xfrm_decode_session(skb, &fl, AF_INET6);
fl 354 net/xfrm/xfrm_interface.c fl.u.ip6.flowi6_oif = dev->ifindex;
fl 355 net/xfrm/xfrm_interface.c fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
fl 356 net/xfrm/xfrm_interface.c dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
fl 366 net/xfrm/xfrm_interface.c xfrm_decode_session(skb, &fl, AF_INET);
fl 371 net/xfrm/xfrm_interface.c fl.u.ip4.flowi4_oif = dev->ifindex;
fl 372 net/xfrm/xfrm_interface.c fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
fl 373 net/xfrm/xfrm_interface.c rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
fl 385 net/xfrm/xfrm_interface.c fl.flowi_oif = xi->p.link;
fl 387 net/xfrm/xfrm_interface.c ret = xfrmi_xmit2(skb, dev, &fl);
fl 194 net/xfrm/xfrm_policy.c __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
fl 196 net/xfrm/xfrm_policy.c const struct flowi4 *fl4 = &fl->u.ip4;
fl 200 net/xfrm/xfrm_policy.c !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
fl 201 net/xfrm/xfrm_policy.c !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
fl 207 net/xfrm/xfrm_policy.c __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
fl 209 net/xfrm/xfrm_policy.c const struct flowi6 *fl6 = &fl->u.ip6;
fl 213 net/xfrm/xfrm_policy.c !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
fl 214 net/xfrm/xfrm_policy.c !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
fl 219 net/xfrm/xfrm_policy.c bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
fl 224 net/xfrm/xfrm_policy.c return __xfrm4_selector_match(sel, fl);
fl 226 net/xfrm/xfrm_policy.c return __xfrm6_selector_match(sel, fl);
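Note: the __xfrm4_selector_match()/__xfrm6_selector_match() hits above compare ports with an XOR-and-mask test: a port matches the selector when ((port ^ sel->dport) & sel->dport_mask) == 0, so a zero mask acts as a wildcard. A minimal standalone illustration with made-up values:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* True when the bits covered by 'mask' are identical in 'port' and 'sel_port'. */
static bool port_match(uint16_t port, uint16_t sel_port, uint16_t mask)
{
        return !((port ^ sel_port) & mask);
}

int main(void)
{
        /* Exact-match selector: mask 0xffff, port 443. */
        printf("443 vs 443/0xffff: %d\n", port_match(443, 443, 0xffff));
        printf("444 vs 443/0xffff: %d\n", port_match(444, 443, 0xffff));
        /* Wildcard selector: mask 0 matches any port. */
        printf("444 vs 443/0x0000: %d\n", port_match(444, 443, 0x0000));
        return 0;
}

The same single expression therefore covers both exact and wildcard selectors without a separate branch.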
fl 1884 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 1893 net/xfrm/xfrm_policy.c (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
fl 1897 net/xfrm/xfrm_policy.c match = xfrm_selector_match(sel, fl, family);
fl 1899 net/xfrm/xfrm_policy.c ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
fl 2010 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 2025 net/xfrm/xfrm_policy.c err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
fl 2049 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 2058 net/xfrm/xfrm_policy.c fl, type, family, dir,
fl 2072 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 2084 net/xfrm/xfrm_policy.c daddr = xfrm_flowi_daddr(fl, family);
fl 2085 net/xfrm/xfrm_policy.c saddr = xfrm_flowi_saddr(fl, family);
fl 2098 net/xfrm/xfrm_policy.c err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
fl 2116 net/xfrm/xfrm_policy.c pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
fl 2137 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 2143 net/xfrm/xfrm_policy.c pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
fl 2148 net/xfrm/xfrm_policy.c return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
fl 2153 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 2170 net/xfrm/xfrm_policy.c match = xfrm_selector_match(&pol->selector, fl, family);
fl 2178 net/xfrm/xfrm_policy.c fl->flowi_secid,
fl 2358 net/xfrm/xfrm_policy.c xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
fl 2364 net/xfrm/xfrm_policy.c xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
fl 2365 net/xfrm/xfrm_policy.c xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
fl 2379 net/xfrm/xfrm_policy.c error = xfrm_get_saddr(net, fl->flowi_oif,
fl 2388 net/xfrm/xfrm_policy.c x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
fl 2417 net/xfrm/xfrm_policy.c xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
fl 2433 net/xfrm/xfrm_policy.c ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
fl 2454 net/xfrm/xfrm_policy.c static int xfrm_get_tos(const struct flowi *fl, int family)
fl 2457 net/xfrm/xfrm_policy.c return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
fl 2508 net/xfrm/xfrm_policy.c const struct flowi *fl)
fl 2517 net/xfrm/xfrm_policy.c err = afinfo->fill_dst(xdst, dev, fl);
fl 2533 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 2552 net/xfrm/xfrm_policy.c xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
fl 2554 net/xfrm/xfrm_policy.c tos = xfrm_get_tos(fl, family);
fl 2595 net/xfrm/xfrm_policy.c mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
fl 2598 net/xfrm/xfrm_policy.c dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
fl 2644 net/xfrm/xfrm_policy.c err = xfrm_fill_dst(xdst_prev, dev, fl);
fl 2666 net/xfrm/xfrm_policy.c static int xfrm_expand_policies(const struct flowi *fl, u16 family,
fl 2687 net/xfrm/xfrm_policy.c fl, family,
fl 2713 net/xfrm/xfrm_policy.c const struct flowi *fl, u16 family,
fl 2724 net/xfrm/xfrm_policy.c err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
fl 2734 net/xfrm/xfrm_policy.c dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
fl 2757 net/xfrm/xfrm_policy.c struct flowi fl;
fl 2768 net/xfrm/xfrm_policy.c xfrm_decode_session(skb, &fl, dst->ops->family);
fl 2772 net/xfrm/xfrm_policy.c dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
fl 2800 net/xfrm/xfrm_policy.c xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
fl 2802 net/xfrm/xfrm_policy.c dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
fl 2869 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 2913 net/xfrm/xfrm_policy.c err = xfrm_fill_dst(xdst, dev, fl);
fl 2927 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 2938 net/xfrm/xfrm_policy.c pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
fl 2939 net/xfrm/xfrm_policy.c err = xfrm_expand_policies(fl, family, pols,
fl 2948 net/xfrm/xfrm_policy.c xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
fl 2971 net/xfrm/xfrm_policy.c xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
fl 3016 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 3034 net/xfrm/xfrm_policy.c pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
fl 3036 net/xfrm/xfrm_policy.c err = xfrm_expand_policies(fl, family, pols,
fl 3048 net/xfrm/xfrm_policy.c pols, num_pols, fl,
fl 3079 net/xfrm/xfrm_policy.c xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
fl 3168 net/xfrm/xfrm_policy.c const struct flowi *fl, const struct sock *sk,
fl 3171 net/xfrm/xfrm_policy.c return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
fl 3179 net/xfrm/xfrm_policy.c const struct flowi *fl,
fl 3182 net/xfrm/xfrm_policy.c struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
fl 3197 net/xfrm/xfrm_policy.c xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
fl 3207 net/xfrm/xfrm_policy.c return x->type->reject(x, skb, fl);
fl 3263 net/xfrm/xfrm_policy.c decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
fl 3268 net/xfrm/xfrm_policy.c struct flowi4 *fl4 = &fl->u.ip4;
fl 3372 net/xfrm/xfrm_policy.c decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
fl 3374 net/xfrm/xfrm_policy.c struct flowi6 *fl6 = &fl->u.ip6;
fl 3470 net/xfrm/xfrm_policy.c int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
fl 3475 net/xfrm/xfrm_policy.c decode_session4(skb, fl, reverse);
fl 3479 net/xfrm/xfrm_policy.c decode_session6(skb, fl, reverse);
fl 3486 net/xfrm/xfrm_policy.c return security_xfrm_decode_session(skb, &fl->flowi_secid);
fl 3512 net/xfrm/xfrm_policy.c struct flowi fl;
fl 3534 net/xfrm/xfrm_policy.c if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
fl 3539 net/xfrm/xfrm_policy.c nf_nat_decode_session(skb, &fl, family);
fl 3548 net/xfrm/xfrm_policy.c if (!xfrm_selector_match(&x->sel, &fl, family)) {
fl 3558 net/xfrm/xfrm_policy.c pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
fl 3566 net/xfrm/xfrm_policy.c pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
fl 3575 net/xfrm/xfrm_policy.c xfrm_secpath_reject(xerr_idx, skb, &fl);
fl 3589 net/xfrm/xfrm_policy.c &fl, family,
fl 3661 net/xfrm/xfrm_policy.c xfrm_secpath_reject(xerr_idx, skb, &fl);
fl 3671 net/xfrm/xfrm_policy.c struct flowi fl;
fl 3675 net/xfrm/xfrm_policy.c if (xfrm_decode_session(skb, &fl, family) < 0) {
fl 3686 net/xfrm/xfrm_policy.c dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
fl 852 net/xfrm/xfrm_state.c __xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
fl 854 net/xfrm/xfrm_state.c const struct flowi4 *fl4 = &fl->u.ip4;
fl 858 net/xfrm/xfrm_state.c sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
fl 860 net/xfrm/xfrm_state.c sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
fl 870 net/xfrm/xfrm_state.c __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
fl 872 net/xfrm/xfrm_state.c const struct flowi6 *fl6 = &fl->u.ip6;
fl 877 net/xfrm/xfrm_state.c sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
fl 879 net/xfrm/xfrm_state.c sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
fl 889 net/xfrm/xfrm_state.c xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
fl 896 net/xfrm/xfrm_state.c __xfrm4_init_tempsel(&x->sel, fl);
fl 899 net/xfrm/xfrm_state.c __xfrm6_init_tempsel(&x->sel, fl);
fl 1002 net/xfrm/xfrm_state.c const struct flowi *fl, unsigned short family,
fl 1019 net/xfrm/xfrm_state.c !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
fl 1020 net/xfrm/xfrm_state.c !security_xfrm_state_pol_flow_match(x, pol, fl))
fl 1032 net/xfrm/xfrm_state.c if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
fl 1033 net/xfrm/xfrm_state.c security_xfrm_state_pol_flow_match(x, pol, fl))
fl 1040 net/xfrm/xfrm_state.c const struct flowi *fl, struct xfrm_tmpl *tmpl,
fl 1072 net/xfrm/xfrm_state.c xfrm_state_look_at(pol, x, fl, encap_family,
fl 1089 net/xfrm/xfrm_state.c xfrm_state_look_at(pol, x, fl, encap_family,
fl 1121 net/xfrm/xfrm_state.c xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
fl 1125 net/xfrm/xfrm_state.c error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
fl 2083 security/security.c void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
fl 2085 security/security.c call_void_hook(sk_getsecid, sk, &fl->flowi_secid);
fl 2089 security/security.c void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
fl 2091 security/security.c call_void_hook(req_classify_flow, req, fl);
fl 2283 security/security.c const struct flowi *fl)
fl 2299 security/security.c rc = hp->hook.xfrm_state_pol_flow_match(x, xp, fl);
fl 2310 security/security.c void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
fl 2312 security/security.c int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid,
fl 5435 security/selinux/hooks.c struct flowi *fl)
fl 5437 security/selinux/hooks.c fl->flowi_secid = req->secid;
fl 29 security/selinux/include/xfrm.h const struct flowi *fl);
fl 178 security/selinux/xfrm.c const struct flowi *fl)
fl 200 security/selinux/xfrm.c if (fl->flowi_secid != state_sid)
fl 207 security/selinux/xfrm.c fl->flowi_secid, state_sid,
fl 1698 sound/soc/codecs/wm2200.c int i, bclk, lrclk, wl, fl, sr_code;
fl 1705 sound/soc/codecs/wm2200.c fl = snd_soc_params_to_frame_size(params);
fl 1706 sound/soc/codecs/wm2200.c if (fl < 0)
fl 1707 sound/soc/codecs/wm2200.c return fl;
fl 1710 sound/soc/codecs/wm2200.c wl, fl);
fl 1402 sound/soc/codecs/wm5100.c int i, base, bclk, aif_rate, lrclk, wl, fl, sr;
fl 1411 sound/soc/codecs/wm5100.c fl = snd_soc_params_to_frame_size(params);
fl 1412 sound/soc/codecs/wm5100.c if (fl < 0)
fl 1413 sound/soc/codecs/wm5100.c return fl;
fl 1416 sound/soc/codecs/wm5100.c wl, fl);
fl 1486 sound/soc/codecs/wm5100.c i = (wl << WM5100_AIF1TX_WL_SHIFT) | fl;
fl 123 tools/hv/hv_kvp_daemon.c struct flock fl = {F_WRLCK, SEEK_SET, 0, 0, 0};
fl 124 tools/hv/hv_kvp_daemon.c fl.l_pid = getpid();
fl 126 tools/hv/hv_kvp_daemon.c if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) {
fl 135 tools/hv/hv_kvp_daemon.c struct flock fl = {F_UNLCK, SEEK_SET, 0, 0, 0};
fl 136 tools/hv/hv_kvp_daemon.c fl.l_pid = getpid();
fl 138 tools/hv/hv_kvp_daemon.c if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) {
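Note: the tools/hv/hv_kvp_daemon.c hits at the end use plain POSIX advisory locking: a struct flock set to F_WRLCK and applied with fcntl(F_SETLKW) to take a whole-file write lock, later released with F_UNLCK. A self-contained userspace example of the same pattern follows; the /tmp path is an arbitrary choice for the demo.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/fl_lock_demo", O_RDWR | O_CREAT, 0600);
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                            .l_start = 0, .l_len = 0 };   /* l_len == 0: lock whole file */

        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        fl.l_pid = getpid();
        if (fcntl(fd, F_SETLKW, &fl) == -1) {             /* block until the lock is granted */
                perror("F_SETLKW");
                return EXIT_FAILURE;
        }
        puts("write lock held");

        fl.l_type = F_UNLCK;
        if (fcntl(fd, F_SETLK, &fl) == -1) {
                perror("F_SETLK (unlock)");
                return EXIT_FAILURE;
        }
        puts("lock released");
        close(fd);
        return 0;
}

As an advisory lock it only coordinates processes that also call fcntl() on the same file; it does not block ordinary reads or writes, which is sufficient for the daemon-style single-writer case shown above.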