References to the identifier ftmp, grouped by file:

arch/arm/mach-vexpress/spc.c
  281  u32 fmin = 0, fmax = ~0, ftmp;
  285  ftmp = opps->freq;
  286  if (ftmp >= freq) {
  287  if (ftmp <= fmax)
  288  fmax = ftmp;
  290  if (ftmp >= fmin)
  291  fmin = ftmp;

arch/sh/kernel/dwarf.c
  1132  struct dwarf_fde *fde, *ftmp;
  1148  list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {

drivers/clk/clk-scmi.c
  41  u64 fmin, fmax, ftmp;
  60  ftmp = rate - fmin;
  61  ftmp += clk->info->range.step_size - 1; /* to round up */
  62  do_div(ftmp, clk->info->range.step_size);
  64  return ftmp * clk->info->range.step_size + fmin;

drivers/clk/clk-scpi.c
  66  unsigned long fmin = 0, fmax = ~0, ftmp;
  70  ftmp = opp->freq;
  71  if (ftmp >= rate) {
  72  if (ftmp <= fmax)
  73  fmax = ftmp;
  75  } else if (ftmp >= fmin) {
  76  fmin = ftmp;

drivers/net/ethernet/intel/iavf/iavf_main.c
  1990  struct iavf_mac_filter *f, *ftmp;
  2015  list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
  2067  struct iavf_mac_filter *f, *ftmp;
  2187  list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
  3859  struct iavf_mac_filter *f, *ftmp;
  3920  list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {

drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
  505  struct iavf_mac_filter *f, *ftmp;
  547  list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
  645  struct iavf_vlan_filter *f, *ftmp;
  688  list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {

fs/cifs/cifs_unicode.c
  187  __u16 ftmp[3]; /* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */
  198  ftmp[0] = get_unaligned_le16(&from[i]);
  199  if (ftmp[0] == 0)
  202  ftmp[1] = get_unaligned_le16(&from[i + 1]);
  204  ftmp[1] = 0;
  206  ftmp[2] = get_unaligned_le16(&from[i + 2]);
  208  ftmp[2] = 0;
  215  charlen = cifs_mapchar(tmp, ftmp, codepage, map_type);
  221  charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type);
  313  __u16 ftmp[3];
  316  ftmp[0] = get_unaligned_le16(&from[i]);
  317  if (ftmp[0] == 0)
  320  ftmp[1] = get_unaligned_le16(&from[i + 1]);
  322  ftmp[1] = 0;
  324  ftmp[2] = get_unaligned_le16(&from[i + 2]);
  326  ftmp[2] = 0;
  328  charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD);
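The fs/cifs/cifs_unicode.c rows all follow one pattern: read up to three UTF-16LE code units into ftmp[] with get_unaligned_le16(), zero-filling once the terminating NUL or the end of the source is reached, then hand the triple to cifs_mapchar(). Below is a minimal standalone sketch of that unaligned little-endian read; read_le16(), buf and nunits are illustrative stand-ins, not kernel names.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's get_unaligned_le16(): assemble a
 * 16-bit little-endian value from a possibly unaligned byte pointer. */
static uint16_t read_le16(const unsigned char *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	/* "Ab" followed by a NUL code unit, encoded as UTF-16LE bytes. */
	const unsigned char buf[] = { 'A', 0, 'b', 0, 0, 0 };
	size_t nunits = sizeof(buf) / 2;
	uint16_t ftmp[3];
	size_t i;

	for (i = 0; i < nunits; i++) {
		/* Current code unit plus up to two units of look-ahead,
		 * zero-filled past the end of the buffer, as in the
		 * listing above. */
		ftmp[0] = read_le16(&buf[2 * i]);
		if (ftmp[0] == 0)
			break;
		ftmp[1] = (i + 1 < nunits) ? read_le16(&buf[2 * (i + 1)]) : 0;
		ftmp[2] = (i + 2 < nunits) ? read_le16(&buf[2 * (i + 2)]) : 0;
		printf("units: %04x %04x %04x\n", ftmp[0], ftmp[1], ftmp[2]);
	}
	return 0;
}

The look-ahead units appear to be what gives cifs_mapchar() enough context to recognize a surrogate pair when one is present.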
tools/testing/selftests/vm/map_populate.c
  78  FILE *ftmp;
  81  ftmp = tmpfile();
  82  BUG_ON(ftmp == 0, "tmpfile()");
  84  ret = ftruncate(fileno(ftmp), MMAP_SZ);
  88  MAP_SHARED, fileno(ftmp), 0);
  112  return child_f(sock[0], smap, fileno(ftmp));
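The map_populate.c rows show the selftest's setup in miniature: back a mapping with tmpfile(), size it with ftruncate(), mmap() it MAP_SHARED, and pass the descriptor to a child. A self-contained sketch of that sequence follows; MAP_POPULATE is assumed from the test's name (the excerpt only shows MAP_SHARED), and MAP_SZ is a stand-in for the test's MMAP_SZ.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define MAP_SZ (2UL * 1024 * 1024)	/* stand-in for the test's MMAP_SZ */

int main(void)
{
	FILE *ftmp;
	void *map;

	/* Anonymous temporary file, as in the selftest. */
	ftmp = tmpfile();
	if (!ftmp) {
		perror("tmpfile");
		return 1;
	}

	/* Give the file a real size before mapping it. */
	if (ftruncate(fileno(ftmp), MAP_SZ) != 0) {
		perror("ftruncate");
		return 1;
	}

	/* Shared mapping of the temporary file; MAP_POPULATE is an
	 * assumption based on the test's name, not on the excerpt. */
	map = mmap(NULL, MAP_SZ, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, fileno(ftmp), 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(map, 0xA5, MAP_SZ);	/* touch every page */
	munmap(map, MAP_SZ);
	fclose(ftmp);
	return 0;
}

With MAP_POPULATE the kernel pre-faults the file pages at mmap() time, so the first touch of the mapping should not generate page faults.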