
Searched refs:slice (Results 1 – 88 of 88) sorted by relevance

/linux-4.1.27/drivers/staging/lustre/lustre/llite/
Dvvp_page.c65 struct cl_page_slice *slice) in vvp_page_fini() argument
67 struct ccc_page *cp = cl2ccc_page(slice); in vvp_page_fini()
74 LASSERT((struct cl_page *)vmpage->private != slice->cpl_page); in vvp_page_fini()
79 const struct cl_page_slice *slice, struct cl_io *io, in vvp_page_own() argument
82 struct ccc_page *vpg = cl2ccc_page(slice); in vvp_page_own()
104 const struct cl_page_slice *slice, in vvp_page_assume() argument
107 struct page *vmpage = cl2vm_page(slice); in vvp_page_assume()
115 const struct cl_page_slice *slice, in vvp_page_unassume() argument
118 struct page *vmpage = cl2vm_page(slice); in vvp_page_unassume()
125 const struct cl_page_slice *slice, struct cl_io *io) in vvp_page_disown() argument
[all …]
Dvvp_lock.c62 const struct cl_lock_slice *slice) in vvp_lock_weigh() argument
64 struct ccc_object *cob = cl2ccc(slice->cls_obj); in vvp_lock_weigh()
Dvvp_io.c51 const struct cl_io_slice *slice);
816 const struct cl_page_slice *slice) in vvp_io_read_page() argument
819 struct cl_object *obj = slice->cpl_obj; in vvp_io_read_page()
820 struct ccc_page *cp = cl2ccc_page(slice); in vvp_io_read_page()
821 struct cl_page *page = slice->cpl_page; in vvp_io_read_page()
831 LASSERT(slice->cpl_obj == obj); in vvp_io_read_page()
937 const struct cl_page_slice *slice, in vvp_io_prepare_write() argument
940 struct cl_object *obj = slice->cpl_obj; in vvp_io_prepare_write()
941 struct ccc_page *cp = cl2ccc_page(slice); in vvp_io_prepare_write()
942 struct cl_page *pg = slice->cpl_page; in vvp_io_prepare_write()
[all …]
/linux-4.1.27/drivers/staging/lustre/lustre/lov/
Dlov_page.c55 static int lov_page_invariant(const struct cl_page_slice *slice) in lov_page_invariant() argument
57 const struct cl_page *page = slice->cpl_page; in lov_page_invariant()
58 const struct cl_page *sub = lov_sub_page(slice); in lov_page_invariant()
67 struct cl_page_slice *slice) in lov_page_fini() argument
69 struct cl_page *sub = lov_sub_page(slice); in lov_page_fini()
71 LINVRNT(lov_page_invariant(slice)); in lov_page_fini()
77 slice->cpl_page->cp_child = NULL; in lov_page_fini()
83 const struct cl_page_slice *slice, struct cl_io *io, in lov_page_own() argument
89 LINVRNT(lov_page_invariant(slice)); in lov_page_own()
90 LINVRNT(!cl2lov_page(slice)->lps_invalid); in lov_page_own()
[all …]
Dlov_cl_internal.h638 const struct cl_page_slice *slice);
756 cl2lovsub_lock(const struct cl_lock_slice *slice) in cl2lovsub_lock() argument
758 LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu)); in cl2lovsub_lock()
759 return container_of(slice, struct lovsub_lock, lss_cl); in cl2lovsub_lock()
764 const struct cl_lock_slice *slice; in cl2sub_lock() local
766 slice = cl_lock_at(lock, &lovsub_device_type); in cl2sub_lock()
767 LASSERT(slice != NULL); in cl2sub_lock()
768 return cl2lovsub_lock(slice); in cl2sub_lock()
771 static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice) in cl2lov_lock() argument
773 LINVRNT(lov_is_object(&slice->cls_obj->co_lu)); in cl2lov_lock()
[all …]
Dlovsub_lock.c56 struct cl_lock_slice *slice) in lovsub_lock_fini() argument
60 lsl = cl2lovsub_lock(slice); in lovsub_lock_fini()
91 const struct cl_lock_slice *slice, in lovsub_lock_state() argument
94 struct lovsub_lock *sub = cl2lovsub_lock(slice); in lovsub_lock_state()
97 LASSERT(cl_lock_is_mutexed(slice->cls_lock)); in lovsub_lock_state()
116 const struct cl_lock_slice *slice) in lovsub_lock_weigh() argument
118 struct lovsub_lock *lock = cl2lovsub_lock(slice); in lovsub_lock_weigh()
122 LASSERT(cl_lock_is_mutexed(slice->cls_lock)); in lovsub_lock_weigh()
247 const struct cl_lock_slice *slice, in lovsub_lock_closure() argument
255 LASSERT(cl_lock_is_mutexed(slice->cls_lock)); in lovsub_lock_closure()
[all …]
Dlov_lock.c53 const struct cl_lock_slice *slice);
430 struct cl_lock_slice *slice) in lov_lock_fini() argument
435 lck = cl2lov_lock(slice); in lov_lock_fini()
544 const struct cl_lock_slice *slice, in lov_lock_enqueue() argument
547 struct cl_lock *lock = slice->cls_lock; in lov_lock_enqueue()
548 struct lov_lock *lck = cl2lov_lock(slice); in lov_lock_enqueue()
636 const struct cl_lock_slice *slice) in lov_lock_unuse() argument
638 struct lov_lock *lck = cl2lov_lock(slice); in lov_lock_unuse()
639 struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock); in lov_lock_unuse()
653 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT); in lov_lock_unuse()
[all …]
Dlovsub_dev.c54 const struct cl_req_slice *slice, int ioret) in lovsub_req_completion() argument
58 lsr = cl2lovsub_req(slice); in lovsub_req_completion()
68 const struct cl_req_slice *slice, in lovsub_req_attr_set() argument
Dlov_io.c260 const struct cl_page_slice *slice) in lov_page_subio() argument
263 struct cl_page *page = slice->cpl_page; in lov_page_subio()
267 LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object); in lov_page_subio()
681 const struct cl_page_slice *slice, in lov_io_prepare_write() argument
685 struct cl_page *sub_page = lov_sub_page(slice); in lov_io_prepare_write()
689 sub = lov_page_subio(env, lio, slice); in lov_io_prepare_write()
701 const struct cl_page_slice *slice, in lov_io_commit_write() argument
705 struct cl_page *sub_page = lov_sub_page(slice); in lov_io_commit_write()
709 sub = lov_page_subio(env, lio, slice); in lov_io_commit_write()
Dlovsub_page.c54 struct cl_page_slice *slice) in lovsub_page_fini() argument
Dlov_dev.c123 const struct cl_req_slice *slice, int ioret) in lov_req_completion() argument
127 lr = cl2lov_req(slice); in lov_req_completion()
/linux-4.1.27/sound/pci/au88x0/
Dau88x0_a3d.c38 a3d_addrA(a->slice, a->source, A3D_A_HrtfTrackTC), HrtfTrack); in a3dsrc_SetTimeConsts()
40 a3d_addrA(a->slice, a->source, A3D_A_ITDTrackTC), ItdTrack); in a3dsrc_SetTimeConsts()
42 a3d_addrA(a->slice, a->source, A3D_A_GainTrackTC), GTrack); in a3dsrc_SetTimeConsts()
44 a3d_addrA(a->slice, a->source, A3D_A_CoeffTrackTC), CTrack); in a3dsrc_SetTimeConsts()
64 a3d_addrB(a->slice, a->source, A3D_B_A21Target), in a3dsrc_SetAtmosTarget()
67 a3d_addrB(a->slice, a->source, A3D_B_B10Target), in a3dsrc_SetAtmosTarget()
70 a3d_addrB(a->slice, a->source, A3D_B_B2Target), c); in a3dsrc_SetAtmosTarget()
79 a3d_addrB(a->slice, a->source, A3D_B_A12Current), in a3dsrc_SetAtmosCurrent()
82 a3d_addrB(a->slice, a->source, A3D_B_B01Current), in a3dsrc_SetAtmosCurrent()
85 a3d_addrB(a->slice, a->source, A3D_B_B2Current), c); in a3dsrc_SetAtmosCurrent()
[all …]
Dau88x0_a3d.h50 unsigned int slice; /* this_08 */ member
118 #define a3d_addrA(slice,source,reg) (((slice)<<0xd)+((source)*0x3A4)+(reg)) argument
119 #define a3d_addrB(slice,source,reg) (((slice)<<0xd)+((source)*0x2C8)+(reg)) argument
120 #define a3d_addrS(slice,reg) (((slice)<<0xd)+(reg)) argument
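The a3d_addrA/B/S macros quoted above from au88x0_a3d.h pack a hardware slice number, source index and register offset into a single address: each slice occupies a 0x2000-byte window ((slice) << 0xd) and each source is strided by 0x3A4 (block A) or 0x2C8 (block B). A minimal standalone sketch of that arithmetic, with purely illustrative slice/source/reg values:

/* Standalone sketch of the a3d_addr* arithmetic quoted above from
 * au88x0_a3d.h.  The sample values below are illustrative only.
 */
#include <stdio.h>

#define a3d_addrA(slice, source, reg) (((slice) << 0xd) + ((source) * 0x3A4) + (reg))
#define a3d_addrB(slice, source, reg) (((slice) << 0xd) + ((source) * 0x2C8) + (reg))
#define a3d_addrS(slice, reg)         (((slice) << 0xd) + (reg))

int main(void)
{
	unsigned int slice = 1, source = 2, reg = 0x10;

	printf("addrA = 0x%x\n", a3d_addrA(slice, source, reg)); /* 0x2000 + 0x748 + 0x10 */
	printf("addrB = 0x%x\n", a3d_addrB(slice, source, reg)); /* 0x2000 + 0x590 + 0x10 */
	printf("addrS = 0x%x\n", a3d_addrS(slice, reg));         /* 0x2000 + 0x10 */
	return 0;
}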
/linux-4.1.27/drivers/misc/eeprom/
Dmax6875.c57 static void max6875_update_slice(struct i2c_client *client, int slice) in max6875_update_slice() argument
63 if (slice >= USER_EEPROM_SLICES) in max6875_update_slice()
68 buf = &data->data[slice << SLICE_BITS]; in max6875_update_slice()
70 if (!(data->valid & (1 << slice)) || in max6875_update_slice()
71 time_after(jiffies, data->last_updated[slice])) { in max6875_update_slice()
73 dev_dbg(&client->dev, "Starting update of slice %u\n", slice); in max6875_update_slice()
75 data->valid &= ~(1 << slice); in max6875_update_slice()
77 addr = USER_EEPROM_BASE + (slice << SLICE_BITS); in max6875_update_slice()
102 data->last_updated[slice] = jiffies; in max6875_update_slice()
103 data->valid |= (1 << slice); in max6875_update_slice()
[all …]
Deeprom.c50 static void eeprom_update_client(struct i2c_client *client, u8 slice) in eeprom_update_client() argument
57 if (!(data->valid & (1 << slice)) || in eeprom_update_client()
58 time_after(jiffies, data->last_updated[slice] + 300 * HZ)) { in eeprom_update_client()
59 dev_dbg(&client->dev, "Starting eeprom update, slice %u\n", slice); in eeprom_update_client()
62 for (i = slice << 5; i < (slice + 1) << 5; i += 32) in eeprom_update_client()
68 for (i = slice << 5; i < (slice + 1) << 5; i += 2) { in eeprom_update_client()
76 data->last_updated[slice] = jiffies; in eeprom_update_client()
77 data->valid |= (1 << slice); in eeprom_update_client()
89 u8 slice; in eeprom_read() local
97 for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++) in eeprom_read()
[all …]
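Both max6875.c and eeprom.c above cache the device contents in fixed-size slices: one bit per slice records validity, a per-slice timestamp forces a refresh after a timeout, and a read only refreshes the slices it overlaps. A minimal userspace sketch of that pattern, assuming 32-byte slices as in eeprom.c; fake_device_read() and the 300-second expiry stand in for the real I2C transfer and jiffies handling:

/* Userspace sketch of the per-slice caching visible in the eeprom.c
 * excerpt above: slice = offset >> 5, a "valid" bit and a timestamp
 * per slice, and a read refreshes only the slices it touches.
 */
#include <stdio.h>
#include <string.h>
#include <time.h>

#define EEPROM_SIZE  256
#define SLICE_SHIFT  5                       /* 32-byte slices */
#define SLICE_SIZE   (1 << SLICE_SHIFT)
#define NUM_SLICES   (EEPROM_SIZE / SLICE_SIZE)
#define EXPIRY_SECS  300

static unsigned char cache[EEPROM_SIZE];
static unsigned int valid;                   /* one bit per slice */
static time_t last_updated[NUM_SLICES];

/* Stand-in for the real device transfer. */
static void fake_device_read(unsigned off, unsigned char *dst, unsigned len)
{
	for (unsigned i = 0; i < len; i++)
		dst[i] = (unsigned char)(off + i);
}

static void update_slice(unsigned slice)
{
	if (!(valid & (1u << slice)) ||
	    time(NULL) > last_updated[slice] + EXPIRY_SECS) {
		fake_device_read(slice << SLICE_SHIFT,
				 &cache[slice << SLICE_SHIFT], SLICE_SIZE);
		last_updated[slice] = time(NULL);
		valid |= 1u << slice;
	}
}

static void cached_read(unsigned off, unsigned char *buf, unsigned count)
{
	/* Refresh every slice the request overlaps, then copy from cache. */
	for (unsigned slice = off >> SLICE_SHIFT;
	     slice <= (off + count - 1) >> SLICE_SHIFT; slice++)
		update_slice(slice);
	memcpy(buf, &cache[off], count);
}

int main(void)
{
	unsigned char buf[40];

	cached_read(20, buf, sizeof(buf));      /* touches slices 0 and 1 */
	printf("byte at offset 20 = %u\n", (unsigned)buf[0]);
	return 0;
}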
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
Dosc_io.c56 static struct osc_req *cl2osc_req(const struct cl_req_slice *slice) in cl2osc_req() argument
58 LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type); in cl2osc_req()
59 return container_of0(slice, struct osc_req, or_cl); in cl2osc_req()
63 const struct cl_io_slice *slice) in cl2osc_io() argument
65 struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl); in cl2osc_io()
73 const struct cl_page_slice *slice; in osc_cl_page_osc() local
75 slice = cl_page_at(page, &osc_device_type); in osc_cl_page_osc()
76 LASSERT(slice != NULL); in osc_cl_page_osc()
78 return cl2osc_page(slice); in osc_cl_page_osc()
258 const struct cl_page_slice *slice, in osc_io_prepare_write() argument
[all …]
Dosc_lock.c202 const struct cl_lock_slice *slice) in osc_lock_unuse() argument
204 struct osc_lock *ols = cl2osc_lock(slice); in osc_lock_unuse()
238 struct cl_lock_slice *slice) in osc_lock_fini() argument
240 struct osc_lock *ols = cl2osc_lock(slice); in osc_lock_fini()
486 struct cl_lock_slice *slice = &olck->ols_cl; in osc_lock_upcall() local
487 struct cl_lock *lock = slice->cls_lock; in osc_lock_upcall()
533 osc_object_set_contended(cl2osc(slice->cls_obj)); in osc_lock_upcall()
534 LASSERT(slice->cls_ops == &osc_lock_ops); in osc_lock_upcall()
894 const struct cl_lock_slice *slice) in osc_lock_weigh() argument
900 return cl_object_header(slice->cls_obj)->coh_pages; in osc_lock_weigh()
[all …]
Dosc_page.c166 struct cl_page_slice *slice) in osc_page_fini() argument
168 struct osc_page *opg = cl2osc_page(slice); in osc_page_fini()
216 const struct cl_page_slice *slice, in osc_page_cache_add() argument
220 struct osc_page *opg = cl2osc_page(slice); in osc_page_cache_add()
290 const struct cl_page_slice *slice, in osc_page_is_under_lock() argument
296 lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page, in osc_page_is_under_lock()
299 if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0) in osc_page_is_under_lock()
307 const struct cl_page_slice *slice, in osc_page_disown() argument
310 struct osc_page *opg = cl2osc_page(slice); in osc_page_disown()
317 const struct cl_page_slice *slice, in osc_page_completion_read() argument
[all …]
Dosc_cl_internal.h548 static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice) in cl2osc_page() argument
550 LINVRNT(osc_is_object(&slice->cpl_obj->co_lu)); in cl2osc_page()
551 return container_of0(slice, struct osc_page, ops_cl); in cl2osc_page()
569 static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice) in cl2osc_lock() argument
571 LINVRNT(osc_is_object(&slice->cls_obj->co_lu)); in cl2osc_lock()
572 return container_of0(slice, struct osc_lock, ols_cl); in cl2osc_lock()
/linux-4.1.27/arch/mips/sgi-ip27/
Dip27-nmi.c35 void install_cpu_nmi_handler(int slice) in install_cpu_nmi_handler() argument
39 nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice); in install_cpu_nmi_handler()
54 void nmi_cpu_eframe_save(nasid_t nasid, int slice) in nmi_cpu_eframe_save() argument
62 slice * IP27_NMI_KREGS_CPU_SIZE); in nmi_cpu_eframe_save()
64 printk("NMI nasid %d: slice %d\n", nasid, slice); in nmi_cpu_eframe_save()
130 void nmi_dump_hub_irq(nasid_t nasid, int slice) in nmi_dump_hub_irq() argument
134 if (slice == 0) { /* Slice A */ in nmi_dump_hub_irq()
157 int slice; in nmi_node_eframe_save() local
168 for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) { in nmi_node_eframe_save()
169 nmi_cpu_eframe_save(nasid, slice); in nmi_node_eframe_save()
[all …]
Dip27-klconfig.c80 klcpu_t *nasid_slice_to_cpuinfo(nasid_t nasid, int slice) in nasid_slice_to_cpuinfo() argument
92 if ((acpu->cpu_info.physid) == slice) in nasid_slice_to_cpuinfo()
102 int slice; in sn_get_cpuinfo() local
119 for (slice = 0; slice < CPUS_PER_NODE; slice++) { in sn_get_cpuinfo()
120 acpu = nasid_slice_to_cpuinfo(nasid, slice); in sn_get_cpuinfo()
Dip27-init.c116 int slice = LOCAL_HUB_L(PI_CPU_NUM); in per_cpu_init() local
119 struct slice_data *si = hub->slice + slice; in per_cpu_init()
122 if (test_and_set_bit(slice, &hub->slice_map)) in per_cpu_init()
Dip27-irq.c185 int slice = LOCAL_HUB_L(PI_CPU_NUM); in install_ipi() local
191 resched = CPU_RESCHED_A_IRQ + slice; in install_ipi()
196 call = CPU_CALL_A_IRQ + slice; in install_ipi()
201 if (slice == 0) { in install_ipi()
Dip27-timer.c57 int slice = cputoslice(cpu); in rt_next_event() local
62 LOCAL_HUB_S(PI_RT_COMPARE_A + PI_COUNT_OFFSET * slice, cnt); in rt_next_event()
82 int slice = cputoslice(cpu); in hub_rt_counter_handler() local
87 LOCAL_HUB_S(PI_RT_PEND_A + PI_COUNT_OFFSET * slice, 0); in hub_rt_counter_handler()
/linux-4.1.27/drivers/staging/lustre/lustre/include/
Dlclient.h231 static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice) in cl2ccc_page() argument
233 return container_of(slice, struct ccc_page, cpg_cl); in cl2ccc_page()
299 const struct cl_page_slice *slice);
301 const struct cl_page_slice *slice, struct cl_io *io);
302 int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
305 const struct cl_page_slice *slice,
308 const struct cl_page_slice *slice,
311 const struct cl_page_slice *slice,
314 const struct cl_page_slice *slice,
317 const struct cl_page_slice *slice,
[all …]
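cl2ccc_page() above recovers a layer's private page structure from the embedded cl_page_slice via container_of(); the same idiom appears throughout the llite, lov and osc results (cl2osc_page(), cl2lovsub_lock(), ...). A self-contained sketch of the idiom, with invented demo_* type names standing in for the kernel structures:

/* Sketch of the container_of() layering idiom: each layer embeds a
 * generic slice in its own per-page structure and converts back with
 * container_of().  Only the idiom matches the kernel code; the names
 * are invented for the sketch.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_page_slice {                /* generic, layer-independent part */
	int cpl_index;
};

struct demo_ccc_page {                  /* one layer's private page state */
	int cpg_flags;
	struct demo_page_slice cpg_cl;  /* embedded slice */
};

static struct demo_ccc_page *demo_cl2ccc_page(struct demo_page_slice *slice)
{
	return container_of(slice, struct demo_ccc_page, cpg_cl);
}

int main(void)
{
	struct demo_ccc_page page = { .cpg_flags = 7, .cpg_cl = { .cpl_index = 3 } };
	struct demo_page_slice *slice = &page.cpg_cl;

	/* Given only the slice, recover the enclosing layer structure. */
	printf("flags = %d\n", demo_cl2ccc_page(slice)->cpg_flags);
	return 0;
}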
Dcl_object.h446 #define cl_object_for_each(slice, obj) \ argument
447 list_for_each_entry((slice), \
454 #define cl_object_for_each_reverse(slice, obj) \ argument
455 list_for_each_entry_reverse((slice), \
845 const struct cl_page_slice *slice);
856 const struct cl_page_slice *slice,
864 const struct cl_page_slice *slice, struct cl_io *io);
873 const struct cl_page_slice *slice, struct cl_io *io);
882 const struct cl_page_slice *slice,
891 const struct cl_page_slice *slice, int uptodate);
[all …]
/linux-4.1.27/drivers/staging/lustre/lustre/lclient/
Dlcommon_cl.c465 const struct cl_page_slice *slice) in ccc_page_vmpage() argument
467 return cl2vm_page(slice); in ccc_page_vmpage()
471 const struct cl_page_slice *slice, in ccc_page_is_under_lock() argument
476 struct cl_page *page = slice->cpl_page; in ccc_page_is_under_lock()
497 int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice) in ccc_fail() argument
511 const struct cl_page_slice *slice, in ccc_transient_page_own() argument
515 ccc_transient_page_verify(slice->cpl_page); in ccc_transient_page_own()
520 const struct cl_page_slice *slice, in ccc_transient_page_assume() argument
523 ccc_transient_page_verify(slice->cpl_page); in ccc_transient_page_assume()
527 const struct cl_page_slice *slice, in ccc_transient_page_unassume() argument
[all …]
/linux-4.1.27/block/partitions/
Dsysv68.c43 struct slice { struct
56 struct slice *slice; in sysv68_partition() local
79 slice = (struct slice *)data; in sysv68_partition()
80 for (i = 0; i < slices; i++, slice++) { in sysv68_partition()
83 if (be32_to_cpu(slice->nblocks)) { in sysv68_partition()
85 be32_to_cpu(slice->blkoff), in sysv68_partition()
86 be32_to_cpu(slice->nblocks)); in sysv68_partition()
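sysv68_partition() above walks an on-disk table of big-endian slice descriptors and registers every slice with a non-zero block count. A userspace sketch of that loop, modelling only the two fields visible in the excerpt (nblocks, blkoff); the real struct slice layout contains more than this:

/* Sketch of the sysv68 slice-table walk: report each non-empty slice's
 * offset and length, converting from big-endian as the kernel does
 * with be32_to_cpu().
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>          /* ntohl()/htonl() as be32 stand-ins */

struct demo_slice {
	uint32_t nblocks;       /* big-endian length in blocks */
	uint32_t blkoff;        /* big-endian starting block */
};

int main(void)
{
	/* Two fake slices, stored big-endian as they would be on disk. */
	struct demo_slice table[] = {
		{ htonl(2048), htonl(64) },
		{ htonl(0),    htonl(0)  },     /* empty slice: skipped */
	};
	int slices = sizeof(table) / sizeof(table[0]);
	struct demo_slice *slice = table;

	for (int i = 0; i < slices; i++, slice++) {
		if (ntohl(slice->nblocks))
			printf("slice %d: offset %u, %u blocks\n", i,
			       (unsigned)ntohl(slice->blkoff),
			       (unsigned)ntohl(slice->nblocks));
	}
	return 0;
}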
DKconfig159 Like some systems, UnixWare uses its own slice table inside a
/linux-4.1.27/drivers/gpu/drm/omapdrm/
Dtcm.h220 static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice) in tcm_slice() argument
222 *slice = *parent; in tcm_slice()
225 if (slice->tcm && !slice->is2d && in tcm_slice()
226 slice->p0.y != slice->p1.y && in tcm_slice()
227 (slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) { in tcm_slice()
229 slice->p1.x = slice->tcm->width - 1; in tcm_slice()
230 slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1; in tcm_slice()
233 parent->p0.y = slice->p1.y + 1; in tcm_slice()
Domap_dmm_tiler.c308 struct tcm_area slice, area_s; in fill() local
315 tcm_for_each_slice(slice, *area, area_s) { in fill()
317 .x0 = slice.p0.x, .y0 = slice.p0.y, in fill()
318 .x1 = slice.p1.x, .y1 = slice.p1.y, in fill()
323 roll += tcm_sizeof(slice); in fill()
/linux-4.1.27/arch/mips/include/asm/sn/
Daddrs.h302 #define EX_HANDLER_OFFSET(slice) ((slice) << 16) argument
303 #define EX_HANDLER_ADDR(nasid, slice) \ argument
304 PHYS_TO_K0(NODE_OFFSET(nasid) | EX_HANDLER_OFFSET(slice))
307 #define EX_FRAME_OFFSET(slice) ((slice) << 16 | 0x400) argument
308 #define EX_FRAME_ADDR(nasid, slice) \ argument
309 PHYS_TO_K0(NODE_OFFSET(nasid) | EX_FRAME_OFFSET(slice))
356 #define LAUNCH_OFFSET(nasid, slice) \ argument
358 KLD_LAUNCH(nasid)->stride * (slice))
359 #define LAUNCH_ADDR(nasid, slice) \ argument
360 TO_NODE_UNCAC((nasid), LAUNCH_OFFSET(nasid, slice))
[all …]
Dsn_private.h13 extern void install_cpu_nmi_handler(int slice);
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
Dcl_lock.c189 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, in cl_lock_slice_add() argument
193 slice->cls_lock = lock; in cl_lock_slice_add()
194 list_add_tail(&slice->cls_linkage, &lock->cll_layers); in cl_lock_slice_add()
195 slice->cls_obj = obj; in cl_lock_slice_add()
196 slice->cls_ops = ops; in cl_lock_slice_add()
257 struct cl_lock_slice *slice; in cl_lock_free() local
259 slice = list_entry(lock->cll_layers.next, in cl_lock_free()
262 slice->cls_ops->clo_fini(env, slice); in cl_lock_free()
460 const struct cl_lock_slice *slice; in cl_lock_fits_into() local
463 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { in cl_lock_fits_into()
[all …]
Dcl_io.c56 #define cl_io_for_each(slice, io) \ argument
57 list_for_each_entry((slice), &io->ci_layers, cis_linkage)
58 #define cl_io_for_each_reverse(slice, io) \ argument
59 list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
104 struct cl_io_slice *slice; in cl_io_fini() local
111 slice = container_of(io->ci_layers.prev, struct cl_io_slice, in cl_io_fini()
113 list_del_init(&slice->cis_linkage); in cl_io_fini()
114 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL) in cl_io_fini()
115 slice->cis_iop->op[io->ci_type].cio_fini(env, slice); in cl_io_fini()
121 slice->cis_io = NULL; in cl_io_fini()
[all …]
Dcl_page.c110 const struct cl_page_slice *slice; in cl_page_at_trusted() local
114 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { in cl_page_at_trusted()
115 if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype) in cl_page_at_trusted()
116 return slice; in cl_page_at_trusted()
161 const struct cl_page_slice *slice; in cl_page_gang_lookup() local
191 slice = cl_page_at_trusted(page, dtype); in cl_page_gang_lookup()
196 PASSERT(env, page, slice != NULL); in cl_page_gang_lookup()
198 page = slice->cpl_page; in cl_page_gang_lookup()
261 struct cl_page_slice *slice; in cl_page_free() local
263 slice = list_entry(page->cp_layers.next, in cl_page_free()
[all …]
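cl_page_at_trusted() above walks the page's list of layer slices and returns the one whose object belongs to the requested device type. A simplified standalone sketch of that lookup; the singly linked list and string type tags below stand in for the kernel's list_head and lu_device_type pointers:

/* Sketch of the per-layer slice lookup: walk the page's slice list and
 * match on the layer's device-type tag (compared by pointer identity,
 * as the kernel compares ld_type pointers).
 */
#include <stdio.h>

struct demo_slice {
	const char *dev_type;           /* stand-in for ld_type */
	struct demo_slice *next;        /* stand-in for cpl_linkage */
};

struct demo_page {
	struct demo_slice *layers;      /* stand-in for cp_layers */
};

static struct demo_slice *demo_page_at(struct demo_page *page, const char *dtype)
{
	for (struct demo_slice *slice = page->layers; slice; slice = slice->next)
		if (slice->dev_type == dtype)
			return slice;
	return NULL;
}

int main(void)
{
	static const char *lov = "lov", *osc = "osc";
	struct demo_slice bottom = { osc, NULL };
	struct demo_slice top = { lov, &bottom };
	struct demo_page page = { &top };

	struct demo_slice *slice = demo_page_at(&page, osc);
	printf("found %s layer slice: %s\n", osc, slice ? "yes" : "no");
	return 0;
}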
/linux-4.1.27/drivers/misc/cxl/
Ddebugfs.c20 int slice; in cxl_stop_trace() local
27 for (slice = 0; slice < adapter->slices; slice++) { in cxl_stop_trace()
28 if (adapter->afu[slice]) in cxl_stop_trace()
29 cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, 0x8000000000000000LL); in cxl_stop_trace()
91 snprintf(buf, 32, "psl%i.%i", afu->adapter->adapter_num, afu->slice); in cxl_debugfs_afu_add()
Dtrace.h71 __entry->afu = ctx->afu->slice;
100 __entry->afu = ctx->afu->slice;
140 __entry->afu = ctx->afu->slice;
173 __entry->afu = ctx->afu->slice;
204 __entry->afu = ctx->afu->slice;
231 __entry->afu = ctx->afu->slice;
260 __entry->afu = ctx->afu->slice;
292 __entry->afu = ctx->afu->slice;
321 __entry->afu = ctx->afu->slice;
349 __entry->afu = ctx->afu->slice;
[all …]
Dmain.c49 ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe); in _cxl_slbia()
66 int card, slice, id; in cxl_slbia_core() local
74 for (slice = 0; slice < adapter->slices; slice++) { in cxl_slbia_core()
75 afu = adapter->afu[slice]; in cxl_slbia_core()
Dpci.c506 p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size); in cxl_map_slice_regs()
507 p2n_base = p2_base(dev) + (afu->slice * p2n_size); in cxl_map_slice_regs()
508 afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size)); in cxl_map_slice_regs()
509 afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size); in cxl_map_slice_regs()
547 static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) in cxl_alloc_afu() argument
557 afu->slice = slice; in cxl_alloc_afu()
675 static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) in cxl_init_afu() argument
681 if (!(afu = cxl_alloc_afu(adapter, slice))) in cxl_init_afu()
684 if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice))) in cxl_init_afu()
732 adapter->afu[afu->slice] = afu; in cxl_init_afu()
[all …]
Dfile.c32 #define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
54 int slice = CXL_DEVT_AFU(inode->i_rdev); in __afu_open() local
57 pr_devel("afu_open afu%i.%i\n", slice, adapter_num); in __afu_open()
62 if (slice > adapter->slices) in __afu_open()
66 if (!(afu = adapter->afu[slice])) { in __afu_open()
413 "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix); in cxl_add_chardev()
Dcxl.h382 int slice; member
/linux-4.1.27/arch/powerpc/mm/
Dslice.c111 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) in slice_low_has_vma() argument
113 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, in slice_low_has_vma()
117 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) in slice_high_has_vma() argument
119 unsigned long start = slice << SLICE_HIGH_SHIFT; in slice_high_has_vma()
252 unsigned long slice; in slice_scan_available() local
254 slice = GET_LOW_SLICE_INDEX(addr); in slice_scan_available()
255 *boundary_addr = (slice + end) << SLICE_LOW_SHIFT; in slice_scan_available()
256 return !!(available.low_slices & (1u << slice)); in slice_scan_available()
258 slice = GET_HIGH_SLICE_INDEX(addr); in slice_scan_available()
259 *boundary_addr = (slice + end) ? in slice_scan_available()
[all …]
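slice.c above converts addresses to slice indices with a shift and tracks slice availability as a bitmask (low_slices). A sketch of that arithmetic; the shift values 28 (256MB low slices) and 40 (1TB high slices) are assumptions for illustration, since the header defining SLICE_LOW_SHIFT/SLICE_HIGH_SHIFT is not part of these results:

/* Sketch of the address-to-slice-index bookkeeping in slice.c: an
 * address maps to a slice by a shift, and availability is one bit per
 * slice.  Shift values are assumed, not taken from the excerpt.
 */
#include <stdio.h>
#include <stdint.h>

#define SLICE_LOW_SHIFT   28    /* assumed: 256MB low slices */
#define SLICE_HIGH_SHIFT  40    /* assumed: 1TB high slices */

#define GET_LOW_SLICE_INDEX(addr)   ((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr)  ((addr) >> SLICE_HIGH_SHIFT)

int main(void)
{
	uint64_t addr = 0x30000000ull;          /* falls in low slice 3 */
	uint16_t low_slices = 0x00f0;           /* slices 4..7 marked available */

	unsigned long slice = GET_LOW_SLICE_INDEX(addr);
	uint64_t boundary = (uint64_t)(slice + 1) << SLICE_LOW_SHIFT;

	printf("addr 0x%llx -> low slice %lu, next boundary 0x%llx, %s\n",
	       (unsigned long long)addr, slice, (unsigned long long)boundary,
	       (low_slices & (1u << slice)) ? "available" : "not available");

	addr = 3ull << SLICE_HIGH_SHIFT;        /* start of high slice 3 */
	printf("addr 0x%llx -> high slice %llu\n",
	       (unsigned long long)addr,
	       (unsigned long long)GET_HIGH_SLICE_INDEX(addr));
	return 0;
}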
DMakefile28 obj-$(CONFIG_PPC_MM_SLICES) += slice.o
/linux-4.1.27/arch/ia64/sn/kernel/
Dirq.c116 nasid_t nasid, int slice) in sn_retarget_vector() argument
143 status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice); in sn_retarget_vector()
165 nasid, slice); in sn_retarget_vector()
210 int slice; in sn_set_affinity_irq() local
213 slice = cpuid_to_slice(cpumask_first_and(mask, cpu_online_mask)); in sn_set_affinity_irq()
217 (void)sn_retarget_vector(sn_irq_info, nasid, slice); in sn_set_affinity_irq()
341 int slice = sn_irq_info->irq_slice; in sn_irq_fixup() local
342 int cpu = nasid_slice_to_cpuid(nasid, slice); in sn_irq_fixup()
Dsetup.c567 int slice; in sn_cpu_init() local
619 if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice)) in sn_cpu_init()
625 nodepdaindr[i]->phys_cpuid[cpuid].slice = slice; in sn_cpu_init()
635 (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); in sn_cpu_init()
672 (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]); in sn_cpu_init()
748 nasid_slice_to_cpuid(int nasid, int slice) in nasid_slice_to_cpuid() argument
754 cpuid_to_slice(cpu) == slice) in nasid_slice_to_cpuid()
Dmsi_sn.c159 int slice; in sn_set_msi_irq_affinity() local
188 slice = cpuid_to_slice(cpu); in sn_set_msi_irq_affinity()
190 new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice); in sn_set_msi_irq_affinity()
Dtiocx.c280 nasid_t req_nasid, int slice) in tiocx_irq_alloc() argument
294 req_nasid, slice); in tiocx_irq_alloc()
/linux-4.1.27/tools/perf/scripts/python/
Dsched-migration.py232 slice = TimeSlice(ts, TimeSlice(-1, None))
234 slice = self.data[-1].next(ts)
235 return slice
280 def update_rectangle_cpu(self, slice, cpu): argument
281 rq = slice.rqs[cpu]
283 if slice.total_load != 0:
284 load_rate = rq.load() / float(slice.total_load)
293 if cpu in slice.event_cpus:
296 self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
/linux-4.1.27/arch/ia64/include/asm/sn/
Dsn_cpuid.h96 #define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice)
106 #define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice)
Dnodepda.h34 char slice; member
Dsn_sal.h1006 ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice) in ia64_sn_get_sapic_info() argument
1020 if (slice) *slice = (sapicid >> 12) & 3; in ia64_sn_get_sapic_info()
1030 if (slice) *slice = (int) ret_stuff.v2; in ia64_sn_get_sapic_info()
Dgeo.h56 char slice; /* Which CPU on the node */ member
/linux-4.1.27/Documentation/filesystems/
Dbfs.txt4 The BFS filesystem is used by SCO UnixWare OS for the /stand slice, which
36 slice contains it. The command prtvtoc(1M) is your friend:
41 look for the slice with tag "STAND", which is usually slice 10. With this
/linux-4.1.27/fs/nfs/blocklayout/
Ddev.c82 p = xdr_decode_hyper(p, &b->slice.start); in nfs4_block_decode_volume()
83 p = xdr_decode_hyper(p, &b->slice.len); in nfs4_block_decode_volume()
84 b->slice.volume = be32_to_cpup(p++); in nfs4_block_decode_volume()
221 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); in bl_parse_slice()
225 d->disk_offset = v->slice.start; in bl_parse_slice()
226 d->len = v->slice.len; in bl_parse_slice()
Dblocklayout.h82 } slice; member
/linux-4.1.27/drivers/staging/lustre/lustre/obdecho/
Decho_client.c228 const struct cl_page_slice *slice) in echo_page_vmpage() argument
230 return cl2echo_page(slice)->ep_vmpage; in echo_page_vmpage()
234 const struct cl_page_slice *slice, in echo_page_own() argument
237 struct echo_page *ep = cl2echo_page(slice); in echo_page_own()
247 const struct cl_page_slice *slice, in echo_page_disown() argument
250 struct echo_page *ep = cl2echo_page(slice); in echo_page_disown()
257 const struct cl_page_slice *slice, in echo_page_discard() argument
260 cl_page_delete(env, slice->cpl_page); in echo_page_discard()
264 const struct cl_page_slice *slice) in echo_page_is_vmlocked() argument
266 if (mutex_is_locked(&cl2echo_page(slice)->ep_lock)) in echo_page_is_vmlocked()
[all …]
/linux-4.1.27/drivers/gpu/drm/i915/
Di915_sysfs.c200 int slice = (int)(uintptr_t)attr->private; in i915_l3_read() local
215 if (dev_priv->l3_parity.remap_info[slice]) in i915_l3_read()
217 dev_priv->l3_parity.remap_info[slice] + (offset/4), in i915_l3_read()
238 int slice = (int)(uintptr_t)attr->private; in i915_l3_write() local
252 if (!dev_priv->l3_parity.remap_info[slice]) { in i915_l3_write()
272 dev_priv->l3_parity.remap_info[slice] = temp; in i915_l3_write()
274 memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count); in i915_l3_write()
278 ctx->remap_slice |= (1<<slice); in i915_l3_write()
Di915_irq.c1162 uint8_t slice = 0; in ivybridge_parity_work() local
1178 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { in ivybridge_parity_work()
1181 slice--; in ivybridge_parity_work()
1182 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) in ivybridge_parity_work()
1185 dev_priv->l3_parity.which_slice &= ~(1<<slice); in ivybridge_parity_work()
1187 reg = GEN7_L3CDERRST1 + (slice * 0x200); in ivybridge_parity_work()
1201 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); in ivybridge_parity_work()
1208 slice, row, bank, subbank); in ivybridge_parity_work()
Di915_reg.h1368 #define GEN9_IZ_HASHING_MASK(slice) (0x3 << (slice * 2)) argument
1369 #define GEN9_IZ_HASHING(slice, val) ((val) << (slice * 2)) argument
Di915_gem.c4627 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice) in i915_gem_l3_remap() argument
4631 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); in i915_gem_l3_remap()
4632 u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; in i915_gem_l3_remap()
Di915_drv.h2746 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
/linux-4.1.27/fs/bfs/
DKconfig8 and corresponds to the slice marked as "STAND" in the UnixWare
10 on your /stand slice from within Linux. You then also need to say Y
/linux-4.1.27/fs/efs/
Dsuper.c165 int pt_type, slice = -1; in module_exit() local
222 slice = i; in module_exit()
226 if (slice == -1) { in module_exit()
230 pr_info("using slice %d (type %s, offset 0x%x)\n", slice, in module_exit()
/linux-4.1.27/drivers/net/ethernet/myricom/myri10ge/
Dmyri10ge.c1972 int slice; in myri10ge_get_ethtool_stats() local
2013 for (slice = 0; slice < mgp->num_slices; slice++) { in myri10ge_get_ethtool_stats()
2014 ss = &mgp->ss[slice]; in myri10ge_get_ethtool_stats()
2015 data[i++] = slice; in myri10ge_get_ethtool_stats()
2124 int i, slice, status; in myri10ge_allocate_rings() local
2128 slice = ss - mgp->ss; in myri10ge_allocate_rings()
2129 cmd.data0 = slice; in myri10ge_allocate_rings()
2132 cmd.data0 = slice; in myri10ge_allocate_rings()
2203 slice, ss->rx_small.fill_cnt); in myri10ge_allocate_rings()
2210 slice, ss->rx_big.fill_cnt); in myri10ge_allocate_rings()
[all …]
/linux-4.1.27/arch/mips/include/asm/sn/sn0/
Daddrs.h148 #define KERN_NMI_ADDR(nasid, slice) \ argument
150 (IP27_NMI_KREGS_CPU_SIZE * (slice)))
/linux-4.1.27/arch/mips/include/asm/mach-ip27/
Dmmzone.h23 struct slice_data slice[2]; member
/linux-4.1.27/arch/mips/include/asm/mach-loongson/
Dmmzone.h36 struct slice_data slice[2]; member
/linux-4.1.27/arch/ia64/sn/kernel/sn2/
Dsn_hwperf.c386 char slice; in sn_topology_show() local
475 slice = 'a' + cpuid_to_slice(i); in sn_topology_show()
479 i, obj->location, slice, in sn_topology_show()
698 char slice; in sn_hwperf_ioctl() local
761 slice = 'a' + cpuid_to_slice(j); in sn_hwperf_ioctl()
772 slice); in sn_hwperf_ioctl()
/linux-4.1.27/Documentation/ABI/testing/
Dsysfs-cfq-target-latency8 use it to calculate the time slice used for every task.
/linux-4.1.27/Documentation/devicetree/bindings/powerpc/fsl/
Dmpc5121-psc.txt25 - fsl,rx-fifo-size : the size of the RX fifo slice (a multiple of 4)
26 - fsl,tx-fifo-size : the size of the TX fifo slice (a multiple of 4)
/linux-4.1.27/Documentation/block/
Dcfq-iosched.txt88 to recompute the slice time for each process based on the target_latency set
91 system to get a full time slice.
97 This parameter is used to calculate the time slice for a process if cfq's
118 device request queue in queue's slice time. The maximum number of request that
126 queue. This parameter is used to calculate the time slice of synchronous
137 queue's time slice, a request will not be dispatched if the number of request
207 process gets bigger time slice and lower priority process gets smaller time
208 slice. Measuring time becomes harder if storage is fast and supports NCQ and
/linux-4.1.27/block/
Dcfq-iosched.c796 unsigned long slice; in cfq_io_thinktime_big() local
800 slice = cfqd->cfq_group_idle; in cfq_io_thinktime_big()
802 slice = cfqd->cfq_slice_idle; in cfq_io_thinktime_big()
803 return ttime->ttime_mean > slice; in cfq_io_thinktime_big()
1018 unsigned slice = cfq_prio_to_slice(cfqd, cfqq); in cfq_scaled_cfqq_slice() local
1035 min(slice, base_low_slice * slice / sync_slice); in cfq_scaled_cfqq_slice()
1038 slice = max(slice * group_slice / expect_latency, in cfq_scaled_cfqq_slice()
1042 return slice; in cfq_scaled_cfqq_slice()
1048 unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); in cfq_set_prio_slice() local
1051 cfqq->slice_end = jiffies + slice; in cfq_set_prio_slice()
[all …]
/linux-4.1.27/Documentation/scheduler/
Dsched-bwc.txt20 within each of these updates is tunable and described as the "slice".
56 is described as the "slice".
61 Larger slice values will reduce transfer overheads, while smaller values allow
Dsched-nice-design.txt87 enough), the scheduler was decoupled from 'time slice' and HZ concepts
/linux-4.1.27/drivers/gpu/drm/radeon/
Dr600_blit.c80 int pitch, slice; in set_render_target() local
90 slice = ((w * h) / 64) - 1; in set_render_target()
109 OUT_RING((pitch << 0) | (slice << 10)); in set_render_target()
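set_render_target() above derives the surface slice field as (w*h)/64 - 1, i.e. the pixel count in 64-pixel units minus one, and packs it next to the pitch field at bit 10. A tiny sketch of that packing; the pitch value here is a placeholder, since its derivation is not part of the excerpt:

/* Sketch of the slice/pitch packing from the r600_blit.c excerpt. */
#include <stdio.h>

int main(void)
{
	unsigned int w = 256, h = 128;
	unsigned int slice = ((w * h) / 64) - 1;  /* as in the excerpt: 511 */
	unsigned int pitch = 31;                  /* placeholder pitch field */
	unsigned int reg = (pitch << 0) | (slice << 10);

	printf("slice = %u, packed register = 0x%08x\n", slice, reg);
	return 0;
}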
Devergreen_cs.c394 unsigned pitch, slice, mslice; in evergreen_cs_track_validate_cb() local
400 slice = track->cb_color_slice[id]; in evergreen_cs_track_validate_cb()
402 surf.nby = ((slice + 1) * 64) / surf.nbx; in evergreen_cs_track_validate_cb()
464 slice = ((nby * surf.nbx) / 64) - 1; in evergreen_cs_track_validate_cb()
469 ib[track->cb_color_slice_idx[id]] = slice; in evergreen_cs_track_validate_cb()
479 radeon_bo_size(track->cb_color_bo[id]), slice); in evergreen_cs_track_validate_cb()
561 unsigned pitch, slice, mslice; in evergreen_cs_track_validate_stencil() local
567 slice = track->db_depth_slice; in evergreen_cs_track_validate_stencil()
569 surf.nby = ((slice + 1) * 64) / surf.nbx; in evergreen_cs_track_validate_stencil()
658 unsigned pitch, slice, mslice; in evergreen_cs_track_validate_depth() local
[all …]
/linux-4.1.27/crypto/
Ddrbg.c1446 unsigned int slice = 0; in drbg_generate_long() local
1450 slice = ((buflen - len) / drbg_max_request_bytes(drbg)); in drbg_generate_long()
1451 chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len); in drbg_generate_long()
1456 } while (slice > 0 && (len < buflen)); in drbg_generate_long()
/linux-4.1.27/drivers/block/
Dsunvdc.c487 desc->slice = 0xff; in __send_request()
489 desc->slice = 0; in __send_request()
646 desc->slice = 0; in generic_request()
/linux-4.1.27/kernel/sched/
Dfair.c638 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); in sched_slice() local
653 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
655 return slice; in sched_slice()
678 u32 slice; in init_task_runnable_average() local
680 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10; in init_task_runnable_average()
681 p->se.avg.runnable_avg_sum = p->se.avg.running_avg_sum = slice; in init_task_runnable_average()
682 p->se.avg.avg_period = slice; in init_task_runnable_average()
3927 u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); in do_sched_cfs_slack_timer() local
3937 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) in do_sched_cfs_slack_timer()
4177 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair() local
[all …]
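sched_slice() above takes the scheduling period for the current number of runnable entities and scales it by the entity's weight relative to the queue load (the job __calc_delta() does in the excerpt). A toy calculation of that proportional slice; the 6ms base period and 0.75ms granularity are the usual CFS defaults, used here only as illustrative assumptions:

/* Toy version of the proportional CFS time slice: the period is split
 * among runnable entities in proportion to their load weight.
 */
#include <stdio.h>

#define BASE_PERIOD_NS  6000000ull   /* assumed sched_latency     */
#define MIN_GRANULE_NS   750000ull   /* assumed min granularity   */
#define NR_LATENCY      (BASE_PERIOD_NS / MIN_GRANULE_NS)

static unsigned long long sched_period(unsigned int nr_running)
{
	/* Stretch the period once there are too many tasks to fit. */
	if (nr_running > NR_LATENCY)
		return nr_running * MIN_GRANULE_NS;
	return BASE_PERIOD_NS;
}

static unsigned long long toy_sched_slice(unsigned int nr_running,
					  unsigned long weight,
					  unsigned long total_weight)
{
	/* slice = period * weight / total queue load */
	return sched_period(nr_running) * weight / total_weight;
}

int main(void)
{
	/* Two nice-0 tasks (weight 1024) and one heavier task (weight 2048). */
	unsigned long total = 1024 + 1024 + 2048;

	printf("nice-0 slice: %llu ns\n", toy_sched_slice(3, 1024, total));
	printf("heavy  slice: %llu ns\n", toy_sched_slice(3, 2048, total));
	return 0;
}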
/linux-4.1.27/arch/sparc/include/asm/
Dvio.h155 u8 slice; member
/linux-4.1.27/lib/
DKconfig124 of CRC32 algorithm. Choose the default ("slice by 8") unless you
142 This is a bit slower than slice by 8, but has a smaller 4KiB lookup
/linux-4.1.27/arch/mips/cavium-octeon/executive/
Dcvmx-spi.c367 gmxx_tx_spi_max.s.slice = 0; in cvmx_spi_calendar_setup_cb()
/linux-4.1.27/Documentation/timers/
Dhighres.txt238 systems, where the time slice is controlled by the scheduler, variable
/linux-4.1.27/scripts/
Danalyze_suspend.py754 def slice(self, t0, tN): member in FTraceCallGraph
1552 dev['ftrace'] = cg.slice(dev['start'], dev['end'])
/linux-4.1.27/arch/mips/include/asm/octeon/
Dcvmx-gmxx-defs.h6807 uint64_t slice:7; member
6813 uint64_t slice:7;
/linux-4.1.27/init/
DKconfig1124 control disk bandwidth allocation (proportional time slice allocation)
/linux-4.1.27/Documentation/
Dkernel-parameters.txt405 Align virtual addresses by clearing slice [14:12] when