Searched refs:period (Results 1 - 200 of 1178) sorted by relevance


/linux-4.1.27/drivers/gpu/drm/tegra/
mipi-phy.c
20 unsigned long period) mipi_dphy_timing_get_default()
23 timing->clkpost = 70 + 52 * period; mipi_dphy_timing_get_default()
33 timing->hsprepare = 65 + 5 * period; mipi_dphy_timing_get_default()
34 timing->hszero = 145 + 5 * period; mipi_dphy_timing_get_default()
35 timing->hssettle = 85 + 6 * period; mipi_dphy_timing_get_default()
42 * T_HS-TRAIL = max(n * 8 * period, 60 + n * 4 * period) mipi_dphy_timing_get_default()
46 * not parameterize on anything other than period, so this code will mipi_dphy_timing_get_default()
49 timing->hstrail = max(4 * 8 * period, 60 + 4 * 4 * period); mipi_dphy_timing_get_default()
66 unsigned long period) mipi_dphy_timing_validate()
71 if (timing->clkpost < (60 + 52 * period)) mipi_dphy_timing_validate()
92 if (timing->dtermen > 35 + 4 * period) mipi_dphy_timing_validate()
95 if (timing->eot > 105 + 12 * period) mipi_dphy_timing_validate()
101 if (timing->hsprepare < 40 + 4 * period || mipi_dphy_timing_validate()
102 timing->hsprepare > 85 + 6 * period) mipi_dphy_timing_validate()
105 if (timing->hsprepare + timing->hszero < 145 + 10 * period) mipi_dphy_timing_validate()
108 if ((timing->hssettle < 85 + 6 * period) || mipi_dphy_timing_validate()
109 (timing->hssettle > 145 + 10 * period)) mipi_dphy_timing_validate()
112 if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period) mipi_dphy_timing_validate()
115 if (timing->hstrail < max(8 * period, 60 + 4 * period)) mipi_dphy_timing_validate()
19 mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, unsigned long period) mipi_dphy_timing_get_default() argument
65 mipi_dphy_timing_validate(struct mipi_dphy_timing *timing, unsigned long period) mipi_dphy_timing_validate() argument
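
Note: the mipi-phy.c hits above are all simple linear functions of the D-PHY clock period. Below is a minimal standalone sketch of that arithmetic (not the driver itself), assuming the period is in nanoseconds as the dsi.c hits further down suggest, with 5 ns used purely as an example input.

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long period = 5;	/* hypothetical clock period, ns */

	unsigned long clkpost   = 70 + 52 * period;
	unsigned long hsprepare = 65 + 5 * period;
	unsigned long hszero    = 145 + 5 * period;
	unsigned long hssettle  = 85 + 6 * period;
	/* T_HS-TRAIL = max(n * 8 * period, 60 + n * 4 * period), with n = 4 */
	unsigned long hstrail   = MAX(4 * 8 * period, 60 + 4 * 4 * period);

	/* the same bounds mipi_dphy_timing_validate() checks above */
	printf("clkpost=%lu (min %lu)\n", clkpost, 60 + 52 * period);
	printf("hsprepare=%lu (range %lu..%lu)\n",
	       hsprepare, 40 + 4 * period, 85 + 6 * period);
	printf("hsprepare+hszero=%lu (min %lu)\n",
	       hsprepare + hszero, 145 + 10 * period);
	printf("hssettle=%lu hstrail=%lu (hstrail min %lu)\n",
	       hssettle, hstrail, MAX(8 * period, 60 + 4 * period));
	return 0;
}
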
mipi-phy.h
47 unsigned long period);
49 unsigned long period);
dsi.c
35 unsigned long period; member in struct:tegra_dsi_state
366 unsigned long period, tegra_dsi_set_phy_timing()
371 value = DSI_TIMING_FIELD(timing->hsexit, period, 1) << 24 | tegra_dsi_set_phy_timing()
372 DSI_TIMING_FIELD(timing->hstrail, period, 0) << 16 | tegra_dsi_set_phy_timing()
373 DSI_TIMING_FIELD(timing->hszero, period, 3) << 8 | tegra_dsi_set_phy_timing()
374 DSI_TIMING_FIELD(timing->hsprepare, period, 1); tegra_dsi_set_phy_timing()
377 value = DSI_TIMING_FIELD(timing->clktrail, period, 1) << 24 | tegra_dsi_set_phy_timing()
378 DSI_TIMING_FIELD(timing->clkpost, period, 1) << 16 | tegra_dsi_set_phy_timing()
379 DSI_TIMING_FIELD(timing->clkzero, period, 1) << 8 | tegra_dsi_set_phy_timing()
380 DSI_TIMING_FIELD(timing->lpx, period, 1); tegra_dsi_set_phy_timing()
383 value = DSI_TIMING_FIELD(timing->clkprepare, period, 1) << 16 | tegra_dsi_set_phy_timing()
384 DSI_TIMING_FIELD(timing->clkpre, period, 1) << 8 | tegra_dsi_set_phy_timing()
385 DSI_TIMING_FIELD(0xff * period, period, 0) << 0; tegra_dsi_set_phy_timing()
388 value = DSI_TIMING_FIELD(timing->taget, period, 1) << 16 | tegra_dsi_set_phy_timing()
389 DSI_TIMING_FIELD(timing->tasure, period, 1) << 8 | tegra_dsi_set_phy_timing()
390 DSI_TIMING_FIELD(timing->tago, period, 1); tegra_dsi_set_phy_timing()
394 tegra_dsi_set_phy_timing(dsi->slave, period, timing); tegra_dsi_set_phy_timing()
813 * multiply the period by 8. tegra_dsi_encoder_mode_set()
815 tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing); tegra_dsi_encoder_mode_set()
917 state->period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, plld); tegra_dsi_encoder_atomic_check()
919 err = mipi_dphy_timing_get_default(&state->timing, state->period); tegra_dsi_encoder_atomic_check()
923 err = mipi_dphy_timing_validate(&state->timing, state->period); tegra_dsi_encoder_atomic_check()
365 tegra_dsi_set_phy_timing(struct tegra_dsi *dsi, unsigned long period, const struct mipi_dphy_timing *timing) tegra_dsi_set_phy_timing() argument
dsi.h
87 #define DSI_TIMING_FIELD(value, period, hwinc) \
88 ((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff)
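
Note: taken together, the dsi.c and dsi.h hits show how a timing value in nanoseconds becomes an 8-bit register field: divide by the byte-clock period (the bit period derived from PLLD, times 8) with round-to-nearest, then subtract the hardware increment. A small standalone sketch follows; the 500 MHz PLLD rate is an arbitrary example, and DIV_ROUND_CLOSEST is a simplified stand-in valid for non-negative operands.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define DSI_TIMING_FIELD(value, period, hwinc) \
	((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff)

int main(void)
{
	/* assumed 500 MHz PLLD -> 2 ns bit period, times 8 = 16 ns byte clock */
	unsigned long period = DIV_ROUND_CLOSEST(1000000000UL, 500000000UL) * 8;
	/* default HS-PREPARE for a 2 ns bit period, per the mipi-phy.c hit above */
	unsigned long hsprepare = 65 + 5 * 2;

	printf("period=%lu ns, HS-PREPARE field=0x%02lx\n",
	       period, DSI_TIMING_FIELD(hsprepare, period, 1));
	return 0;
}
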
/linux-4.1.27/lib/
flex_proportions.c
2 * Floating proportions with flexible aging period
13 * Where x_{i,j} is j's number of events in i-th last time period and x_i is
14 * total number of events in i-th last time period.
25 * When a new period is declared, we could do:
32 * occurs. This can be trivially implemented by remembering last period in
41 p->period = 0; fprop_global_init()
56 * Declare @periods new periods. It is upto the caller to make sure period
82 p->period += periods; fprop_new_period()
96 pl->period = 0; fprop_local_init_single()
108 unsigned int period = p->period; fprop_reflect_period_single() local
111 /* Fast path - period didn't change */ fprop_reflect_period_single()
112 if (pl->period == period) fprop_reflect_period_single()
115 /* Someone updated pl->period while we were spinning? */ fprop_reflect_period_single()
116 if (pl->period >= period) { fprop_reflect_period_single()
121 if (period - pl->period < BITS_PER_LONG) fprop_reflect_period_single()
122 pl->events >>= period - pl->period; fprop_reflect_period_single()
125 pl->period = period; fprop_reflect_period_single()
178 pl->period = 0; fprop_local_init_percpu()
191 unsigned int period = p->period; fprop_reflect_period_percpu() local
194 /* Fast path - period didn't change */ fprop_reflect_period_percpu()
195 if (pl->period == period) fprop_reflect_period_percpu()
198 /* Someone updated pl->period while we were spinning? */ fprop_reflect_period_percpu()
199 if (pl->period >= period) { fprop_reflect_period_percpu()
204 if (period - pl->period < BITS_PER_LONG) { fprop_reflect_period_percpu()
211 -val + (val >> (period-pl->period)), PROP_BATCH); fprop_reflect_period_percpu()
214 pl->period = period; fprop_reflect_period_percpu()
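
Note: the aging in fprop_reflect_period_single() above is a lazy catch-up: each missed period halves the local event count, which amounts to a single right shift by the number of missed periods (or zeroing when everything would shift out). A minimal userspace sketch of that logic, with locking and the percpu variant omitted:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct fprop_local { unsigned long events; unsigned int period; };

static void reflect_period(struct fprop_local *pl, unsigned int period)
{
	if (pl->period >= period)		/* someone already caught up */
		return;
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;	/* halve once per missed period */
	else
		pl->events = 0;
	pl->period = period;
}

int main(void)
{
	struct fprop_local pl = { .events = 1024, .period = 3 };
	reflect_period(&pl, 7);			/* 4 missed periods -> divide by 16 */
	printf("events=%lu period=%u\n", pl.events, pl.period);	/* 64, 7 */
	return 0;
}
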
proportions.c
14 * and i the time period over which the differential is taken. So d/dt_{-i} is
15 * the differential over the i-th last period.
34 * if (++x_{j}, ++t > period)
45 * if (++t > period) t /= 2:
49 * period/2 + (++t % period/2)
51 * [ Furthermore, when we choose period to be 2^n it can be written in terms of
56 * c = t / (period/2)
61 * This allows us to do away with the loop over all prop_locals on each period
62 * expiration. By remembering the period count under which it was last accessed
67 * We can then lazily catch up to the global period count every time we are
195 pl->period = 0; prop_local_init_percpu()
205 * Catch up with missed period expirations.
214 unsigned long period = 1UL << (pg->shift - 1); prop_norm_percpu() local
215 unsigned long period_mask = ~(period - 1); prop_norm_percpu()
223 * Fast path - check if the local and global period count still match prop_norm_percpu()
226 if (pl->period == global_period) prop_norm_percpu()
230 prop_adjust_shift(&pl->shift, &pl->period, pg->shift); prop_norm_percpu()
233 * For each missed period, we half the local counter. prop_norm_percpu()
235 * pl->events >> (global_period - pl->period); prop_norm_percpu()
237 period = (global_period - pl->period) >> (pg->shift - 1); prop_norm_percpu()
238 if (period < BITS_PER_LONG) { prop_norm_percpu()
244 __percpu_counter_add(&pl->events, -val + (val >> period), prop_norm_percpu()
249 pl->period = global_period; prop_norm_percpu()
301 * p_{j} = x_{j} / (period/2 + t % period/2)
329 pl->period = 0; prop_local_init_single()
339 * Catch up with missed period expirations.
344 unsigned long period = 1UL << (pg->shift - 1); prop_norm_single() local
345 unsigned long period_mask = ~(period - 1); prop_norm_single()
353 * Fast path - check if the local and global period count still match prop_norm_single()
356 if (pl->period == global_period) prop_norm_single()
360 prop_adjust_shift(&pl->shift, &pl->period, pg->shift); prop_norm_single()
362 * For each missed period, we half the local counter. prop_norm_single()
364 period = (global_period - pl->period) >> (pg->shift - 1); prop_norm_single()
365 if (likely(period < BITS_PER_LONG)) prop_norm_single()
366 pl->events >>= period; prop_norm_single()
369 pl->period = global_period; prop_norm_single()
389 * p_{j} = x_{j} / (period/2 + t % period/2)
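
Note: the proportions.c comments above describe the same scheme with an explicit denominator: the reported fraction is p_j = x_j / (period/2 + t % (period/2)) with period = 2^shift, and a local counter that missed N period expirations is shifted right by N. A small numeric sketch, with illustrative values only and the percpu/locking machinery left out:

#include <stdio.h>

int main(void)
{
	unsigned int shift = 5;			/* period = 2^5 = 32 events */
	unsigned long period = 1UL << shift;
	unsigned long t = 45;			/* global event counter */
	unsigned long x_j = 9;			/* j's (already aged) event counter */

	/* denominator from the comment: period/2 + t % (period/2) */
	unsigned long denom = period / 2 + t % (period / 2);
	printf("p_j ~= %lu/%lu = %.3f\n", x_j, denom, (double)x_j / denom);

	/* catch-up as in prop_norm_single(): number of missed periods is the
	 * difference of the masked period counters shifted by (shift - 1) */
	unsigned long global_period = 64, local_period = 0;
	unsigned long missed = (global_period - local_period) >> (shift - 1);
	printf("missed periods = %lu -> x_j >>= %lu\n", missed, missed);
	return 0;
}
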
/linux-4.1.27/arch/m68k/amiga/
amisound.c
29 * The minimum period for audio may be modified by the frame buffer
40 * Current period (set by dmasound.c)
82 unsigned long period = (clock_constant / hz); amiga_mksound() local
84 if (period < amiga_audio_min_period) amiga_mksound()
85 period = amiga_audio_min_period; amiga_mksound()
86 if (period > MAX_PERIOD) amiga_mksound()
87 period = MAX_PERIOD; amiga_mksound()
89 /* setup pointer to data, period, length and volume */ amiga_mksound()
92 custom.aud[2].audper = (unsigned short)period; amiga_mksound()
114 /* restore period to previous value after beeping */ nosound()
/linux-4.1.27/include/linux/
flex_proportions.h
2 * Floating proportions with flexible aging period
18 * bound on the number of events per period like
28 /* Number of events in the current period */
30 /* Current period */
31 unsigned int period; member in struct:fprop_global
32 /* Synchronization with period transitions */
47 unsigned int period; member in struct:fprop_local_single
48 raw_spinlock_t lock; /* Protect period and numerator */
79 unsigned int period; member in struct:fprop_local_percpu
80 raw_spinlock_t lock; /* Protect period and numerator */
timeriomem-rng.h
15 unsigned int period; member in struct:timeriomem_rng_data
proportions.h
19 * The period over which we differentiate
21 * period = 2^shift
28 * counter bits, the remaining upper bits the period counter.
61 unsigned long period; member in struct:prop_local_percpu
112 unsigned long period; member in struct:prop_local_single
pwm.h
66 * period
69 * period
90 unsigned int period; /* in nanoseconds */ member in struct:pwm_device
95 static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) pwm_set_period() argument
98 pwm->period = period; pwm_set_period()
103 return pwm ? pwm->period : 0; pwm_get_period()
126 * @config: configure duty cycles and period length for this PWM
277 unsigned int period; member in struct:pwm_lookup
287 .period = _period, \
i2c-algo-pca.h
8 /* Internal period for PCA9665 oscilator */
35 #define I2C_PCA_ISCLL 0x02 /* SCL LOW period */
36 #define I2C_PCA_ISCLH 0x03 /* SCL HIGH period */
cnt32_to_63.h
38 a relatively short period making wrap-arounds rather frequent. This
45 * memory is used to synchronize with the hardware clock half-period. When
58 * 1) this code must be called at least once per each half period of the
62 * 32-bit counter half period minus the longest period between two
rcutree.h
56 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
58 * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
59 * approach to force the grace period to end quickly. This consumes
devfreq-event.h
42 * @load_count : load count of devfreq-event device for the given period.
43 * @total_count : total count of devfreq-event device for the given period.
48 * This structure contains the data of devfreq-event device for polling period.
alarmtimer.h
29 * @period: Period for recuring alarms
mailbox_client.h
23 * @tx_tout: Max block period in ms before TX is assumed failure
srcu.h
112 * call_srcu() - Queue a callback for invocation after an SRCU grace period
115 * @func: function to be invoked after the SRCU grace period
118 * grace period elapses, in other words after all pre-existing SRCU
205 * call anything that waits on an SRCU grace period for the same
207 * one way to indirectly wait on an SRCU grace period is to acquire
rcupdate.h
129 * call_rcu() - Queue an RCU callback for invocation after a grace period.
131 * @func: actual callback function to be invoked after the grace period
134 * period elapses, in other words after all pre-existing RCU read-side
141 * Note that all CPUs must agree that the grace period extended beyond
171 * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
173 * @func: actual callback function to be invoked after the grace period
176 * period elapses, in other words after all currently executing RCU
195 * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
197 * @func: actual callback function to be invoked after the grace period
200 * period elapses, in other words after all currently executing RCU
228 * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
230 * @func: actual callback function to be invoked after the grace period
233 * period elapses, in other words after all currently executing RCU
493 * period. Otherwise we would delay any grace period as long as we run in
713 * access to the pointer was removed at least one grace period ago, as
716 * when tearing down multi-linked structures after a grace period
1128 * kfree_rcu() - kfree an object after a grace period.
oom.h
90 * A coredumping process may sleep for an extended period in exit_mm(), task_will_free_mem()
/linux-4.1.27/drivers/char/hw_random/
timeriomem-rng.c
39 unsigned int period; member in struct:timeriomem_rng_private_data
77 delay = priv->period - (delay % priv->period); timeriomem_rng_data_read()
103 int period; timeriomem_rng_probe() local
132 "period", &i)) timeriomem_rng_probe()
133 period = i; timeriomem_rng_probe()
135 dev_err(&pdev->dev, "missing period\n"); timeriomem_rng_probe()
139 period = pdata->period; timeriomem_rng_probe()
142 priv->period = usecs_to_jiffies(period); timeriomem_rng_probe()
143 if (priv->period < 1) { timeriomem_rng_probe()
144 dev_err(&pdev->dev, "period is less than one jiffy\n"); timeriomem_rng_probe()
174 priv->io_base, period); timeriomem_rng_probe()
/linux-4.1.27/fs/nfs_common/
grace.c
18 * @lm: who this grace period is for
20 * A grace period is a period during which locks should not be given
23 * check when they are in a grace period.
25 * This function is called to start a grace period.
41 * @lm: who this grace period is for
44 * resume regular locking. The grace period will not end until all lock
/linux-4.1.27/arch/m68k/atari/
atasound.c
60 int period; atari_mksound() local
72 /* Convert from frequency value to PSG period value (base atari_mksound()
75 period = PSG_FREQ / hz; atari_mksound()
77 if (period > 0xfff) period = 0xfff; atari_mksound()
81 sound_ym.wd_data = period & 0xff; atari_mksound()
83 sound_ym.wd_data = (period >> 8) & 0xf; atari_mksound()
/linux-4.1.27/arch/microblaze/kernel/
heartbeat.c
22 static unsigned int cnt, period, dist; microblaze_heartbeat() local
30 if (++cnt > period) { microblaze_heartbeat()
34 * period length in dependency of the current (5min) microblaze_heartbeat()
38 period = ((672 << FSHIFT) / (5 * avenrun[0] + microblaze_heartbeat()
40 dist = period / 4; microblaze_heartbeat()
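
Note: the heartbeat period above (a variant of the same formula appears in the m68k timer_interrupt() and LED heartbeat trigger hits below) scales inversely with the 1-minute load average, which the kernel keeps in fixed point with FSHIFT = 11. Worked numbers: at zero load the period is 672/7 + 30 = 126 ticks, at a load of 1.0 it is 672/12 + 30 = 86. A standalone sketch with illustrative load values:

#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1 << FSHIFT)		/* avenrun[] fixed-point unit */

static unsigned int heartbeat_period(unsigned long avenrun0)
{
	return ((672 << FSHIFT) / (5 * avenrun0 + (7 << FSHIFT))) + 30;
}

int main(void)
{
	unsigned long loads[] = { 0, FIXED_1 / 2, FIXED_1, 4 * FIXED_1 };
	for (unsigned int i = 0; i < sizeof(loads) / sizeof(loads[0]); i++) {
		unsigned int period = heartbeat_period(loads[i]);
		printf("load %.2f -> period %u ticks, dist %u\n",
		       (double)loads[i] / FIXED_1, period, period / 4);
	}
	return 0;
}
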
/linux-4.1.27/drivers/leds/trigger/
ledtrig-heartbeat.c
28 unsigned int period; member in struct:heartbeat_trig_data
49 * heartbeat period length in dependency of the led_heartbeat_function()
53 heartbeat_data->period = 300 + led_heartbeat_function()
55 heartbeat_data->period = led_heartbeat_function()
56 msecs_to_jiffies(heartbeat_data->period); led_heartbeat_function()
62 delay = heartbeat_data->period / 4 - msecs_to_jiffies(70); led_heartbeat_function()
71 delay = heartbeat_data->period - heartbeat_data->period / 4 - led_heartbeat_function()
/linux-4.1.27/drivers/clk/
clk-pwm.c
74 if (!pwm->period) { clk_pwm_probe()
75 dev_err(&pdev->dev, "invalid PWM period\n"); clk_pwm_probe()
80 clk_pwm->fixed_rate = NSEC_PER_SEC / pwm->period; clk_pwm_probe()
82 if (pwm->period != NSEC_PER_SEC / clk_pwm->fixed_rate && clk_pwm_probe()
83 pwm->period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) { clk_pwm_probe()
85 "clock-frequency does not match PWM period\n"); clk_pwm_probe()
89 ret = pwm_config(pwm, (pwm->period + 1) >> 1, pwm->period); clk_pwm_probe()
/linux-4.1.27/drivers/watchdog/
booke_wdt.c
26 * Also, the wdt_period sets the watchdog timer period timeout.
49 /* For the specified period, determine the number of seconds
54 * 2.5 * (2^(63-period+1)) / timebase_freq
56 * In order to simplify things, we assume that period is
59 static unsigned long long period_to_sec(unsigned int period) period_to_sec() argument
61 unsigned long long tmp = 1ULL << (64 - period); period_to_sec()
74 * This procedure will find the highest period which will give a timeout
80 unsigned int period; sec_to_period() local
81 for (period = 63; period > 0; period--) { sec_to_period()
82 if (period_to_sec(period) >= secs) sec_to_period()
83 return period; sec_to_period()
92 static unsigned long long period_to_sec(unsigned int period) period_to_sec() argument
94 return period; period_to_sec()
155 * effectively disable the watchdog by setting its period to the maximum value.
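
Note: the relation quoted above, timeout ~ 2.5 * 2^(64 - period) / timebase_freq, and the descending search in sec_to_period() can be checked with a standalone sketch. Floating point is used here only for readability (the driver stays in 64-bit integer math), and the 500 MHz timebase is an assumed example value.

#include <stdio.h>

static const double timebase_freq = 500e6;	/* assumed 500 MHz timebase */

/* timeout in seconds for a given wdt_period, per the comment quoted above */
static double period_to_sec(unsigned int period)
{
	return 2.5 * (double)(1ULL << (64 - period)) / timebase_freq;
}

/* highest period value whose timeout still reaches the requested seconds */
static unsigned int sec_to_period(unsigned int secs)
{
	unsigned int period;
	for (period = 63; period > 0; period--)
		if (period_to_sec(period) >= secs)
			return period;
	return 0;
}

int main(void)
{
	printf("period 38 -> %.3f s\n", period_to_sec(38));	/* ~0.3 s */
	printf("30 s -> period %u\n", sec_to_period(30));	/* 31 with this timebase */
	return 0;
}
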
octeon-wdt-main.c
38 * The hardware watchdog interval we call the period. The OCTEON
39 * watchdog goes through several stages, after the first period an
40 * irq is asserted, then if it is not reset, after the next period NMI
41 * is asserted, then after an additional period a chip wide soft reset.
42 * So for the software counters, we reset watchdog after each period
81 /* The maximum period supported. */
84 /* The current period. */
477 * Find the largest interrupt period, that can evenly divide octeon_wdt_calc_parameters()
davinci_wdt.c
89 /* set timeout period */ davinci_wdt_start()
234 "Watchdog heartbeat period in seconds from 1 to "
pnx4008_wdt.c
100 /* the longest pulse period 65541/(13*10^6) seconds ~ 5 ms. */ pnx4008_wdt_start()
224 "Watchdog heartbeat period in seconds from 1 to "
stmp3xxx_rtc_wdt.c
24 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to "
/linux-4.1.27/include/uapi/linux/netfilter/
xt_time.h
20 /* treat timestart > timestop (e.g. 23:00-01:00) as single period */
xt_limit.h
11 /* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
xt_hashlimit.h
8 /* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
/linux-4.1.27/include/uapi/linux/netfilter_bridge/
ebt_limit.h
11 /* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
/linux-4.1.27/drivers/pwm/
pwm-bfin.c
69 unsigned long period, duty; bfin_pwm_config() local
74 period = val; bfin_pwm_config()
76 val = (unsigned long long)period * duty_ns; bfin_pwm_config()
78 duty = period - val; bfin_pwm_config()
80 if (duty >= period) bfin_pwm_config()
81 duty = period - 1; bfin_pwm_config()
85 set_gptimer_period(priv->pin, period); bfin_pwm_config()
pwm-jz4740.c
113 unsigned long period, duty; jz4740_pwm_config() local
120 period = tmp; jz4740_pwm_config()
122 while (period > 0xffff && prescaler < 6) { jz4740_pwm_config()
123 period >>= 2; jz4740_pwm_config()
130 tmp = (unsigned long long)period * duty_ns; jz4740_pwm_config()
132 duty = period - tmp; jz4740_pwm_config()
134 if (duty >= period) jz4740_pwm_config()
135 duty = period - 1; jz4740_pwm_config()
143 jz4740_timer_set_period(pwm->hwpwm, period); jz4740_pwm_config()
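
Note: the jz4740 (and bfin) hits follow a common PWM pattern: convert the requested period to timer ticks at the input clock rate, step the prescaler (divide-by-4 per step here) until the count fits the 16-bit counter, then scale the duty the same way. A simplified standalone sketch of that fit, with an assumed 12 MHz clock and a 50 Hz / 25 % request; the "duty = period - tmp" line in the snippet (the driver programming the low time) is deliberately left out of this sketch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clk_rate  = 12000000;		/* assumed 12 MHz timer clock */
	uint64_t period_ns = 20000000;		/* 20 ms -> 50 Hz PWM */
	uint64_t duty_ns   = 5000000;		/* 25 % duty */

	uint64_t period = clk_rate * period_ns / 1000000000ULL;	/* ticks */
	unsigned int prescaler = 0;

	while (period > 0xffff && prescaler < 6) {
		period >>= 2;			/* each prescaler step divides the clock by 4 */
		prescaler++;
	}

	uint64_t duty = period * duty_ns / period_ns;	/* on-time in ticks */
	if (duty >= period)
		duty = period - 1;

	printf("prescaler=%u period=%llu ticks duty=%llu ticks\n",
	       prescaler, (unsigned long long)period, (unsigned long long)duty);
	return 0;
}
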
pwm-renesas-tpu.c
85 u16 period; member in struct:tpu_pwm_device
188 tpu_pwm_write(pwm, TPU_TGRBn, pwm->period); tpu_pwm_timer_start()
191 pwm->channel, pwm->duty, pwm->period); tpu_pwm_timer_start()
234 pwm->period = 0; tpu_pwm_request()
261 u32 period; tpu_pwm_config() local
272 period = clk_rate / prescalers[prescaler] tpu_pwm_config()
274 if (period <= 0xffff) tpu_pwm_config()
278 if (prescaler == ARRAY_SIZE(prescalers) || period == 0) { tpu_pwm_config()
286 if (duty > period) tpu_pwm_config()
293 "rate %u, prescaler %u, period %u, duty %u\n", tpu_pwm_config()
294 clk_rate, prescalers[prescaler], period, duty); tpu_pwm_config()
296 if (pwm->prescaler == prescaler && pwm->period == period) tpu_pwm_config()
300 pwm->period = period; tpu_pwm_config()
323 if (duty == 0 || duty == period) { tpu_pwm_config()
358 if (pwm->duty == 0 || pwm->duty == pwm->period) { tpu_pwm_enable()
pwm-atmel-tcb.c
37 unsigned period; /* PWM period expressed in clk cycles */ member in struct:atmel_tcb_pwm_device
88 tcbpwm->period = 0; atmel_tcb_pwm_request()
106 tcbpwm->period = __raw_readl(regs + ATMEL_TC_REG(group, RC)); atmel_tcb_pwm_request()
233 * If duty is 0 or equal to period there's no need to register atmel_tcb_pwm_enable()
238 if (tcbpwm->duty != tcbpwm->period && tcbpwm->duty > 0) { atmel_tcb_pwm_enable()
261 __raw_writel(tcbpwm->period, regs + ATMEL_TC_REG(group, RC)); atmel_tcb_pwm_enable()
281 unsigned period; atmel_tcb_pwm_config() local
312 /* If period is too big return ERANGE error */ atmel_tcb_pwm_config()
318 period = div_u64(period_ns, min); atmel_tcb_pwm_config()
334 * We're checking the period value of the second PWM device atmel_tcb_pwm_config()
338 atcbpwm->duty != atcbpwm->period) && atmel_tcb_pwm_config()
339 (atcbpwm->div != i || atcbpwm->period != period)) { atmel_tcb_pwm_config()
345 tcbpwm->period = period; atmel_tcb_pwm_config()
pwm-rockchip.c
42 unsigned long period; member in struct:rockchip_pwm_regs
105 unsigned long period, duty; rockchip_pwm_config() local
112 * Since period and duty cycle registers have a width of 32 rockchip_pwm_config()
113 * bits, every possible input period can be obtained using the rockchip_pwm_config()
118 period = div; rockchip_pwm_config()
128 writel(period, pc->base + pc->data->regs.period); rockchip_pwm_config()
191 .period = 0x08,
203 .period = 0x04,
215 .period = 0x04,
pwm-sti.c
79 * Calculate the prescaler value corresponding to the period.
81 static int sti_pwm_get_prescale(struct sti_pwm_chip *pc, unsigned long period, sti_pwm_get_prescale() argument
94 if (period % val) { sti_pwm_get_prescale()
97 ps = period / val - 1; sti_pwm_get_prescale()
107 * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles.
108 * The only way to change the period (apart from changing the PWM input clock)
111 * 256 possible period values are supported (for a particular clock rate).
112 * The requested period will be applied only if it matches one of these
137 * for a new channel and period of the new channel is same as sti_pwm_config()
138 * the current configured period. sti_pwm_config()
139 * 4. More than one channels are configured and period of the new sti_pwm_config()
140 * request is the same as the current period. sti_pwm_config()
186 dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n", sti_pwm_config()
pwm-atmel.c
49 * Max value for duty and period
51 * Although the duty and period register is 32 bit,
111 if (test_bit(PWMF_ENABLED, &pwm->flags) && (period_ns != pwm->period)) { atmel_pwm_config()
112 dev_err(chip->dev, "cannot change PWM period while enabled\n"); atmel_pwm_config()
116 /* Calculate the period cycles and prescale value */ atmel_pwm_config()
171 * period registers directly. atmel_pwm_config_v1()
192 * period registers directly. atmel_pwm_config_v2()
pwm-tiecap.c
106 * Update shadow registers to configure period and ecap_pwm_config()
107 * compare values. This helps current PWM period to ecap_pwm_config()
134 /* Duty cycle defines LOW period of PWM */ ecap_pwm_set_polarity()
137 /* Duty cycle defines HIGH period of PWM */ ecap_pwm_set_polarity()
pwm-fsl-ftm.c
237 u32 period, duty; fsl_pwm_config() local
242 * The Freescale FTM controller supports only a single period for fsl_pwm_config()
248 "conflicting period requested for PWM %u\n", fsl_pwm_config()
255 period = fsl_pwm_calculate_period(fpc, period_ns); fsl_pwm_config()
256 if (!period) { fsl_pwm_config()
257 dev_err(fpc->chip.dev, "failed to calculate period\n"); fsl_pwm_config()
264 regmap_write(fpc->regmap, FTM_MOD, period - 1); fsl_pwm_config()
pwm-tiehrpwm.c
183 * Configure prescale_div value such that period set_prescale_div()
207 * on counter value reaches period register value and configure_polarity()
265 * same period register for multiple channels. ehrpwm_pwm_config()
271 * Allow channel to reconfigure period if no other ehrpwm_pwm_config()
297 /* Update period & duty cycle with presacler division */ ehrpwm_pwm_config()
415 /* set period value to zero on free */ ehrpwm_pwm_free()
pwm-bcm-kona.c
33 * 2) Changes to prescale, duty, period, and polarity do not take effect until
40 * will transition to the new settings on a period boundary (which could be
103 * Find period count, duty count and prescale to suit duty_ns and kona_pwmc_config()
197 ret = kona_pwmc_config(chip, pwm, pwm->duty_cycle, pwm->period); kona_pwmc_enable()
/linux-4.1.27/drivers/input/misc/
pwm-beeper.c
29 unsigned long period; member in struct:pwm_beeper
36 unsigned long period = beeper->period; __pwm_beeper_set() local
38 if (period) { __pwm_beeper_set()
39 pwm_config(beeper->pwm, period / 2, period); __pwm_beeper_set()
72 beeper->period = 0; pwm_beeper_event()
74 beeper->period = HZ_TO_NANOSECONDS(value); pwm_beeper_event()
85 if (beeper->period) pwm_beeper_stop()
189 if (beeper->period) pwm_beeper_resume()
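
Note: the beeper simply runs the PWM as a 50 % square wave at the requested tone frequency: the frequency is converted to a period in nanoseconds (HZ_TO_NANOSECONDS() in the driver) and half of it is used as the duty cycle. A tiny standalone illustration, with 1 kHz as an arbitrary example tone:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	unsigned long hz = 1000;			/* requested tone */
	unsigned long period = NSEC_PER_SEC / hz;	/* period in ns */
	unsigned long duty = period / 2;		/* 50 % square wave */

	printf("pwm_config(pwm, %lu, %lu) /* duty_ns, period_ns */\n", duty, period);
	return 0;
}
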
/linux-4.1.27/drivers/net/wireless/iwlwifi/dvm/
power.c
82 /* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
93 /* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
103 /* for DTIM period > IWL_DTIM_RANGE_1_MAX */
129 /* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
144 /* for DTIM period > IWL_DTIM_RANGE_1_MAX */
161 enum iwl_power_level lvl, int period) iwl_static_sleep_cmd()
171 if (period <= IWL_DTIM_RANGE_1_MAX) iwl_static_sleep_cmd()
173 if (period <= IWL_DTIM_RANGE_0_MAX) iwl_static_sleep_cmd()
177 if (period <= IWL_DTIM_RANGE_1_MAX) iwl_static_sleep_cmd()
179 if (period <= IWL_DTIM_RANGE_0_MAX) iwl_static_sleep_cmd()
188 if (period == 0) { iwl_static_sleep_cmd()
190 period = 1; iwl_static_sleep_cmd()
202 /* figure out the listen interval based on dtim period and skip */ iwl_static_sleep_cmd()
205 cpu_to_le32(period * (skip + 1)); iwl_static_sleep_cmd()
208 if (slp_itrvl > period) iwl_static_sleep_cmd()
210 cpu_to_le32((slp_itrvl / period) * period); iwl_static_sleep_cmd()
238 (max_sleep[i] * period)) iwl_static_sleep_cmd()
240 cpu_to_le32(max_sleep[i] * period); iwl_static_sleep_cmd()
255 skip, period); iwl_static_sleep_cmd()
159 iwl_static_sleep_cmd(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, enum iwl_power_level lvl, int period) iwl_static_sleep_cmd() argument
/linux-4.1.27/include/sound/
pcm_oss.h
50 size_t period_bytes; /* requested period size */
51 size_t period_frames; /* period frames for poll */
52 size_t period_ptr; /* actual write pointer to period */
57 char *buffer; /* vmallocated period */
58 size_t buffer_used; /* used length from period buffer */
tlv320dac33-plat.h
19 int auto_fifo_config; /* FIFO config based on the period size */
cs8427.h
108 #define CS8427_SIDEL (1<<2) /* Delay of SDIN data relative to ILRCK for left-justified data formats, 0 = first ISCLK period, 1 = second ISCLK period */
121 #define CS8427_SODEL (1<<2) /* Delay of SDOUT data relative to OLRCK for left-justified data formats, 0 = first OSCLK period, 1 = second OSCLK period */
/linux-4.1.27/arch/m68k/kernel/
time.c
52 static unsigned cnt = 0, period = 0, dist = 0; timer_interrupt() local
59 if (++cnt > period) { timer_interrupt()
61 /* The hyperbolic function below modifies the heartbeat period timer_interrupt()
65 period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30; timer_interrupt()
66 dist = period / 4; timer_interrupt()
/linux-4.1.27/tools/perf/tests/
hists_output.c
50 struct perf_sample sample = { .period = 100, }; add_hist_entries()
167 !strcmp(SYM(he), "main") && he->stat.period == 200); test1()
173 !strcmp(SYM(he), "page_fault") && he->stat.period == 100); test1()
179 !strcmp(SYM(he), "main") && he->stat.period == 100); test1()
185 !strcmp(SYM(he), "xmalloc") && he->stat.period == 100); test1()
191 !strcmp(SYM(he), "page_fault") && he->stat.period == 100); test1()
197 !strcmp(SYM(he), "schedule") && he->stat.period == 100); test1()
203 !strcmp(SYM(he), "free") && he->stat.period == 100); test1()
209 !strcmp(SYM(he), "malloc") && he->stat.period == 100); test1()
215 !strcmp(SYM(he), "cmd_record") && he->stat.period == 100); test1()
266 CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300); test2()
271 CPU(he) == 0 && PID(he) == 100 && he->stat.period == 100); test2()
321 he->stat.period == 200); test3()
327 he->stat.period == 100); test3()
333 he->stat.period == 300); test3()
339 he->stat.period == 200); test3()
345 he->stat.period == 200); test3()
399 !strcmp(COMM(he), "perf") && he->stat.period == 100); test4()
405 !strcmp(COMM(he), "perf") && he->stat.period == 100); test4()
411 !strcmp(COMM(he), "bash") && he->stat.period == 100); test4()
417 !strcmp(COMM(he), "perf") && he->stat.period == 200); test4()
423 !strcmp(COMM(he), "perf") && he->stat.period == 100); test4()
429 !strcmp(COMM(he), "bash") && he->stat.period == 100); test4()
435 !strcmp(COMM(he), "perf") && he->stat.period == 100); test4()
441 !strcmp(COMM(he), "perf") && he->stat.period == 100); test4()
447 !strcmp(COMM(he), "bash") && he->stat.period == 100); test4()
504 !strcmp(SYM(he), "schedule") && he->stat.period == 100); test5()
511 !strcmp(SYM(he), "page_fault") && he->stat.period == 100); test5()
518 !strcmp(SYM(he), "page_fault") && he->stat.period == 100); test5()
525 !strcmp(SYM(he), "xmalloc") && he->stat.period == 100); test5()
532 !strcmp(SYM(he), "main") && he->stat.period == 100); test5()
539 !strcmp(SYM(he), "malloc") && he->stat.period == 100); test5()
546 !strcmp(SYM(he), "free") && he->stat.period == 100); test5()
553 !strcmp(SYM(he), "cmd_record") && he->stat.period == 100); test5()
560 !strcmp(SYM(he), "main") && he->stat.period == 100); test5()
567 !strcmp(SYM(he), "main") && he->stat.period == 100); test5()
sw-clock.c
16 * then check their frequency -> period conversion has no artifact of
17 * setting period to 1 forcefully.
98 total_periods += sample.period; __test__sw_clock_freq()
105 pr_debug("All (%d) samples have period value of 1!\n", __test__sw_clock_freq()
hists_filter.c
50 struct perf_sample sample = { .period = 100, }; add_hist_entries()
152 TEST_ASSERT_VAL("Invalid total period", evlist__for_each()
159 TEST_ASSERT_VAL("Unmatched total period", evlist__for_each()
177 TEST_ASSERT_VAL("Invalid total period", evlist__for_each()
185 TEST_ASSERT_VAL("Unmatched total period for thread filter", evlist__for_each()
206 TEST_ASSERT_VAL("Invalid total period", evlist__for_each()
214 TEST_ASSERT_VAL("Unmatched total period for dso filter", evlist__for_each()
226 * total period will be remained. evlist__for_each()
241 TEST_ASSERT_VAL("Invalid total period", evlist__for_each()
249 TEST_ASSERT_VAL("Unmatched total period for symbol filter", evlist__for_each()
268 TEST_ASSERT_VAL("Invalid total period", evlist__for_each()
276 TEST_ASSERT_VAL("Unmatched total period for all filter", evlist__for_each()
hists_common.c
172 pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n", print_hists_in()
175 he->ms.sym->name, he->stat.period); print_hists_in()
199 pr_info("%2d: entry: %8s:%5d [%-8s] %20s: period = %"PRIu64"/%"PRIu64"\n", print_hists_out()
202 he->ms.sym->name, he->stat.period, print_hists_out()
203 he->stat_acc ? he->stat_acc->period : 0); print_hists_out()
/linux-4.1.27/drivers/usb/host/whci/
pzl.c
28 static void update_pzl_pointers(struct whc *whc, int period, u64 addr) update_pzl_pointers() argument
30 switch (period) { update_pzl_pointers()
61 * Return the 'period' to use for this qset. The minimum interval for
78 int period; qset_insert_in_sw_list() local
80 period = qset_get_period(whc, qset); qset_insert_in_sw_list()
83 list_move(&qset->list_node, &whc->periodic_list[period]); qset_insert_in_sw_list()
210 int period; update_pzl_hw_view() local
213 for (period = 0; period < 5; period++) { update_pzl_hw_view()
214 list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { update_pzl_hw_view()
219 update_pzl_pointers(whc, period, tmp_qh); update_pzl_hw_view()
236 int period; scan_periodic_work() local
240 for (period = 4; period >= 0; period--) { scan_periodic_work()
241 list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { scan_periodic_work()
debug.c
122 int period; pzl_print() local
124 for (period = 0; period < 5; period++) { pzl_print()
125 seq_printf(s, "Period %d\n", period); pzl_print()
126 list_for_each_entry(qset, &whc->periodic_list[period], list_node) { pzl_print()
/linux-4.1.27/drivers/md/
faulty.c
36 * remainder indicate a period, or 0 for one-shot.
86 int period[Modes]; member in struct:faulty_conf
96 if (conf->period[mode] == 0 && check_mode()
102 if (conf->period[mode]) check_mode()
103 atomic_set(&conf->counters[mode], conf->period[mode]); check_mode()
236 n, conf->period[WriteTransient]); status()
240 n, conf->period[ReadTransient]); status()
244 n, conf->period[WritePersistent]); status()
248 n, conf->period[ReadPersistent]); status()
253 n, conf->period[ReadFixable]); status()
277 conf->period[i] = 0; reshape()
281 conf->period[mode] = count; reshape()
317 conf->period[i] = 0; run()
/linux-4.1.27/drivers/video/fbdev/
controlfb.h
53 struct preg vperiod; /* vert period */
56 struct preg hperiod; /* horiz period - 2 */
62 struct preg hlfln; /* half horiz period */
63 struct preg hserr; /* horiz period - horiz sync len */
85 unsigned vperiod; /* vert period */
89 unsigned hperiod; /* horiz period - 2 */
95 unsigned hlfln; /* half horiz period */
96 unsigned hserr; /* horiz period - horiz sync len */
/linux-4.1.27/sound/soc/spear/
spear_pcm.c
28 .period_bytes_min = 2 * 1024, /* 1 msec data minimum period size */
29 .period_bytes_max = 2 * 1024, /* maximum period size */
/linux-4.1.27/drivers/leds/
leds-lp3944.c
22 * - period: from 0s to 1.6s
23 * - duty cycle: percentage of the period the led is on, from 0 to 100
57 /* period in ms */
102 * Set the period for DIM status
106 * @period: period of a blink, that is a on/off cycle, expressed in ms.
108 static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period) lp3944_dim_set_period() argument
121 /* Convert period to Prescaler value */ lp3944_dim_set_period()
122 if (period > LP3944_PERIOD_MAX) lp3944_dim_set_period()
125 psc_value = (period * 255) / LP3944_PERIOD_MAX; lp3944_dim_set_period()
137 * @duty_cycle: percentage of a period during which a led is ON
232 u16 period; lp3944_led_set_blink() local
249 period = (*delay_on) + (*delay_off); lp3944_led_set_blink()
251 /* duty_cycle is the percentage of period during which the led is ON */ lp3944_led_set_blink()
252 duty_cycle = 100 * (*delay_on) / period; lp3944_led_set_blink()
267 err = lp3944_dim_set_period(led->client, LP3944_DIM0, period); lp3944_led_set_blink()
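
Note: worked numbers for the lp3944 blink math above: the period is delay_on + delay_off in milliseconds, the prescaler register maps that period onto 0..255 across the 1.6 s maximum, and the duty cycle is the ON share of the period. LP3944_PERIOD_MAX is assumed to be 1600 ms here, matching the "from 0s to 1.6s" comment.

#include <stdio.h>

#define LP3944_PERIOD_MAX 1600	/* ms, assumed from the comment above */

int main(void)
{
	unsigned int delay_on = 100, delay_off = 300;	/* ms, example blink */
	unsigned int period = delay_on + delay_off;	/* 400 ms */

	unsigned int psc_value  = (period * 255) / LP3944_PERIOD_MAX;	/* 63 */
	unsigned int duty_cycle = 100 * delay_on / period;		/* 25 % */

	printf("period=%u ms -> PSC=%u, duty=%u%%\n", period, psc_value, duty_cycle);
	return 0;
}
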
leds-pwm.c
32 unsigned int period; member in struct:led_pwm_data
46 pwm_config(led_dat->pwm, new_duty, led_dat->period); __led_pwm_set()
68 unsigned long long duty = led_dat->period; led_pwm_set()
74 duty = led_dat->period - duty; led_pwm_set()
128 led_data->period = pwm_get_period(led_data->pwm); led_pwm_add()
129 if (!led_data->period && (led->pwm_period_ns > 0)) led_pwm_add()
130 led_data->period = led->pwm_period_ns; led_pwm_add()
leds-pca963x.c
86 /* Total blink period in milliseconds */
219 unsigned long time_on, time_off, period; pca963x_blink_set() local
233 period = time_on + time_off; pca963x_blink_set()
235 /* If period not supported by hardware, default to something sane. */ pca963x_blink_set()
236 if ((period < PCA963X_BLINK_PERIOD_MIN) || pca963x_blink_set()
237 (period > PCA963X_BLINK_PERIOD_MAX)) { pca963x_blink_set()
240 period = time_on + time_off; pca963x_blink_set()
245 * (time_on / period) = (GDC / 256) -> pca963x_blink_set()
246 * GDC = ((time_on * 256) / period) pca963x_blink_set()
248 gdc = (time_on * 256) / period; pca963x_blink_set()
251 * From manual: period = ((GFRQ + 1) / 24) in seconds. pca963x_blink_set()
252 * So, period (in ms) = (((GFRQ + 1) / 24) * 1000) -> pca963x_blink_set()
253 * GFRQ = ((period * 24 / 1000) - 1) pca963x_blink_set()
255 gfrq = (period * 24 / 1000) - 1; pca963x_blink_set()
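
Note: the two register values derived in the pca963x snippet follow directly from its comments: GDC scales time_on/period onto 0..255 and GFRQ encodes the period as (GFRQ + 1)/24 seconds. A worked example with an arbitrary 200 ms on / 800 ms off blink:

#include <stdio.h>

int main(void)
{
	unsigned long time_on = 200, time_off = 800;	/* ms */
	unsigned long period = time_on + time_off;	/* 1000 ms */

	unsigned long gdc  = (time_on * 256) / period;	/* duty: 51 = 0x33 */
	unsigned long gfrq = (period * 24 / 1000) - 1;	/* period: 23 = 0x17 */

	printf("GDC=0x%02lx GFRQ=0x%02lx\n", gdc, gfrq);
	return 0;
}
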
/linux-4.1.27/drivers/oprofile/
nmi_timer_int.c
128 u64 period; nmi_timer_setup() local
131 period = (u64)cpu_khz * 1000; nmi_timer_setup()
132 do_div(period, HZ); nmi_timer_setup()
133 nmi_timer_attr.sample_period = period; nmi_timer_setup()
/linux-4.1.27/arch/mips/include/asm/mips-boards/
launch.h
34 /* Polling period in count cycles for secondary CPU's */
/linux-4.1.27/drivers/staging/iio/accel/
adis16203.h
15 #define ADIS16203_ALM_SMPL1 0x24 /* Alarm 1, sample period */
16 #define ADIS16203_ALM_SMPL2 0x26 /* Alarm 2, sample period */
21 #define ADIS16203_SMPL_PRD 0x36 /* Internal sample period (rate) control */
adis16201.h
24 #define ADIS16201_ALM_SMPL1 0x24 /* Alarm 1, sample period */
25 #define ADIS16201_ALM_SMPL2 0x26 /* Alarm 2, sample period */
30 #define ADIS16201_SMPL_PRD 0x36 /* Internal sample period (rate) control */
adis16209.h
38 /* Alarm 1, sample period */
40 /* Alarm 2, sample period */
50 /* Internal sample period (rate) control */
adis16220.h
36 /* Control, capture period (automatic mode) */
103 /* Capture period violation/interruption */
adis16204.h
29 #define ADIS16204_SMPL_PRD 0x36 /* Internal sample period (rate) control */
/linux-4.1.27/Documentation/blackfin/
gptimers-example.c
21 uint32_t period, width; member in struct:gptimer_data
35 /* read the width/period values that were captured for the waveform */ gptimer_example_irq()
37 data->period = get_gptimer_period(TIMER5_id); gptimer_example_irq()
/linux-4.1.27/drivers/scsi/
scsi_transport_spi.c
95 /* The PPR values at which you calculate the period in ns by multiplying
421 /* Translate the period into ns according to the current spec
423 static int period_to_str(char *buf, int period) period_to_str() argument
427 if (period < 0 || period > 0xff) { period_to_str()
429 } else if (period <= SPI_STATIC_PPR) { period_to_str()
430 picosec = ppr_to_ps[period]; period_to_str()
432 picosec = period * 4000; period_to_str()
445 show_spi_transport_period_helper(char *buf, int period) show_spi_transport_period_helper() argument
447 int len = period_to_str(buf, period); show_spi_transport_period_helper()
457 int j, picosec, period = -1; store_spi_transport_period_helper() local
475 period = j; store_spi_transport_period_helper()
479 if (period == -1) store_spi_transport_period_helper()
480 period = picosec / 4000; store_spi_transport_period_helper()
482 if (period > 0xff) store_spi_transport_period_helper()
483 period = 0xff; store_spi_transport_period_helper()
485 *periodp = period; store_spi_transport_period_helper()
503 return show_spi_transport_period_helper(buf, tp->period); show_spi_transport_period()
515 int period, retval; store_spi_transport_period() local
520 retval = store_spi_transport_period_helper(cdev, buf, count, &period); store_spi_transport_period()
522 if (period < tp->min_period) store_spi_transport_period()
523 period = tp->min_period; store_spi_transport_period()
525 i->f->set_period(starget, period); store_spi_transport_period()
530 static DEVICE_ATTR(period, S_IRUGO,
765 int period = 0, prevperiod = 0; spi_dv_retrain() local
796 period = newperiod > period ? newperiod : period; spi_dv_retrain()
797 if (period < 0x0d) spi_dv_retrain()
798 period++; spi_dv_retrain()
800 period += period >> 1; spi_dv_retrain()
802 if (unlikely(period > 0xff || period == prevperiod)) { spi_dv_retrain()
809 DV_SET(period, period); spi_dv_retrain()
810 prevperiod = period; spi_dv_retrain()
899 * for a transfer period that requires it */ spi_dv_device_internal()
922 DV_SET(period, min_period); spi_dv_device_internal()
1126 if (tp->offset > 0 && tp->period > 0) { spi_display_xfer_agreement()
1131 if (tp->period <= SPI_STATIC_PPR) { spi_display_xfer_agreement()
1132 picosec = ppr_to_ps[tp->period]; spi_display_xfer_agreement()
1133 switch (tp->period) { spi_display_xfer_agreement()
1142 picosec = tp->period * 4000; spi_display_xfer_agreement()
1143 if (tp->period < 25) spi_display_xfer_agreement()
1145 else if (tp->period < 50) spi_display_xfer_agreement()
1185 int spi_populate_sync_msg(unsigned char *msg, int period, int offset) spi_populate_sync_msg() argument
1190 msg[3] = period; spi_populate_sync_msg()
1196 int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, spi_populate_ppr_msg() argument
1202 msg[3] = period; spi_populate_ppr_msg()
1261 printk("period = %s ns ", buf); print_nego()
1470 return TARGET_ATTRIBUTE_HELPER(period); target_attribute_is_visible()
1473 return TARGET_ATTRIBUTE_HELPER(period); target_attribute_is_visible()
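
Note: for the SPI transport class, a period factor above SPI_STATIC_PPR converts to a transfer period of factor * 4000 picoseconds, as period_to_str() above shows (the faster factors come from the ppr_to_ps[] table, which is not reproduced here). A short sketch using the classic Fast-10 and Fast-5 factors 25 and 50 as example inputs:

#include <stdio.h>

int main(void)
{
	int factors[] = { 25, 50 };	/* example period factors above SPI_STATIC_PPR */

	for (unsigned int i = 0; i < sizeof(factors) / sizeof(factors[0]); i++) {
		int picosec = factors[i] * 4000;
		/* period in ns and the resulting synchronous clock rate */
		printf("factor %d -> %d.%d ns (%.1f MHz)\n",
		       factors[i], picosec / 1000, (picosec % 1000) / 100,
		       1e6 / picosec);
	}
	return 0;
}
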
nsp32.h
157 # define SMPL_40M (0) /* 40MHz: 0-100ns/period */
158 # define SMPL_20M (SREQSMPLRATE_RATE0) /* 20MHz: 100-200ns/period */
159 # define SMPL_10M (SREQSMPLRATE_RATE1) /* 10Mhz: 200- ns/period */
516 unsigned char period_num; /* period number */
517 unsigned char ackwidth; /* ack width designated by period */
518 unsigned char start_period; /* search range - start period */
519 unsigned char end_period; /* search range - end period */
532 /* syncronous period value for nsp32_target.config_max */
537 /* flag for nsp32_target.{sync_offset}, period */
544 #define TO_SYNCREG(period, offset) (((period) & 0x0f) << 4 | ((offset) & 0x0f))
549 unsigned char period; /* sync period (0-255) */ member in struct:_nsp32_target
nsp32.c
146 * Note: This period/ackwidth speed table must be in descending order.
388 unsigned char period, nsp32_build_sdtr()
397 data->msgoutbuf[pos] = period; pos++; nsp32_build_sdtr()
991 unsigned char period, offset; nsp32_queuecommand_lck() local
994 nsp32_set_max_sync(data, target, &period, &offset); nsp32_queuecommand_lck()
995 nsp32_build_sdtr(SCpnt, period, offset); nsp32_queuecommand_lck()
1004 target->limit_entry, period, offset); nsp32_queuecommand_lck()
1492 if (data->target[id].period == 0 && nsp32_show_info()
1502 if (data->target[id].period != 0) { nsp32_show_info()
1504 speed = 1000000 / (data->target[id].period * 4); nsp32_show_info()
2283 * period: nsp32_analyze_sdtr()
2284 * Check whether sync period is too short. If too short, nsp32_analyze_sdtr()
2286 * the received sync period. If sync period is acceptable nsp32_analyze_sdtr()
2288 * set this I_T nexus as sent offset and period. nsp32_analyze_sdtr()
2295 * unexpected period value. nsp32_analyze_sdtr()
2304 * Target want to use long period which is not nsp32_analyze_sdtr()
2326 /* period: */ nsp32_analyze_sdtr()
2342 target->period = get_period; nsp32_analyze_sdtr()
2356 target->period = 0; nsp32_analyze_sdtr()
2364 * target and speed period value. If failed to search, return negative value.
2368 unsigned char period) nsp32_search_period_entry()
2378 if (period >= data->synct[i].start_period && nsp32_search_period_entry()
2379 period <= data->synct[i].end_period) { nsp32_search_period_entry()
2385 * Check given period value is over the sync_table value. nsp32_search_period_entry()
2401 unsigned char period = data->synct[target->limit_entry].period_num; nsp32_set_async() local
2404 target->period = 0; nsp32_set_async()
2405 target->syncreg = TO_SYNCREG(period, ASYNC_OFFSET); nsp32_set_async()
2418 unsigned char *period, nsp32_set_max_sync()
2424 *period = data->synct[target->limit_entry].start_period; nsp32_set_max_sync()
2443 unsigned char period, ackwidth, sample_rate; nsp32_set_sync_entry() local
2445 period = data->synct[entry].period_num; nsp32_set_sync_entry()
2450 target->syncreg = TO_SYNCREG(period, offset); nsp32_set_sync_entry()
387 nsp32_build_sdtr(struct scsi_cmnd *SCpnt, unsigned char period, unsigned char offset) nsp32_build_sdtr() argument
2366 nsp32_search_period_entry(nsp32_hw_data *data, nsp32_target *target, unsigned char period) nsp32_search_period_entry() argument
2416 nsp32_set_max_sync(nsp32_hw_data *data, nsp32_target *target, unsigned char *period, unsigned char *offset) nsp32_set_max_sync() argument
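
Note: two bits of arithmetic from the nsp32 hits, reproduced standalone: TO_SYNCREG() packs a 4-bit period number and a 4-bit offset into the sync register, and nsp32_show_info() derives the synchronous rate from the SDTR period factor (factor * 4 ns per transfer). The values below are illustrative only.

#include <stdio.h>

#define TO_SYNCREG(period, offset) (((period) & 0x0f) << 4 | ((offset) & 0x0f))

int main(void)
{
	unsigned char period_num = 0x3, offset = 0xf;	/* example table entry */
	printf("syncreg = 0x%02x\n", TO_SYNCREG(period_num, offset));	/* 0x3f */

	unsigned char sdtr_period = 25;			/* 25 * 4 ns = 100 ns */
	unsigned int speed_khz = 1000000 / (sdtr_period * 4);	/* as in show_info */
	printf("sync speed = %u.%03u MHz\n", speed_khz / 1000, speed_khz % 1000);
	return 0;
}
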
mesh.h
113 #define SYNC_PER(x) ((x) & 0xf) /* period field */
120 * The transfer period with SYNC_PER(sync_params) == x
/linux-4.1.27/kernel/rcu/
tree.c
165 /* Delay in jiffies for grace-period initialization delays, debug only. */
198 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
210 * one since the start of the grace period, this just sets a flag.
255 * period, which we in turn do by incrementing the ->dynticks counter
287 * This allows the grace-period kthread to record the for_each_rcu_flavor()
347 * How long the grace period must be before we start recruiting
444 * Show the state of the grace-period kthreads.
473 * Send along grace-period-related data for rcutorture diagnostics.
549 * Does the current CPU require a not-yet-started grace period?
559 return 0; /* No, a grace period is already in progress. */ cpu_needs_another_gp()
570 return 1; /* Yes, CBs for future grace period. */ cpu_needs_another_gp()
571 return 0; /* No grace period needed. */ cpu_needs_another_gp()
652 * of interrupt nesting level during the prior busy period.
775 * of interrupt nesting level during the busy period that is just
844 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
863 * period (observation due to Andy Lutomirski). rcu_nmi_enter()
881 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
882 * to let the RCU grace-period handling know that the CPU is back to
1043 * of the current RCU grace period. rcu_implicit_dynticks_qs()
1052 * Check for the CPU being offline, but only if the grace period rcu_implicit_dynticks_qs()
1057 * The reason for insisting that the grace period be at least rcu_implicit_dynticks_qs()
1063 return 0; /* Grace period is not old enough. */ rcu_implicit_dynticks_qs()
1076 * So if the grace period is old enough, make the CPU pay attention. rcu_implicit_dynticks_qs()
1081 * in incorrect behavior, merely in a grace period lasting rcu_implicit_dynticks_qs()
1085 * lossage (and thus of slight grace-period extension) is rcu_implicit_dynticks_qs()
1128 * Complain about starvation of grace-period kthread.
1230 /* Complain about tasks blocking the grace period. */
1299 * equivalent) during grace-period initialization and cleanup. check_cpu_stall()
1303 * grace period ends and another starts between these two fetches. check_cpu_stall()
1337 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
1379 * next subsequent grace period. This is used to tag callbacks so that
1381 * been dyntick-idle for an extended period with callbacks under the
1390 * If RCU is idle, we just wait for the next grace period. rcu_cbs_completed()
1393 * period might have started, but just not yet gotten around rcu_cbs_completed()
1400 * Otherwise, wait for a possible partial grace period and rcu_cbs_completed()
1401 * then the subsequent full grace period. rcu_cbs_completed()
1419 * Start some future grace period, as needed to handle newly arrived
1422 * is reason to awaken the grace-period kthread.
1436 * Pick up grace-period number for new callbacks. If this rcu_start_future_gp()
1437 * grace period is already marked as needed, return to the caller. rcu_start_future_gp()
1448 * believe that a grace period is in progress, then we must wait rcu_start_future_gp()
1450 * will be noticed at the end of the current grace period, we don't rcu_start_future_gp()
1453 * there is no grace period in flight, and because we hold rnp->lock, rcu_start_future_gp()
1467 * There might be no grace period in progress. If we don't already rcu_start_future_gp()
1477 * Get a new grace-period number. If there really is no grace rcu_start_future_gp()
1478 * period in progress, it will be smaller than the one we obtained rcu_start_future_gp()
1488 * If the needed for the required grace period is already rcu_start_future_gp()
1496 /* Record the need for the future grace period. */ rcu_start_future_gp()
1499 /* If a grace period is not already in progress, start one. */ rcu_start_future_gp()
1516 * Clean up any old requests for the just-ended grace period. Also return
1519 * waiting for this grace period to complete.
1536 * Awaken the grace-period kthread for the specified flavor of RCU.
1538 * nothing for the grace-period kthread to do (as in several CPUs
1559 * awaken the RCU grace-period kthread.
1577 * first sublist that is not assignable to an upcoming grace period. rcu_accelerate_cbs()
1605 * full grace period and group them all in the sublist initially rcu_accelerate_cbs()
1624 * Move any callbacks whose grace period has completed to the
1629 * Returns true if the RCU grace-period kthread needs to be awakened.
1671 * Returns true if the grace-period kthread needs to be awakened.
1682 /* No grace period end, so just accelerate recent callbacks. */ __note_gp_changes()
1690 /* Remember that we saw this grace-period completion. */ __note_gp_changes()
1697 * If the current grace period is waiting for this CPU, __note_gp_changes()
1735 * Initialize a new grace period. Return 0 if no grace period required.
1751 ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */ rcu_gp_init()
1755 * Grace period already in progress, don't start another. rcu_gp_init()
1762 /* Advance to a new grace period and initialize state. */ rcu_gp_init()
1771 * rcu_node tree. Note that this new grace period need not wait rcu_gp_init()
1800 * If all waited-on tasks from prior grace period are rcu_for_each_leaf_node()
1824 * grace period is in progress, at least until the corresponding
1828 * The grace period cannot complete until the initialization
1896 * Clean up after the old grace period.
1914 * We know the grace period is complete, but to everyone else rcu_gp_cleanup()
1917 * they can do to advance the grace period. It is therefore rcu_gp_cleanup()
1919 * period as completed in all of the rcu_node structures. rcu_gp_cleanup()
1926 * grace period to process their callbacks. This also avoids rcu_gp_cleanup()
1927 * some nasty RCU grace-period initialization races by forcing rcu_gp_cleanup()
1928 * the end of the current grace period to be completely recorded in rcu_gp_cleanup()
1930 * grace period is recorded in any of the rcu_node structures. rcu_gp_cleanup()
1952 /* Declare grace period done. */
1983 /* Handle grace-period start. */ rcu_gp_kthread()
2025 /* If grace period done, leave loop. */ rcu_gp_kthread()
2060 /* Handle grace-period end. */ rcu_gp_kthread()
2066 * Start a new RCU grace period if warranted, re-initializing the hierarchy
2067 * in preparation for detecting the next grace period. The caller must hold
2074 * Returns true if the grace-period kthread must be awakened.
2082 * Either we have not yet spawned the grace-period rcu_start_gp_advanced()
2083 * task, this CPU does not need another grace period, rcu_start_gp_advanced()
2084 * or a grace period is already in progress. rcu_start_gp_advanced()
2085 * Either way, don't start a new grace period. rcu_start_gp_advanced()
2108 * Returns true if the grace-period kthread needs to be awakened.
2117 * If there is no grace period in progress right now, any rcu_start_gp()
2119 * next grace period. Also, advancing the callbacks reduces the rcu_start_gp()
2122 * then start the grace period! rcu_start_gp()
2132 * period and letting rcu_start_gp() start up the next grace period
2150 * is the grace-period snapshot, which means that the quiescent states
2168 * relevant grace period is already over, so done.
2202 * state for this grace period. Invoke rcu_report_qs_rsp()
2203 * to clean up and start the next grace period if one is needed.
2211 * RCU grace period. The caller must hold the specified rnp->lock with
2254 * grace period of interest. We don't want to end the current grace period
2255 * based on quiescent states detected in an earlier grace period!
2274 * The grace period in which this quiescent state was rcu_report_qs_rdp()
2277 * within the current grace period. rcu_report_qs_rdp()
2304 * Check to see if there is a new grace period of which this CPU
2307 * quiescent state for this grace period, and record that fact if so.
2312 /* Check for grace-period ends and beginnings. */ rcu_check_quiescent_state()
2316 * Does this CPU still need to do its part for current grace period? rcu_check_quiescent_state()
2324 * period? If no, then exit and wait for the next call. rcu_check_quiescent_state()
2366 * Next, move those callbacks still needing a grace period to rcu_send_cbs_to_orphanage()
2369 * period, but that is too bad. They get to start over because we rcu_send_cbs_to_orphanage()
2383 * required to pass though another grace period: They are done. rcu_send_cbs_to_orphanage()
2437 /* And then adopt the callbacks that still need a grace period. */ rcu_adopt_orphan_cbs()
2571 * period. Thottle as specified by rdp->blimit.
2753 * Race between grace-period rcu_for_each_leaf_node()
2840 /* Does this CPU require a not-yet-started grace period? */ __rcu_process_callbacks()
2919 * Force the grace period if too many callbacks or too long waiting. __call_rcu_core()
2923 * is the only one waiting for a grace period to complete. __call_rcu_core()
2927 /* Are we ignoring a completed grace period? */ __call_rcu_core()
2930 /* Start a new grace period if one not already started. */ __call_rcu_core()
2941 /* Give the grace period a kick. */ __call_rcu_core()
2983 * Opportunistically note grace-period endings and beginnings. __call_rcu()
3035 * Queue an RCU-sched callback for invocation after a grace period.
3044 * Queue an RCU callback for invocation after a quicker grace period.
3053 * Queue an RCU callback for lazy invocation after a grace period.
3067 * Because a context switch is a grace period for RCU-sched and RCU-bh,
3068 * any blocking grace-period wait automatically implies a grace period
3087 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
3090 * grace period has elapsed, in other words after all currently executing
3143 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
3146 * period has elapsed, in other words after all currently executing rcu_bh
3173 * to determine whether or not a full grace period has elapsed in the
3194 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3198 * If a full RCU grace period has elapsed since the earlier call to
3200 * synchronize_rcu() to wait for a full grace period.
3205 * so waiting for one additional grace period should be just fine.
3239 * synchronize_sched_expedited - Brute-force RCU-sched grace period
3241 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
3242 * approach to force the grace period to end quickly. This consumes
3256 * grace period. We are then done, so we use atomic_cmpxchg() to
3262 * initial snapshot, then someone else must have forced a grace period
3366 * callers to piggyback on our grace period. We retry
3367 * after they started, so our grace period works for them,
3369 * period works for us.
3388 * period. Update the counter, but only if our work is still
3446 /* Has RCU gone idle with this CPU needing another grace period? */ __rcu_pending()
3452 /* Has another RCU grace period completed? */ __rcu_pending()
3458 /* Has a new RCU grace period started? */ __rcu_pending()
3619 * avoid a too-soon return to zero in case of a short grace period _rcu_barrier()
3773 * of the next grace period. rcu_init_percpu_data()
tree_plugin.h
70 pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n"); rcu_bootup_announce_oddness()
135 * RCU read-side critical section. Therefore, the current grace period
137 * predating the current grace period drain, in other words, until
162 * will hold up the next grace period rather than the rcu_preempt_note_context_switch()
163 * current grace period. Queue the task accordingly. rcu_preempt_note_context_switch()
164 * If the task is queued for the current grace period rcu_preempt_note_context_switch()
166 * state for the current grace period), then as long rcu_preempt_note_context_switch()
167 * as that task remains queued, the current grace period rcu_preempt_note_context_switch()
169 * to exactly when the current grace period started. rcu_preempt_note_context_switch()
172 * slightly after the current grace period began. C'est rcu_preempt_note_context_switch()
214 * grace period, then the fact that the task has been enqueued rcu_preempt_note_context_switch()
215 * means that we continue to block the current grace period. rcu_preempt_note_context_switch()
221 * Check for preempted RCU readers blocking the current grace period
381 * grace period on the specified rcu_node structure.
402 * grace period.
462 * period is in fact empty. It is a serious bug to complete a grace
463 * period that still has RCU readers blocked! This function must be
468 * block the newly created grace period, so set up ->gp_tasks accordingly.
509 * Queue a preemptible-RCU callback for invocation after a grace period.
518 * synchronize_rcu - wait until a grace period has elapsed.
521 * period has elapsed, in other words after all currently executing RCU
552 * sections blocking the current preemptible-RCU expedited grace period.
553 * If there is no preemptible-RCU expedited grace period currently in
562 * return non-zero if there is no RCU expedited grace period in progress
565 * for the current expedited grace period. Works only for preemptible
579 * grace period. This event is reported either to the rcu_node structure on
618 * grace period for the specified rcu_node structure, phase 1. If there
659 * grace period for the specified rcu_node structure, phase 2. If the
700 * synchronize_rcu_expedited - Brute-force RCU grace period
702 * Wait for an RCU-preempt grace period, but expedite it. The basic
726 * this expedited grace period will already be in the process of synchronize_rcu_expedited()
739 * expedited grace period for us, just leave. synchronize_rcu_expedited()
788 smp_mb(); /* ensure subsequent action seen after grace period. */ synchronize_rcu_expedited()
795 * Note that this primitive does not necessarily wait for an RCU grace period
798 * immediately, without waiting for anything, much less an RCU grace period.
906 * Wait for an rcu-preempt grace period, but make it happen quickly.
1016 * expedited grace period must boost all blocked tasks, including rcu_boost()
1017 * those blocking the pre-existing normal grace period. rcu_boost()
1092 * blocking the current grace period, and, if so, tell the per-rcu_node
1094 * period in progress, it is always time to boost.
1156 * Do priority-boost accounting for the start of a new grace period.
1387 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1414 * is sized to be roughly one RCU grace period. Those energy-efficiency
1427 #define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */
1460 * Don't bother checking unless a grace period has for_each_rcu_flavor()
1521 * major task is to accelerate (that is, assign grace-period numbers to)
1733 * If the specified CPU is aware of the current RCU grace period
1738 * aware of the previous grace period.
1822 * waits for a grace period to elapse, and invokes the callbacks.
1858 * grace period.
1871 * were being updated for the end of the previous grace period.
2095 * If necessary, kick off a new grace period, and either way wait
2096 * for a subsequent grace period to complete.
2114 * Wait for the grace period. Do so interruptibly to avoid messing rcu_nocb_wait_gp()
2158 * nocb_gp_head, where they await a grace period. nocb_leader_wait()
2195 /* Wait for one grace period. */ nocb_leader_wait()
2265 * an optional leader-follower relationship so that the grace-period
2607 * arbitrarily long period of time with the scheduling-clock tick turned
2612 * period and has not been idle from an RCU perspective, kick it.
2627 #define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */
2664 /* Record start of fully idle period. */ rcu_sysidle_enter()
2739 /* Record end of idle period. */ rcu_sysidle_exit()
2837 /* First time all are idle, so note a short idle period. */ rcu_sysidle()
2897 * Wrapper for rcu_sysidle_report() when called from the grace-period
2910 /* Callback and function for forcing an RCU grace period. */
2925 smp_mb(); /* grace period precedes setting inuse. */ rcu_sysidle_cb()
2971 /* If this is the first observation of an idle period, record it. */
2985 * If we aren't there yet, and a grace period is not in flight,
2986 * initiate a grace period. Either way, tell the caller that
3039 * grace-period kthread will do force_quiescent_state() processing?
3041 * CPU unless the grace period has extended for too long.
3058 * Bind the grace-period kthread for the sysidle flavor of RCU to the
H A Dtree.h96 /* End of last non-NMI non-idle period. */
103 /* idle-period nonlazy_posted snapshot. */
121 * Definition for node within the RCU grace-period-detection hierarchy.
126 unsigned long gpnum; /* Current grace period for this node. */
133 /* order for current grace period to proceed.*/
140 /* current expedited grace period to */
145 /* beginning of each grace period. */
147 /* Online CPUs for next grace period. */
165 /* current grace period, or NULL if there */
169 /* current expedited grace period, or NULL */
171 /* is no current expedited grace period, */
180 /* are blocking the current grace period, */
257 /* 1) quiescent-state and grace-period handling : */
274 /* period it is aware of. */
288 * The grace period for these entries has completed, and
289 * the other grace-period-completed entries may be moved
324 /* Grace period that needs help */
379 #define RCU_GP_IDLE 0 /* No grace period in progress. */
380 #define RCU_GP_INIT 1 /* Grace period being initialized. */
450 /* need a grace period. */
504 #define RCU_GP_FLAG_INIT 0x1 /* Need grace-period initialization. */
505 #define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. */
509 #define RCU_GP_WAIT_GPS 1 /* Wait for grace-period start. */
H A Dupdate.c71 * Should normal grace-period primitives be expedited? Intended for
260 * wakeme_after_rcu() - Callback function to awaken a task after grace period
263 * Awaken the corresponding task now that a grace period has elapsed.
491 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
494 * grace period has elapsed, in other words after all currently
529 /* Wait for the grace period. */ synchronize_rcu_tasks()
594 * one RCU-tasks grace period and then invokes the callbacks. rcu_tasks_kthread()
623 * grace period might be incorrectly seen as having started rcu_tasks_kthread()
624 * after the grace period. rcu_tasks_kthread()
629 * after the beginning of the grace period. rcu_tasks_kthread()
635 * RCU-tasks grace period. Start off by scanning rcu_tasks_kthread()
694 * extend past the end of the grace period. However,
701 * period, avoiding the need for memory barriers for
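The RCU excerpts above (tree.c, tree_plugin.h, tree.h, update.c) all circle the same contract: synchronize_rcu() and its sched/bh/tasks variants return only after every read-side critical section that started before the call has finished, so the caller may then free whatever the old readers might still have been using. Below is a minimal user-space sketch of that pattern, assuming the liburcu library (whose rcu_read_lock()/synchronize_rcu() API mirrors the kernel's; link with -lurcu -pthread). The struct config type and the thread layout are invented for illustration, not taken from the kernel sources quoted here.

/* Toy illustration of the grace-period semantics described above, using
 * userspace liburcu (an assumption of this sketch, not kernel code). */
#include <urcu.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct config { int value; };

static struct config *cfg;

static void *reader(void *arg)
{
	(void)arg;
	rcu_register_thread();
	for (int i = 0; i < 1000; i++) {
		rcu_read_lock();
		struct config *c = rcu_dereference(cfg);
		if (c)
			(void)c->value;	/* safe until rcu_read_unlock() */
		rcu_read_unlock();
	}
	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t t;

	cfg = calloc(1, sizeof(*cfg));
	rcu_register_thread();
	pthread_create(&t, NULL, reader, NULL);

	struct config *newc = calloc(1, sizeof(*newc));
	newc->value = 42;
	struct config *old = cfg;
	rcu_assign_pointer(cfg, newc);	/* publish the new version */
	synchronize_rcu();		/* wait for pre-existing readers */
	free(old);			/* no reader can still see it */

	pthread_join(t, NULL);
	rcu_unregister_thread();
	free(newc);
	return 0;
}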
/linux-4.1.27/drivers/clocksource/
H A Dtimer-keystone.c43 * @hz_period: cycles per HZ period
76 * @period: number of cycles to configure for
78 static int keystone_timer_config(u64 period, enum clock_event_mode mode) keystone_timer_config() argument
103 /* reset counter to zero, set new period */ keystone_timer_config()
106 keystone_timer_writel(period & 0xffffffff, PRD12); keystone_timer_config()
107 keystone_timer_writel(period >> 32, PRD34); keystone_timer_config()
H A Dvf_pit_timer.c77 * to abort the current cycle and start a timer period with the new pit_set_next_event()
151 * LDVAL trigger = (period / clock period) - 1 pit_clockevent_init()
H A Ddw_apb_timer.c117 unsigned long period; apbt_set_mode() local
126 period = DIV_ROUND_UP(dw_ced->timer.freq, HZ); apbt_set_mode()
137 pr_debug("Setting clock period %lu for HZ %d\n", period, HZ); apbt_set_mode()
138 apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT); apbt_set_mode()
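keystone_timer_config(), apbt_set_mode() and pit_clockevent_init() above all derive a periodic reload value the same way: cycles per tick = timer clock / HZ (rounded up), with the PIT additionally subtracting one because the counter runs for the programmed value plus one. A stand-alone sketch of that arithmetic; the 24 MHz clock and HZ=100 are made-up numbers, not values from these drivers.

#include <stdint.h>
#include <stdio.h>

/* Round-up division, as the kernel's DIV_ROUND_UP() does. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t freq = 24000000;	/* timer input clock, Hz (assumed) */
	uint32_t hz   = 100;		/* desired tick rate */

	uint32_t period = DIV_ROUND_UP(freq, hz);	/* cycles per tick */
	uint32_t ldval  = period - 1;	/* PIT-style "LDVAL = period - 1" */

	printf("period = %u cycles, load register = %u\n", period, ldval);
	return 0;
}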
/linux-4.1.27/arch/powerpc/platforms/52xx/
H A Dmpc52xx_gpt.c396 static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period, mpc52xx_gpt_do_start() argument
412 /* Determine the number of clocks in the requested period. 64 bit mpc52xx_gpt_do_start()
416 clocks = period * (u64)gpt->ipb_freq; mpc52xx_gpt_do_start()
424 * 'clocks' is the number of clock ticks in the period. The timer mpc52xx_gpt_do_start()
461 * @period: period of timer in ns; max. ~130s @ 33MHz IPB clock
466 int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period, mpc52xx_gpt_start_timer() argument
469 return mpc52xx_gpt_do_start(gpt, period, continuous, 0); mpc52xx_gpt_start_timer()
497 * mpc52xx_gpt_timer_period - Read the timer period
500 * Returns the timer period in ns
504 u64 period; mpc52xx_gpt_timer_period() local
509 period = in_be32(&gpt->regs->count); mpc52xx_gpt_timer_period()
512 prescale = period >> 16; mpc52xx_gpt_timer_period()
513 period &= 0xffff; mpc52xx_gpt_timer_period()
516 period = period * prescale * 1000000000ULL; mpc52xx_gpt_timer_period()
517 do_div(period, (u64)gpt->ipb_freq); mpc52xx_gpt_timer_period()
518 return period; mpc52xx_gpt_timer_period()
686 const u32 *period) mpc52xx_gpt_wdt_setup()
694 if (!period || *period == 0) mpc52xx_gpt_wdt_setup()
697 real_timeout = (u64) *period * 1000000000ULL; mpc52xx_gpt_wdt_setup()
701 dev_info(gpt->dev, "watchdog set to %us timeout\n", *period); mpc52xx_gpt_wdt_setup()
713 const u32 *period) mpc52xx_gpt_wdt_setup()
685 mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, const u32 *period) mpc52xx_gpt_wdt_setup() argument
712 mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, const u32 *period) mpc52xx_gpt_wdt_setup() argument
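mpc52xx_gpt_do_start() converts a period given in nanoseconds into bus-clock ticks (clocks = period * ipb_freq, with the division by NSEC_PER_SEC falling outside the quoted lines), and mpc52xx_gpt_timer_period() does the reverse, scaling count * prescale back to nanoseconds. A sketch of both directions in plain C; the 66 MHz IPB frequency is assumed for illustration.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Period in ns -> number of input-clock ticks (rounds down). */
static uint64_t ns_to_ticks(uint64_t period_ns, uint32_t clk_hz)
{
	return period_ns * clk_hz / NSEC_PER_SEC;
}

/* Counter value and prescaler -> period in ns, as the read-back path does. */
static uint64_t ticks_to_ns(uint32_t count, uint32_t prescale, uint32_t clk_hz)
{
	return (uint64_t)count * prescale * NSEC_PER_SEC / clk_hz;
}

int main(void)
{
	uint32_t ipb_freq = 66000000;			/* assumed 66 MHz IPB clock */
	uint64_t ticks = ns_to_ticks(1000000, ipb_freq);	/* 1 ms */

	printf("1 ms = %llu ticks\n", (unsigned long long)ticks);
	printf("back to ns: %llu\n",
	       (unsigned long long)ticks_to_ns((uint32_t)ticks, 1, ipb_freq));
	return 0;
}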
/linux-4.1.27/tools/perf/util/
H A Dsort.h50 u64 period; member in struct:he_stat
137 u64 period = he->stat.period; hist_entry__get_percent_limit() local
144 period = he->stat_acc->period; hist_entry__get_percent_limit()
146 return period * 100.0 / total_period; hist_entry__get_percent_limit()
H A Dhist.c179 unsigned int cpumode, u64 period) he_stat__add_cpumode_period()
183 he_stat->period_sys += period; he_stat__add_cpumode_period()
186 he_stat->period_us += period; he_stat__add_cpumode_period()
189 he_stat->period_guest_sys += period; he_stat__add_cpumode_period()
192 he_stat->period_guest_us += period; he_stat__add_cpumode_period()
199 static void he_stat__add_period(struct he_stat *he_stat, u64 period, he_stat__add_period() argument
203 he_stat->period += period; he_stat__add_period()
210 dest->period += src->period; he_stat__add_stat()
221 he_stat->period = (he_stat->period * 7) / 8; he_stat__decay()
228 u64 prev_period = he->stat.period; hists__decay_entry()
238 diff = prev_period - he->stat.period; hists__decay_entry()
244 return he->stat.period == 0; hists__decay_entry()
377 u64 period = entry->stat.period; add_hist_entry() local
396 he_stat__add_period(&he->stat, period, weight); add_hist_entry()
398 he_stat__add_period(he->stat_acc, period, weight); add_hist_entry()
436 he_stat__add_cpumode_period(&he->stat, al->cpumode, period); add_hist_entry()
438 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period); add_hist_entry()
447 u64 period, u64 weight, u64 transaction, __hists__add_entry()
463 .period = period, __hists__add_entry()
521 * must pass period=weight in order to get the correct iter_add_single_mem_entry()
524 * and this is indirectly achieved by passing period=weight here iter_add_single_mem_entry()
626 * and not events sampled. Thus we use a pseudo period of 1. iter_add_next_branch_entry()
666 sample->period, sample->weight, iter_add_single_normal_entry()
728 sample->period, sample->weight, iter_add_single_cumulative_entry()
801 sample->period, sample->weight, iter_add_next_cumulative_entry()
810 callchain_append(he->callchain, &cursor, sample->period); iter_add_next_cumulative_entry()
1088 hists->stats.total_non_filtered_period += h->stat.period; hists__inc_filter_stats()
1097 hists->stats.total_period += h->stat.period; hists__inc_stats()
1389 * we find them, just add a dummy entry on the leader hists, with period=0,
178 he_stat__add_cpumode_period(struct he_stat *he_stat, unsigned int cpumode, u64 period) he_stat__add_cpumode_period() argument
442 __hists__add_entry(struct hists *hists, struct addr_location *al, struct symbol *sym_parent, struct branch_info *bi, struct mem_info *mi, u64 period, u64 weight, u64 transaction, bool sample_self) __hists__add_entry() argument
H A Dcallchain.c476 u64 period) add_child()
484 new->hit = period; add_child()
509 u64 idx_parents, u64 idx_local, u64 period) split_add_child()
541 parent->children_hit += period; split_add_child()
544 new = add_child(parent, cursor, period); split_add_child()
563 parent->hit = period; split_add_child()
570 u64 period);
575 u64 period) append_chain_children()
594 ret = append_chain(rnode, cursor, period); append_chain_children()
604 rnode = add_child(root, cursor, period); append_chain_children()
609 root->children_hit += period; append_chain_children()
615 u64 period) append_chain()
655 split_add_child(root, cursor, cnode, start, matches, period); append_chain()
661 root->hit += period; append_chain()
666 append_chain_children(root, cursor, period); append_chain()
673 u64 period) callchain_append()
680 append_chain_children(&root->node, cursor, period); callchain_append()
779 return callchain_append(he->callchain, &callchain_cursor, sample->period); hist_entry__append_callchain()
474 add_child(struct callchain_node *parent, struct callchain_cursor *cursor, u64 period) add_child() argument
506 split_add_child(struct callchain_node *parent, struct callchain_cursor *cursor, struct callchain_list *to_split, u64 idx_parents, u64 idx_local, u64 period) split_add_child() argument
573 append_chain_children(struct callchain_node *root, struct callchain_cursor *cursor, u64 period) append_chain_children() argument
613 append_chain(struct callchain_node *root, struct callchain_cursor *cursor, u64 period) append_chain() argument
671 callchain_append(struct callchain_root *root, struct callchain_cursor *cursor, u64 period) callchain_append() argument
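he_stat__decay() above ages a histogram entry by keeping 7/8 of its accumulated period, and the percent-limit/percentage helpers report an entry as period * 100.0 / total_period. The toy program below reproduces that arithmetic on invented sample values.

#include <stdint.h>
#include <stdio.h>

/* Keep 7/8 of the accumulated period, as he_stat__decay() does. */
static uint64_t decay_period(uint64_t period)
{
	return (period * 7) / 8;
}

static double period_percent(uint64_t period, uint64_t total)
{
	return total ? period * 100.0 / total : 0.0;
}

int main(void)
{
	uint64_t period = 4096, total = 100000;

	printf("%.2f%% of total\n", period_percent(period, total));
	period = decay_period(period);
	printf("after decay: %llu (%.2f%%)\n",
	       (unsigned long long)period, period_percent(period, total));
	return 0;
}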
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/
H A Dlost_exception_test.c42 * We want a low sample period, but we also want to get out of the EBB test_body()
63 /* Change the sample period slightly to try and hit the race */ test_body()
86 /* We vary our sample period so we need extra fudge here */ test_body()
H A Dback_to_back_ebbs_test.c19 * We do this by counting with a stupidly low sample period, causing us to
/linux-4.1.27/drivers/mmc/core/
H A Dsdio_irq.c105 unsigned long period, idle_period; sdio_irq_thread() local
117 period = (host->caps & MMC_CAP_SDIO_IRQ) ? sdio_irq_thread()
120 pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n", sdio_irq_thread()
121 mmc_hostname(host), period); sdio_irq_thread()
162 period /= 2; sdio_irq_thread()
164 period++; sdio_irq_thread()
165 if (period > idle_period) sdio_irq_thread()
166 period = idle_period; sdio_irq_thread()
177 schedule_timeout(period); sdio_irq_thread()
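sdio_irq_thread() adapts its polling interval: the period is halved whenever an interrupt was actually pending and is incremented (capped at the idle period) when nothing happened. A self-contained sketch of that policy with the jiffies values replaced by plain integers.

#include <stdbool.h>
#include <stdio.h>

/* One adaptation step of the poll period, mirroring the logic quoted above. */
static unsigned long adapt_period(unsigned long period, bool had_irq,
				  unsigned long idle_period)
{
	if (had_irq)
		period /= 2;		/* work pending: poll faster */
	else {
		period++;		/* idle: slowly back off */
		if (period > idle_period)
			period = idle_period;
	}
	return period;
}

int main(void)
{
	unsigned long period = 10, idle = 10;
	bool activity[] = { true, true, false, false, false };

	for (unsigned i = 0; i < sizeof(activity) / sizeof(activity[0]); i++) {
		period = adapt_period(period, activity[i], idle);
		printf("step %u: period = %lu\n", i, period);
	}
	return 0;
}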
/linux-4.1.27/drivers/misc/
H A Dioc4.c45 #define IOC4_CALIBRATE_COUNT 63 /* Calibration cycle period */
130 /* Determines external interrupt output clock period of the PCI bus an
148 uint64_t start, end, period; ioc4_clock_calibrate() local
169 /* Check square wave period averaged over some number of cycles */ ioc4_clock_calibrate()
187 * 1. "end - start" gives us the measurement period over all ioc4_clock_calibrate()
189 * 2. Divide by number of square wave cycles to get the period ioc4_clock_calibrate()
193 * period of an IOC4 INT_OUT count. ioc4_clock_calibrate()
195 period = (end - start) / ioc4_clock_calibrate()
199 if (period > IOC4_CALIBRATE_LOW_LIMIT || ioc4_clock_calibrate()
200 period < IOC4_CALIBRATE_HIGH_LIMIT) { ioc4_clock_calibrate()
206 period = IOC4_CALIBRATE_DEFAULT; ioc4_clock_calibrate()
208 u64 ns = period; ioc4_clock_calibrate()
216 /* Remember results. We store the extint clock period rather ioc4_clock_calibrate()
217 * than the PCI clock period so that greater precision is ioc4_clock_calibrate()
219 * PCI clock period. ioc4_clock_calibrate()
221 idd->count_period = period; ioc4_clock_calibrate()
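ioc4_clock_calibrate() times a square wave over a fixed number of cycles, divides the elapsed time by the cycle count to get one period, and falls back to a default when the result looks implausible. A sketch of that reduction; the limits and default below are placeholders, not the driver's constants.

#include <stdint.h>
#include <stdio.h>

#define CAL_CYCLES	63	/* number of square-wave cycles measured */
#define PERIOD_MIN	20	/* plausible range, placeholder units */
#define PERIOD_MAX	40
#define PERIOD_DEFAULT	30

/* Average one square-wave period out of a start/end timestamp pair. */
static uint64_t calibrate_period(uint64_t start, uint64_t end)
{
	uint64_t period = (end - start) / CAL_CYCLES;

	if (period < PERIOD_MIN || period > PERIOD_MAX)
		period = PERIOD_DEFAULT;	/* measurement looked bogus */
	return period;
}

int main(void)
{
	printf("period = %llu\n",
	       (unsigned long long)calibrate_period(1000, 1000 + 63 * 31));
	return 0;
}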
/linux-4.1.27/arch/nios2/kernel/
H A Dtime.c128 static void nios2_timer_config(struct nios2_timer *timer, unsigned long period, nios2_timer_config() argument
133 /* The timer's actual period is one cycle greater than the value nios2_timer_config()
134 * stored in the period register. */ nios2_timer_config()
135 period--; nios2_timer_config()
143 timer_writew(timer, period, ALTERA_TIMER_PERIODL_REG); nios2_timer_config()
144 timer_writew(timer, period >> 16, ALTERA_TIMER_PERIODH_REG); nios2_timer_config()
167 unsigned long period; nios2_timer_set_mode() local
173 period = DIV_ROUND_UP(timer->freq, HZ); nios2_timer_set_mode()
174 nios2_timer_config(timer, period, CLOCK_EVT_MODE_PERIODIC); nios2_timer_set_mode()
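nios2_timer_config() subtracts one before programming the period, because the hardware runs for the programmed value plus one cycle, and then writes the 32-bit result as two 16-bit halves. The sketch below shows just that register math, with a printout standing in for the MMIO accessor.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the driver's MMIO write helper. */
static void timer_writew(const char *reg, uint16_t val)
{
	printf("%s <= 0x%04x\n", reg, val);
}

static void timer_set_period(uint32_t period_cycles)
{
	/* The counter runs for the programmed value plus one cycle. */
	uint32_t period = period_cycles - 1;

	timer_writew("PERIODL", period & 0xffff);
	timer_writew("PERIODH", period >> 16);
}

int main(void)
{
	timer_set_period(50000000 / 100);	/* e.g. 50 MHz clock, HZ=100 (assumed) */
	return 0;
}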
/linux-4.1.27/drivers/media/rc/img-ir/
H A Dimg-ir-sharp.c89 .space = { 680 /* 1 ms period */ },
94 .space = { 1680 /* 2 ms period */ },
/linux-4.1.27/include/uapi/linux/nfsd/
H A Dcld.h35 Cld_GraceDone, /* grace period is complete */
51 int64_t cm_gracetime; /* grace period start time */
/linux-4.1.27/tools/perf/
H A Dbuiltin-diff.c107 .name = "Base period",
220 static double period_percent(struct hist_entry *he, u64 period) period_percent() argument
224 return (period * 100.0) / total; period_percent()
229 double old_percent = period_percent(he, he->stat.period); compute_delta()
230 double new_percent = period_percent(pair, pair->stat.period); compute_delta()
239 double old_period = he->stat.period ?: 1; compute_ratio()
240 double new_period = pair->stat.period; compute_ratio()
249 u64 old_period = he->stat.period; compute_wdiff()
250 u64 new_period = pair->stat.period; compute_wdiff()
272 pair->stat.period, pair_total, formula_delta()
273 he->stat.period, he_total); formula_delta()
279 double old_period = he->stat.period; formula_ratio()
280 double new_period = pair->stat.period; formula_ratio()
288 u64 old_period = he->stat.period; formula_wdiff()
289 u64 new_period = pair->stat.period; formula_wdiff()
314 struct addr_location *al, u64 period, hists__add_entry()
317 if (__hists__add_entry(hists, al, NULL, NULL, NULL, period, weight, hists__add_entry()
338 if (hists__add_entry(hists, &al, sample->period, diff__process_sample_event()
340 pr_warning("problem incrementing symbol period, skipping event\n"); diff__process_sample_event()
348 * period in order to sort entries by percentage delta. diff__process_sample_event()
350 hists->stats.total_period += sample->period; diff__process_sample_event()
352 hists->stats.total_non_filtered_period += sample->period; diff__process_sample_event()
603 if (left->stat.period == right->stat.period) hist_entry__cmp_baseline()
605 return left->stat.period > right->stat.period ? 1 : -1; hist_entry__cmp_baseline()
787 OPT_BOOLEAN('p', "period", &show_period,
788 "Show period values."),
822 return 100.0 * he->stat.period / total; baseline_percent()
935 scnprintf(buf, size, "%" PRIu64, he->stat.period); hpp__entry_unpair()
998 scnprintf(buf, size, "%" PRIu64, pair->stat.period); hpp__entry_pair()
313 hists__add_entry(struct hists *hists, struct addr_location *al, u64 period, u64 weight, u64 transaction) hists__add_entry() argument
H A Dperf.h54 bool period; member in struct:record_opts
/linux-4.1.27/drivers/video/backlight/
H A Dpwm_bl.c30 unsigned int period; member in struct:pwm_bl_data
69 pwm_config(pb->pwm, 0, pb->period); pwm_backlight_power_off()
89 return (duty_cycle * (pb->period - lth) / pb->scale) + lth; compute_duty_cycle()
108 pwm_config(pb->pwm, duty_cycle, pb->period); pwm_backlight_update_status()
295 * period, parsed from the DT, in the PWM device. For the non-DT case, pwm_backlight_probe()
296 * set the period from platform data if it has not already been set pwm_backlight_probe()
299 pb->period = pwm_get_period(pb->pwm); pwm_backlight_probe()
300 if (!pb->period && (data->pwm_period_ns > 0)) { pwm_backlight_probe()
301 pb->period = data->pwm_period_ns; pwm_backlight_probe()
305 pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale); pwm_backlight_probe()
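compute_duty_cycle() in pwm_bl.c maps a brightness step onto the usable part of the PWM period: the low-threshold offset lth is always kept, and the remaining (period - lth) range is scaled by duty_cycle/scale. A stand-alone version of that formula; the period and threshold values are invented.

#include <stdio.h>

/* Map a brightness level onto a PWM duty cycle, keeping a low threshold. */
static unsigned int compute_duty_cycle(unsigned int level, unsigned int scale,
				       unsigned int period_ns, unsigned int lth_ns)
{
	unsigned long long duty = level;

	duty = duty * (period_ns - lth_ns) / scale;
	return (unsigned int)duty + lth_ns;
}

int main(void)
{
	unsigned int period = 5000000;	/* 5 ms PWM period, assumed */
	unsigned int lth = 50000;	/* low-threshold offset, assumed */

	for (unsigned int level = 0; level <= 100; level += 25)
		printf("level %3u -> duty %u ns\n", level,
		       compute_duty_cycle(level, 100, period, lth));
	return 0;
}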
H A Dtdo24m.c93 CMD1(0xcf, 0x02), /* Blanking period control (1) */
94 CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
106 CMD1(0xd6, 0x02), /* Blanking period control (1) */
107 CMD2(0xd7, 0x08, 0x04), /* Blanking period control (2) */
135 CMD1(0xcf, 0x02), /* Blanking period control (1) */
136 CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
H A Dlp8788_bl.c127 unsigned int period; lp8788_pwm_ctrl() local
135 period = bl->pdata->period_ns; lp8788_pwm_ctrl()
136 duty = br * period / max_br; lp8788_pwm_ctrl()
150 pwm_config(bl->pwm, duty, period); lp8788_pwm_ctrl()
H A Dlm3630a_bl.c165 unsigned int period = pwm_get_period(pchip->pwmd); lm3630a_pwm_ctrl() local
166 unsigned int duty = br * period / br_max; lm3630a_pwm_ctrl()
168 pwm_config(pchip->pwmd, duty, period); lm3630a_pwm_ctrl()
428 pchip->pwmd->period = pdata->pwm_period; lm3630a_probe()
H A Dlp855x_bl.c237 unsigned int period = lp->pdata->period_ns; lp855x_pwm_ctrl() local
238 unsigned int duty = br * period / max_br; lp855x_pwm_ctrl()
250 pwm_config(lp->pwm, duty, period); lp855x_pwm_ctrl()
364 of_property_read_u32(node, "pwm-period", &pdata->period_ns); lp855x_parse_dt()
/linux-4.1.27/drivers/staging/comedi/drivers/
H A Dni_at_a2150.c262 * period, adjusts requested period to actual timing.
264 static int a2150_get_timing(struct comedi_device *dev, unsigned int *period, a2150_get_timing() argument
281 /* make sure period is in available range */ a2150_get_timing()
282 if (*period < glb) a2150_get_timing()
283 *period = glb; a2150_get_timing()
284 if (*period > lub) a2150_get_timing()
285 *period = lub; a2150_get_timing()
287 /* we can multiply period by 1, 2, 4, or 8, using (1 << i) */ a2150_get_timing()
291 /* temp is the period in nanosec we are evaluating */ a2150_get_timing()
294 if (temp < lub && temp >= *period) { a2150_get_timing()
299 if (temp > glb && temp <= *period) { a2150_get_timing()
310 if (lub - *period < *period - glb) a2150_get_timing()
311 *period = lub; a2150_get_timing()
313 *period = glb; a2150_get_timing()
316 *period = lub; a2150_get_timing()
319 *period = glb; a2150_get_timing()
325 if (*period == lub) { a2150_get_timing()
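a2150_get_timing() can only realize periods of the form base * (1 << i) for i = 0..3 (the "multiply period by 1, 2, 4, or 8" comment); it clamps the request into range, tracks the closest achievable values below (glb) and above (lub), and picks one according to the rounding flags. A simplified sketch of the nearest-rounding case, with an assumed 100 ns base period.

#include <stdio.h>

/* Pick the closest achievable period to the request, where the hardware can
 * only do base << i for i = 0..3 (an assumed 100 ns base for illustration). */
static unsigned int nearest_period(unsigned int requested)
{
	const unsigned int base = 100;		/* ns, assumed */
	unsigned int glb = base;		/* best value <= request */
	unsigned int lub = base << 3;		/* best value >= request */

	if (requested < glb)
		requested = glb;
	if (requested > lub)
		requested = lub;

	for (int i = 0; i < 4; i++) {
		unsigned int temp = base << i;

		if (temp >= requested && temp < lub)
			lub = temp;	/* tighter upper bound */
		if (temp <= requested && temp > glb)
			glb = temp;	/* tighter lower bound */
	}
	/* Round to whichever bound is closer, as the "nearest" flag does. */
	return (lub - requested < requested - glb) ? lub : glb;
}

int main(void)
{
	printf("350 ns -> %u ns\n", nearest_period(350));	/* 400 */
	printf("250 ns -> %u ns\n", nearest_period(250));	/* 200 */
	return 0;
}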
H A Dcomedi_test.c67 unsigned long usec_period; /* waveform period in microseconds */
68 unsigned long usec_current; /* current time (mod waveform period) */
71 unsigned int scan_period; /* scan period in usec */
72 unsigned int convert_period; /* conversion period in usec */
378 int period = it->options[1]; waveform_attach() local
386 /* set default amplitude and period */ waveform_attach()
389 if (period <= 0) waveform_attach()
390 period = 100000; /* 0.1 sec */ waveform_attach()
393 devpriv->usec_period = period; waveform_attach()
/linux-4.1.27/arch/x86/kernel/cpu/
H A Dperf_event_amd_ibs.c76 s64 period = hwc->sample_period; perf_event_set_period() local
82 if (unlikely(left <= -period)) { perf_event_set_period()
83 left = period; perf_event_set_period()
85 hwc->last_period = period; perf_event_set_period()
90 left += period; perf_event_set_period()
92 hwc->last_period = period; perf_event_set_period()
97 * If the hw period that triggers the sw overflow is too short perf_event_set_period()
99 * Thus we shorten the next-to-last period and set the last perf_event_set_period()
100 * period to the max period. perf_event_set_period()
261 * sample period to set a frequency. perf_ibs_init()
291 struct hw_perf_event *hwc, u64 *period) perf_ibs_set_period()
296 overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period); perf_ibs_set_period()
370 u64 period; perf_ibs_start() local
378 perf_ibs_set_period(perf_ibs, hwc, &period); perf_ibs_start()
380 perf_ibs_enable_event(perf_ibs, hwc, period >> 4); perf_ibs_start()
527 u64 *buf, *config, period; perf_ibs_handle_irq() local
548 if (!perf_ibs_set_period(perf_ibs, hwc, &period)) perf_ibs_handle_irq()
604 perf_ibs_enable_event(perf_ibs, hwc, period >> 4); perf_ibs_handle_irq()
290 perf_ibs_set_period(struct perf_ibs *perf_ibs, struct hw_perf_event *hwc, u64 *period) perf_ibs_set_period() argument
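perf_event_set_period() above keeps a signed count of events left until the next sample: if the counter has run far past zero it is re-armed with a full period, if it has merely crossed zero one period is added back, and the programmed value is clamped to what the hardware can count. A simplified stand-alone version of that bookkeeping; the struct here is invented, only the field names echo the excerpt.

#include <stdint.h>
#include <stdio.h>

struct sample_state {
	int64_t  period;	/* configured sample period */
	int64_t  left;		/* events remaining until next sample */
	uint64_t max_period;	/* hardware counter limit */
};

/* Re-arm the counter; returns non-zero when a sample should be emitted. */
static int set_period(struct sample_state *s, uint64_t *hw_value)
{
	int overflow = 0;

	if (s->left <= -s->period) {	/* fell far behind: restart cleanly */
		s->left = s->period;
		overflow = 1;
	} else if (s->left <= 0) {	/* crossed zero: add one more period */
		s->left += s->period;
		overflow = 1;
	}
	if ((uint64_t)s->left > s->max_period)	/* clamp to what hw can count */
		s->left = s->max_period;

	*hw_value = (uint64_t)s->left;
	return overflow;
}

int main(void)
{
	struct sample_state s = { .period = 100000, .left = -20, .max_period = 1 << 20 };
	uint64_t hw;

	printf("overflow=%d, program %llu\n", set_period(&s, &hw),
	       (unsigned long long)hw);
	return 0;
}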
/linux-4.1.27/drivers/hwmon/
H A Dultra45_env.c101 int rpm, period; show_fan_speed() local
105 period = (int) val << 8; show_fan_speed()
106 if (FAN_DATA_VALID(period)) show_fan_speed()
107 rpm = FAN_PERIOD_TO_RPM(period); show_fan_speed()
120 int period; set_fan_speed() local
131 period = FAN_RPM_TO_PERIOD(rpm); set_fan_speed()
132 val = period >> 8; set_fan_speed()
H A Dpwm-fan.c50 duty = DIV_ROUND_UP(pwm * (ctx->pwm->period - 1), MAX_PWM); __set_pwm()
51 ret = pwm_config(ctx->pwm, duty, ctx->pwm->period); __set_pwm()
237 duty_cycle = ctx->pwm->period - 1; pwm_fan_probe()
240 ret = pwm_config(ctx->pwm, duty_cycle, ctx->pwm->period); pwm_fan_probe()
312 duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM); pwm_fan_resume()
313 ret = pwm_config(ctx->pwm, duty, ctx->pwm->period); pwm_fan_resume()
/linux-4.1.27/arch/arm/mach-davinci/
H A Dtime.c93 unsigned long period; member in struct:timer_s
132 * the new period (using 32-bit unsigned addition/wrapping timer32_config()
136 __raw_writel(__raw_readl(t->base + t->tim_off) + t->period, timer32_config()
145 /* reset counter to zero, set new period */ timer32_config()
147 __raw_writel(t->period, t->base + t->prd_off); timer32_config()
190 .period = ~0,
301 t->period = cycles; davinci_set_next_event()
313 t->period = davinci_clock_tick_rate / (HZ); davinci_set_mode()
434 /* clear counter and period regs */ davinci_watchdog_reset()
/linux-4.1.27/arch/arm/mach-gemini/
H A Dtime.c65 u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ); gemini_timer_set_mode() local
71 writel(period, gemini_timer_set_mode()
73 writel(period, gemini_timer_set_mode()
/linux-4.1.27/drivers/rtc/
H A Drtc-msm6242.c58 #define MSM6242_CE_T_64HZ (0 << 2) /* period 1/64 second */
59 #define MSM6242_CE_T_1HZ (1 << 2) /* period 1 second */
60 #define MSM6242_CE_T_1MINUTE (2 << 2) /* period 1 minute */
61 #define MSM6242_CE_T_1HOUR (3 << 2) /* period 1 hour */
H A Dinterface.c401 rtc->aie_timer.period = ktime_set(0, 0); rtc_set_alarm()
429 rtc->aie_timer.period = ktime_set(0, 0); rtc_initialize_alarm()
501 rtc->uie_rtctimer.period = ktime_set(1, 0); rtc_update_irq_enable()
590 ktime_t period; rtc_pie_update_irq() local
594 period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); rtc_pie_update_irq()
595 count = hrtimer_forward_now(timer, period); rtc_pie_update_irq()
705 ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq); rtc_update_hrtimer() local
707 hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); rtc_update_hrtimer()
895 if (ktime_to_ns(timer->period)) { rtc_timer_do_work()
897 timer->period); rtc_timer_do_work()
952 * @period: period at which the timer will recur
957 ktime_t expires, ktime_t period) rtc_timer_start()
965 timer->period = period; rtc_timer_start()
956 rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period) rtc_timer_start() argument
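rtc_pie_update_irq() and rtc_update_hrtimer() both derive the periodic-interrupt period as NSEC_PER_SEC / irq_freq and keep re-arming an hrtimer with it. The same idea can be exercised from user space with a periodic timerfd; everything below is ordinary POSIX/Linux API, not RTC code, and the 4 Hz rate is arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long irq_freq = 4;				/* 4 "interrupts" per second */
	long period_ns = NSEC_PER_SEC / irq_freq;	/* as the RTC core computes it */
	struct itimerspec its = {
		.it_interval = { .tv_sec = 0, .tv_nsec = period_ns },
		.it_value    = { .tv_sec = 0, .tv_nsec = period_ns },
	};
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}
	for (int i = 0; i < 4; i++) {
		uint64_t expirations;

		if (read(fd, &expirations, sizeof(expirations)) > 0)
			printf("tick (%llu expirations)\n",
			       (unsigned long long)expirations);
	}
	return 0;
}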
/linux-4.1.27/arch/x86/kvm/
H A Di8254.h28 s64 period; /* unit: ns */ member in struct:kvm_kpit_state
H A Di8254.c112 if (!ps->period) __kpit_elapsed()
125 elapsed = ps->period - ktime_to_ns(remaining); __kpit_elapsed()
327 hrtimer_add_expires_ns(&ps->timer, ps->period); pit_timer_fn()
348 ps->period = interval; create_pit_timer()
365 if (ps->period < min_period) { create_pit_timer()
368 "i8254 timer period limited to %lld ns\n", create_pit_timer()
369 ps->period, min_period); create_pit_timer()
370 ps->period = min_period; create_pit_timer()
401 * mode 1 is one shot, mode 2 is periodic, otherwise del timer */ pit_load_count()
/linux-4.1.27/arch/mips/include/asm/mach-loongson/
H A Dloongson_hwmon.h41 /* period between two checks. (Unit: s) */
/linux-4.1.27/include/linux/platform_data/
H A Dlm3630a_bl.h47 *@pwm_period : pwm period
/linux-4.1.27/include/uapi/linux/
H A Dgen_stats.h69 * @interval: sampling period
H A Dtime.h35 struct timespec it_interval; /* timer period */
H A Dptp_clock.h65 struct ptp_clock_time period; /* Desired period, zero means disable. */ member in struct:ptp_perout_request
H A Digmp.h115 /* message in this period of time, */
/linux-4.1.27/arch/m68k/include/uapi/asm/
H A Dbootinfo-amiga.h21 #define BI_AMIGA_SERPER 0x8007 /* serial port period (__be16) */
/linux-4.1.27/arch/arm/mach-pxa/include/mach/
H A Dmtd-xip.h28 * the system timer tick period. This should put the CPU into idle mode
/linux-4.1.27/arch/arm/mach-spear/
H A Dtime.c109 u32 period; clockevent_set_mode() local
119 period = clk_get_rate(gpt_clk) / HZ; clockevent_set_mode()
120 period >>= CTRL_PRESCALER16; clockevent_set_mode()
121 writew(period, gpt_base + LOAD(CLKEVT)); clockevent_set_mode()
/linux-4.1.27/sound/usb/line6/
H A Dpcm.h105 * This is modulo period size (to determine when a period is finished).
112 /* period size in bytes */
113 unsigned period; member in struct:line6_pcm_stream
H A Dplayback.h23 * the next period (sounds like a delay effect). As a workaround, the output
/linux-4.1.27/arch/sparc/kernel/
H A Dirq.h67 /* one period for clock source timer */
70 /* function to obtain offset for cs period */
/linux-4.1.27/arch/powerpc/boot/
H A Dutil.S26 /* udelay (on non-601 processors) needs to know the period of the
28 * (period of 66MHz/4). Now a variable is used that is initialized to
/linux-4.1.27/kernel/time/
H A Dtimer_stats.c111 * They get freed when a new collection period is started.
282 struct timespec period; tstats_show() local
298 period = ktime_to_timespec(time); tstats_show()
299 ms = period.tv_nsec / 1000000; tstats_show()
302 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms); tstats_show()
325 ms += period.tv_sec * 1000; tstats_show()
329 if (events && period.tv_sec) tstats_show()
/linux-4.1.27/drivers/net/wireless/ath/ath10k/
H A Dthermal.c73 u32 period, duration, enabled; ath10k_thermal_set_cur_dutycycle() local
99 period = max(ATH10K_QUIET_PERIOD_MIN, ath10k_thermal_set_cur_dutycycle()
101 duration = (period * duty_cycle) / 100; ath10k_thermal_set_cur_dutycycle()
104 ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration, ath10k_thermal_set_cur_dutycycle()
108 ath10k_warn(ar, "failed to set quiet mode period %u duration %u enabled %u ret %d\n", ath10k_thermal_set_cur_dutycycle()
109 period, duration, enabled, ret); ath10k_thermal_set_cur_dutycycle()
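ath10k_thermal_set_cur_dutycycle() clamps the quiet period to a minimum and converts the duty-cycle percentage into an absolute quiet duration, duration = period * duty_cycle / 100. A tiny sketch of that conversion with a placeholder minimum.

#include <stdio.h>

#define QUIET_PERIOD_MIN 25	/* ms, placeholder for the driver's minimum */

static unsigned int quiet_duration(unsigned int period_ms, unsigned int duty_pct)
{
	if (period_ms < QUIET_PERIOD_MIN)
		period_ms = QUIET_PERIOD_MIN;	/* enforce the lower bound */
	return period_ms * duty_pct / 100;	/* time spent quiet per period */
}

int main(void)
{
	printf("duty 40%% of a 100 ms period -> %u ms quiet\n",
	       quiet_duration(100, 40));
	return 0;
}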
/linux-4.1.27/drivers/scsi/aic7xxx/
H A Daic79xx_proc.c58 u_int period; /* in 100ths of ns */ member in struct:__anon8594
69 * sync period factor.
76 /* See if the period is in the "exception" table */ ahd_calc_syncsrate()
81 return (100000000 / scsi_syncrates[i].period); ahd_calc_syncsrate()
99 if (tinfo->period == AHD_PERIOD_UNKNOWN) { ahd_format_transinfo()
106 freq = ahd_calc_syncsrate(tinfo->period); ahd_format_transinfo()
H A Daic79xx_osm.c325 "period-delimited options string:\n"
645 spi_min_period(starget) = tinfo->user.period; ahd_linux_target_alloc()
1708 if (tinfo->curr.period != tinfo->goal.period ahd_send_async()
1733 if (tinfo->curr.period == spi_period(starget) ahd_send_async()
1740 spi_period(starget) = tinfo->curr.period; ahd_send_async()
2402 static void ahd_linux_set_period(struct scsi_target *starget, int period) ahd_linux_set_period() argument
2419 printk("%s: set period to %d\n", ahd_name(ahd), period); ahd_linux_set_period()
2424 if (period < 8) ahd_linux_set_period()
2425 period = 8; ahd_linux_set_period()
2426 if (period < 10) { ahd_linux_set_period()
2429 if (period == 8) ahd_linux_set_period()
2432 period = 10; ahd_linux_set_period()
2446 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_period()
2450 ahd_set_syncrate(ahd, &devinfo, period, offset, ahd_linux_set_period()
2466 unsigned int period = 0; ahd_linux_set_offset() local
2478 period = tinfo->goal.period; ahd_linux_set_offset()
2480 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_offset()
2485 ahd_set_syncrate(ahd, &devinfo, period, offset, ppr_options, ahd_linux_set_offset()
2502 unsigned int period = tinfo->goal.period; ahd_linux_set_dt() local
2516 if (period <= 9) ahd_linux_set_dt()
2517 period = 10; /* If resetting DT, period must be >= 25ns */ ahd_linux_set_dt()
2523 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_dt()
2527 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ahd_linux_set_dt()
2544 unsigned int period = tinfo->goal.period; ahd_linux_set_qas() local
2562 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_qas()
2566 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ahd_linux_set_qas()
2583 unsigned int period = tinfo->goal.period; ahd_linux_set_iu() local
2602 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_iu()
2606 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ahd_linux_set_iu()
2623 unsigned int period = tinfo->goal.period; ahd_linux_set_rd_strm() local
2638 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_rd_strm()
2642 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ahd_linux_set_rd_strm()
2659 unsigned int period = tinfo->goal.period; ahd_linux_set_wr_flow() local
2674 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_wr_flow()
2678 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ahd_linux_set_wr_flow()
2695 unsigned int period = tinfo->goal.period; ahd_linux_set_rti() local
2718 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_rti()
2722 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ahd_linux_set_rti()
2739 unsigned int period = tinfo->goal.period; ahd_linux_set_pcomp_en() local
2768 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_pcomp_en()
2772 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ahd_linux_set_pcomp_en()
2789 unsigned int period = tinfo->goal.period; ahd_linux_set_hold_mcs() local
2798 ahd_find_syncrate(ahd, &period, &ppr_options, ahd_linux_set_hold_mcs()
2802 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ahd_linux_set_hold_mcs()
H A Daic7xxx_proc.c59 u_int period; /* in 100ths of ns */ member in struct:__anon8616
70 * sync period factor.
77 /* See if the period is in the "exception" table */ ahc_calc_syncsrate()
82 return (100000000 / scsi_syncrates[i].period); ahc_calc_syncsrate()
103 freq = ahc_calc_syncsrate(tinfo->period); ahc_format_transinfo()
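ahd_calc_syncsrate()/ahc_calc_syncsrate() store the negotiated sync period in hundredths of a nanosecond and recover the transfer rate as 100000000 / period, which comes out in kHz (a 25 ns period, stored as 2500, gives 40000 kHz). A one-function version of that conversion; the example period is arbitrary.

#include <stdio.h>

/* period is in hundredths of a nanosecond; the result is in kHz. */
static unsigned int sync_rate_khz(unsigned int period_100ths_ns)
{
	return 100000000u / period_100ths_ns;
}

int main(void)
{
	printf("25.0 ns period -> %u kHz\n", sync_rate_khz(2500));
	return 0;
}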
H A Daic7xxx_core.c119 /* ultra2 fast/ultra period rate */
153 u_int *period,
170 u_int period, u_int offset);
176 u_int period, u_int offset,
1909 /*period*/0, /*offset*/0, ahc_handle_scsiint()
2211 * this function finds the nearest syncrate to the input period limited
2218 u_int *period, u_int *ppr_options, role_t role) ahc_devlimited_syncrate()
2239 * period otherwise we may allow a target initiated ahc_devlimited_syncrate()
2256 if (transinfo->period == 0) { ahc_devlimited_syncrate()
2257 *period = 0; ahc_devlimited_syncrate()
2261 *period = max(*period, (u_int)transinfo->period); ahc_devlimited_syncrate()
2262 return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); ahc_devlimited_syncrate()
2266 * Look up the valid period to SCSIRATE conversion in our table.
2267 * Return the period and offset that should be sent to the target
2271 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, ahc_find_syncrate() argument
2305 if (*period <= syncrate->period) { ahc_find_syncrate()
2313 * if the period we use to send data to it ahc_find_syncrate()
2314 * is lower. Only lower the response period ahc_find_syncrate()
2318 *period = syncrate->period; ahc_find_syncrate()
2330 if ((*period == 0) ahc_find_syncrate()
2335 *period = 0; ahc_find_syncrate()
2344 * sync "period" factor.
2374 return (syncrate->period); ahc_find_period()
2376 return (syncrate->period); ahc_find_period()
2466 tinfo->curr.period = AHC_PERIOD_UNKNOWN; ahc_update_neg_request()
2469 if (tinfo->curr.period != tinfo->goal.period ahc_update_neg_request()
2494 const struct ahc_syncrate *syncrate, u_int period, ahc_set_syncrate()
2509 period = 0; ahc_set_syncrate()
2517 tinfo->user.period = period; ahc_set_syncrate()
2523 tinfo->goal.period = period; ahc_set_syncrate()
2528 old_period = tinfo->curr.period; ahc_set_syncrate()
2533 && (old_period != period ahc_set_syncrate()
2583 tinfo->curr.period = period; ahc_set_syncrate()
2975 u_int period; ahc_build_transfer_msg() local
2982 * Filter our period based on the current connection. ahc_build_transfer_msg()
2987 period = tinfo->goal.period; ahc_build_transfer_msg()
2993 rate = ahc_devlimited_syncrate(ahc, tinfo, &period, ahc_build_transfer_msg()
2996 dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; ahc_build_transfer_msg()
3045 ahc_construct_ppr(ahc, devinfo, period, offset, ahc_build_transfer_msg()
3048 ahc_construct_sdtr(ahc, devinfo, period, offset); ahc_build_transfer_msg()
3061 u_int period, u_int offset) ahc_construct_sdtr()
3064 period = AHC_ASYNC_XFER_PERIOD; ahc_construct_sdtr()
3066 ahc->msgout_buf + ahc->msgout_index, period, offset); ahc_construct_sdtr()
3069 printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", ahc_construct_sdtr()
3071 devinfo->lun, period, offset); ahc_construct_sdtr()
3099 u_int period, u_int offset, u_int bus_width, ahc_construct_ppr()
3103 period = AHC_ASYNC_XFER_PERIOD; ahc_construct_ppr()
3105 ahc->msgout_buf + ahc->msgout_index, period, offset, ahc_construct_ppr()
3109 printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " ahc_construct_ppr()
3112 bus_width, period, offset, ppr_options); ahc_construct_ppr()
3638 u_int period; ahc_parse_msg() local
3658 period = ahc->msgin_buf[3]; ahc_parse_msg()
3661 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, ahc_parse_msg()
3669 "SDTR period %x, offset %x\n\t" ahc_parse_msg()
3670 "Filtered to period %x, offset %x\n", ahc_parse_msg()
3674 period, offset); ahc_parse_msg()
3677 syncrate, period, ahc_parse_msg()
3707 period, offset); ahc_parse_msg()
3813 u_int period; ahc_parse_msg() local
3836 period = ahc->msgin_buf[3]; ahc_parse_msg()
3843 * period factor with no DT option ahc_parse_msg()
3847 && period == 9) ahc_parse_msg()
3863 syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, ahc_parse_msg()
3880 period = 0; ahc_parse_msg()
3899 ahc_construct_ppr(ahc, devinfo, period, offset, ahc_parse_msg()
3906 "period %x, offset %x,options %x\n" ahc_parse_msg()
3907 "\tFiltered to width %x, period %x, " ahc_parse_msg()
3913 bus_width, period, offset, ppr_options); ahc_parse_msg()
3919 syncrate, period, ahc_parse_msg()
4075 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, ahc_handle_msg_reject()
4362 /*period*/0, /*offset*/0, /*ppr_options*/0, ahc_handle_devreset()
5511 tinfo->user.period = ahc_syncrates->period; ahc_init()
5542 tinfo->user.period = ahc_init()
5545 tinfo->user.period = 0; ahc_init()
5559 tinfo->user.period = ahc_init()
5564 if (tinfo->user.period != 0) ahc_init()
5567 if (tinfo->user.period == 0) ahc_init()
6635 /*period*/0, /*offset*/0, ahc_reset_channel()
2216 ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *period, u_int *ppr_options, role_t role) ahc_devlimited_syncrate() argument
2493 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, const struct ahc_syncrate *syncrate, u_int period, u_int offset, u_int ppr_options, u_int type, int paused) ahc_set_syncrate() argument
3060 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset) ahc_construct_sdtr() argument
3098 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options) ahc_construct_ppr() argument
H A Daic7xxx_osm.c335 "period-delimited options string:\n"
1632 if (tinfo->curr.period != tinfo->goal.period ahc_send_async()
1656 if (tinfo->curr.period == spi_period(starget) ahc_send_async()
1663 spi_period(starget) = tinfo->curr.period; ahc_send_async()
2352 static void ahc_linux_set_period(struct scsi_target *starget, int period) ahc_linux_set_period() argument
2370 if (period < 9) ahc_linux_set_period()
2371 period = 9; /* 12.5ns is our minimum */ ahc_linux_set_period()
2372 if (period == 9) { ahc_linux_set_period()
2377 period = 10; ahc_linux_set_period()
2389 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); ahc_linux_set_period()
2391 ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ahc_linux_set_period()
2407 unsigned int period = 0; ahc_linux_set_offset() local
2414 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); ahc_linux_set_offset()
2415 period = tinfo->goal.period; ahc_linux_set_offset()
2419 ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ahc_linux_set_offset()
2436 unsigned int period = tinfo->goal.period; ahc_linux_set_dt() local
2445 } else if (period == 9) ahc_linux_set_dt()
2446 period = 10; /* if resetting DT, period must be >= 25ns */ ahc_linux_set_dt()
2450 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,AHC_SYNCRATE_DT); ahc_linux_set_dt()
2452 ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, ahc_linux_set_dt()
2474 unsigned int period = tinfo->goal.period;
2483 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
2485 ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
2502 unsigned int period = tinfo->goal.period;
2511 syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
2513 ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
/linux-4.1.27/include/scsi/fc/
H A Dfc_fip.h30 #define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */
31 #define FIP_VN_KA_PERIOD 90000 /* required VN_port keep-alive period (mS) */
43 #define FIP_VN_BEACON_FUZZ 100 /* random time to add to beacon period (ms) */
159 FIP_DT_FKA = 12, /* advertisement keep-alive period */
243 * FIP_DT_FKA - Advertisement keep-alive period.
249 __be32 fd_fka_period; /* adv./keep-alive period in mS */
/linux-4.1.27/sound/arm/
H A Dpxa2xx-pcm-lib.c45 size_t period = params_period_bytes(params); __pxa2xx_pcm_hw_params() local
94 if (period > totsize) __pxa2xx_pcm_hw_params()
95 period = totsize; __pxa2xx_pcm_hw_params()
96 dma_desc->dcmd = dcmd | period | DCMD_ENDIRQEN; __pxa2xx_pcm_hw_params()
98 dma_buff_phys += period; __pxa2xx_pcm_hw_params()
99 } while (totsize -= period); __pxa2xx_pcm_hw_params()
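__pxa2xx_pcm_hw_params() walks the DMA buffer one period at a time (clamping the period once if it exceeds the whole buffer) and marks each descriptor to raise an interrupt when its period completes. The sketch below performs the same chunking with a printout in place of the descriptor, and additionally tolerates a short final chunk.

#include <stddef.h>
#include <stdio.h>

/* Split a buffer into period-sized chunks, one "descriptor" per period. */
static void build_period_chunks(size_t total, size_t period)
{
	size_t offset = 0;

	if (period > total)
		period = total;		/* single short period */

	do {
		size_t len = (total - offset < period) ? total - offset : period;

		printf("desc: offset %zu, len %zu, irq-on-completion\n",
		       offset, len);
		offset += len;
	} while (offset < total);
}

int main(void)
{
	build_period_chunks(10000, 4096);	/* sizes are arbitrary */
	return 0;
}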
/linux-4.1.27/sound/soc/fsl/
H A Dfsl_dma.c91 * @dma_buf_next: physical address of the next period to process
93 * @buffer period_size: the size of a single period
158 * fsl_dma_update_pointers - update LD pointers to point to the next period
160 * As each period is completed, this function changes the the link
161 * descriptor pointers for that period to point to the next period.
168 /* Update our link descriptors to point to the next period. On a 36-bit fsl_dma_update_pointers()
243 /* Tell ALSA we completed a period. */ fsl_dma_isr()
247 * Update our link descriptors to point to the next period. We fsl_dma_isr()
329 * descriptors that ping-pong from one period to the next. For example, if
348 * and here's how they look after the first period is finished playing:
365 * The first link descriptor now points to the third period. The DMA
366 * controller is currently playing the second period. When it finishes, it
367 * will jump back to the first descriptor and play the third period.
374 * 2. We need to receive an interrupt at the end of every period. The DMA
376 * (aka segment). Making each period into a DMA segment will give us the
402 * Reject any DMA buffer whose size is not a multiple of the period fsl_dma_open()
494 * ALSA period, so this is how we get an interrupt at the end of every fsl_dma_open()
495 * period. fsl_dma_open()
558 /* Number of bytes per period */ fsl_dma_hw_params()
561 /* Pointer to next period */ fsl_dma_hw_params()
658 * cache incoherency if the period size is larger than the fsl_dma_hw_params()
659 * size of L1 cache. This is because filling in one period will fsl_dma_hw_params()
660 * flush out the data for the previous period. So if you fsl_dma_hw_params()
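The long fsl_dma.c comment above describes a small ring of link descriptors that ping-pong across the periods of the ALSA buffer: when the descriptor playing a period completes, it is re-pointed at the period that lies one ring-length further along (wrapping at the end of the buffer), so the ring always covers the next periods to be played. A sketch of just that index arithmetic; the descriptor and period counts are illustrative.

#include <stdio.h>

#define NUM_DESC	2	/* link descriptors that ping-pong */
#define NUM_PERIODS	4	/* periods in the ALSA buffer */
#define PERIOD_BYTES	4096

/* When the descriptor playing period 'done' completes, re-aim it at the
 * period NUM_DESC further along the buffer (wrapping around), so the
 * descriptors always cover the next periods to be played. */
static unsigned int next_period(unsigned int done)
{
	return (done + NUM_DESC) % NUM_PERIODS;
}

int main(void)
{
	unsigned int p = 0;

	for (int step = 0; step < 6; step++) {
		printf("period %u done: descriptor re-pointed to offset %u\n",
		       p, next_period(p) * PERIOD_BYTES);
		p = (p + 1) % NUM_PERIODS;	/* playback moves to the next period */
	}
	return 0;
}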
H A Dimx-pcm-fiq.c39 unsigned int period; member in struct:imx_pcm_runtime_data
84 iprtd->period = params_period_bytes(params); snd_imx_pcm_hw_params()
101 regs.ARM_r8 = (iprtd->period * iprtd->periods - 1) << 16; snd_imx_pcm_prepare()
103 regs.ARM_r9 = (iprtd->period * iprtd->periods - 1) << 16; snd_imx_pcm_prepare()
H A Dmpc5200_dma.h18 * @period_bytes: size of DMA period in bytes
/linux-4.1.27/sound/soc/sh/
H A Dsiu_pcm.c48 /* transfersize is number of u32 dma transfers per period */ siu_pcm_stmwrite_stop()
78 /* Current period in buffer */ siu_pcm_stmwrite_start()
97 /* Update completed period count */ siu_dma_tx_complete()
103 pr_debug("%s: done period #%d (%u/%u bytes), cookie %d\n", siu_dma_tx_complete()
110 /* Notify alsa: a period is done */ siu_dma_tx_complete()
262 /* Current period in buffer */ siu_pcm_stmread_start()
422 dev_dbg(dev, "%s: port=%d, %d channels, period=%u bytes\n", __func__, siu_pcm_prepare()
425 /* We only support buffers that are multiples of the period */ siu_pcm_prepare()
427 dev_err(dev, "%s() - buffer=%d not multiple of period=%d\n", siu_pcm_prepare()
488 * So far only resolution of one period is supported, subject to extending the
H A Dsiu.h80 #define SIU_PERIOD_BYTES_MAX 8192 /* DMA transfer/period size */
81 #define SIU_PERIOD_BYTES_MIN 256 /* DMA transfer/period size */
/linux-4.1.27/tools/perf/ui/
H A Dhist.c9 /* hist period print (hpp) functions */
47 u64 period = get_field(pair); __hpp__fmt() local
72 100.0 * period / total); __hpp__fmt()
75 len, period); __hpp__fmt()
201 * Put caller above callee when they have equal period. __hpp__sort_acc()
357 HPP_PERCENT_FNS(overhead, period)
362 HPP_PERCENT_ACC_FNS(overhead_acc, period)
365 HPP_RAW_FNS(period, period)
417 HPP__PRINT_FNS("Period", period)
/linux-4.1.27/drivers/media/rc/
H A Dnuvoton-cir.h111 /* carrier period = 1 / frequency */
182 /* select sample period as 50us */
266 /* select the same sample period as the CIR register */
378 /* MCE CIR signal length, related on sample period */
381 * 43ms / 50us (sample period) * 0.85 (inaccuracy)
386 * 26ms / 50us (sample period) * 0.85 (inaccuracy)
392 * 24ms / 50us (sample period) * 0.85 (inaccuracy)
H A Dene_ir.c337 int period = ene_read_reg(dev, ENE_CIRCAR_PRD); ene_rx_sense_carrier() local
340 if (!(period & ENE_CIRCAR_PRD_VALID)) ene_rx_sense_carrier()
343 period &= ~ENE_CIRCAR_PRD_VALID; ene_rx_sense_carrier()
345 if (!period) ene_rx_sense_carrier()
348 dbg("RX: hardware carrier period = %02x", period); ene_rx_sense_carrier()
349 dbg("RX: hardware carrier pulse period = %02x", hperiod); ene_rx_sense_carrier()
351 carrier = 2000000 / period; ene_rx_sense_carrier()
352 duty_cycle = (hperiod * 100) / period; ene_rx_sense_carrier()
406 /* set sample period*/ ene_rx_setup()
895 u32 period; ene_set_tx_carrier() local
901 period = 2000000 / carrier; ene_set_tx_carrier()
902 if (period && (period > ENE_CIRMOD_PRD_MAX || ene_set_tx_carrier()
903 period < ENE_CIRMOD_PRD_MIN)) { ene_set_tx_carrier()
910 dev->tx_period = period; ene_set_tx_carrier()
1199 MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");
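ene_rx_sense_carrier() reads the measured carrier period and its high ("pulse") portion in 500 ns units, then derives carrier = 2000000 / period (i.e. 1 / (period x 500 ns)) and duty_cycle = hperiod * 100 / period. The helper below redoes that arithmetic on invented register values.

#include <stdio.h>

/* period and hperiod are in 500 ns units, as the hardware reports them. */
static void decode_carrier(unsigned int period, unsigned int hperiod)
{
	unsigned int carrier_hz, duty_pct;

	if (!period)
		return;				/* nothing measured */

	carrier_hz = 2000000 / period;		/* 1 / (period * 500 ns) */
	duty_pct   = hperiod * 100 / period;	/* high time vs. whole period */
	printf("carrier %u Hz, duty cycle %u%%\n", carrier_hz, duty_pct);
}

int main(void)
{
	decode_carrier(53, 18);	/* roughly 38 kHz at ~33% duty, values invented */
	return 0;
}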
H A Dene_ir.h66 #define ENE_FW_SAMPLE_PERIOD_FAN 61 /* fan input has fixed sample period */
126 /* RLC configuration - sample period (1us resolution) + idle mode */
144 /* detected RX carrier period (resolution: 500 ns) */
151 /* TX period (resolution: 500 ns, minimum 2)*/
/linux-4.1.27/drivers/usb/host/
H A Duhci-q.c433 * Link a high-period interrupt QH into the schedule at the end of its
449 * Link a period-1 interrupt or async QH into the schedule at the
520 * Unlink a high-period interrupt QH from the schedule
532 * Unlink a period-1 interrupt or async QH from the schedule
609 * Find the highest existing bandwidth load for a given phase and period.
611 static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period) uhci_highest_load() argument
615 for (phase += period; phase < MAX_PHASE; phase += period) uhci_highest_load()
631 minimax_load = uhci_highest_load(uhci, qh->phase, qh->period); uhci_check_bandwidth()
634 int max_phase = min_t(int, MAX_PHASE, qh->period); uhci_check_bandwidth()
637 minimax_load = uhci_highest_load(uhci, qh->phase, qh->period); uhci_check_bandwidth()
639 load = uhci_highest_load(uhci, phase, qh->period); uhci_check_bandwidth()
650 "period %d, phase %d, %d + %d us\n", uhci_check_bandwidth()
651 qh->period, qh->phase, minimax_load, qh->load); uhci_check_bandwidth()
666 for (i = qh->phase; i < MAX_PHASE; i += qh->period) { uhci_reserve_bandwidth()
684 "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n", uhci_reserve_bandwidth()
687 qh->period, qh->phase, load); uhci_reserve_bandwidth()
699 for (i = qh->phase; i < MAX_PHASE; i += qh->period) { uhci_release_bandwidth()
717 "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n", uhci_release_bandwidth()
720 qh->period, qh->phase, load); uhci_release_bandwidth()
1095 /* If the slot is full, try a lower period */ uhci_submit_interrupt()
1097 qh->period = 1 << exponent; uhci_submit_interrupt()
1103 qh->phase = (qh->period / 2) & (MAX_PHASE - 1); uhci_submit_interrupt()
1108 } else if (qh->period > urb->interval) uhci_submit_interrupt()
1109 return -EINVAL; /* Can't decrease the period */ uhci_submit_interrupt()
1113 urb->interval = qh->period; uhci_submit_interrupt()
1271 /* Check the period and figure out the starting frame number */ uhci_submit_isochronous()
1273 qh->period = urb->interval; uhci_submit_isochronous()
1284 frame += (next - frame + qh->period - 1) & -qh->period; uhci_submit_isochronous()
1286 } else if (qh->period != urb->interval) { uhci_submit_isochronous()
1287 return -EINVAL; /* Can't change the period */ uhci_submit_isochronous()
1310 frame += (next - frame + qh->period - 1) & uhci_submit_isochronous()
1311 -qh->period; uhci_submit_isochronous()
1319 qh->period)) uhci_submit_isochronous()
1323 qh->period, uhci_submit_isochronous()
1356 frame += qh->period; uhci_submit_isochronous()
1403 qh->iso_frame += qh->period; uhci_result_isochronous()
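uhci_check_bandwidth()/uhci_highest_load() and sl811's balance() further down solve the same scheduling problem: for an endpoint with a given period, try each candidate phase 0..period-1, evaluate the load of the frames that phase would occupy, and keep the phase with the smallest result. A self-contained version of that search over a small frame-load table; the table contents are invented.

#include <stdio.h>

#define FRAMES 8	/* size of the periodic schedule, a small stand-in */

/* Worst per-frame load if we place the endpoint at 'phase' with 'period'. */
static int highest_load(const int load[FRAMES], int phase, int period)
{
	int max = 0;

	for (int f = phase; f < FRAMES; f += period)
		if (load[f] > max)
			max = load[f];
	return max;
}

/* Pick the phase whose worst frame is least loaded (ties go to the earlier phase). */
static int best_phase(const int load[FRAMES], int period)
{
	int best = 0, best_load = highest_load(load, 0, period);

	for (int phase = 1; phase < period; phase++) {
		int l = highest_load(load, phase, period);

		if (l < best_load) {
			best = phase;
			best_load = l;
		}
	}
	return best;
}

int main(void)
{
	int load[FRAMES] = { 90, 20, 40, 20, 90, 20, 70, 20 };

	printf("period 4: best phase = %d\n", best_phase(load, 4));
	return 0;
}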
H A Dehci-sched.c214 ps->phase, ps->phase_uf, ps->period, bandwidth_dbg()
386 unsigned period = ps->bw_period; tt_available() local
389 if ((period == 0) || (uframe >= 7)) /* error */ tt_available()
392 for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES; tt_available()
393 frame += period) { tt_available()
440 unsigned period, tt_no_collision()
446 if (period == 0) /* error */ tt_no_collision()
453 for (; frame < ehci->periodic_size; frame += period) { tt_no_collision()
548 unsigned period = qh->ps.period; qh_link_periodic() local
552 period, hc32_to_cpup(ehci, &qh->hw->hw_info2) qh_link_periodic()
557 if (period == 0) qh_link_periodic()
558 period = 1; qh_link_periodic()
560 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) { qh_link_periodic()
576 /* sorting each branch by period (slow-->fast) qh_link_periodic()
580 if (qh->ps.period > here.qh->ps.period) qh_link_periodic()
615 unsigned period; qh_unlink_periodic() local
633 period = qh->ps.period ? : 1; qh_unlink_periodic()
635 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) qh_unlink_periodic()
645 qh->ps.period, qh_unlink_periodic()
911 qh->ps.phase = (qh->ps.period ? ehci->random_frame & qh_schedule()
912 (qh->ps.period - 1) : 0); qh_schedule()
915 qh->ps.cs_mask = qh->ps.period ? qh_schedule()
1086 /* period for bandwidth allocation */ iso_stream_init()
1094 stream->ps.period = urb->interval >> 3; iso_stream_init()
1129 /* period for bandwidth allocation */ iso_stream_init()
1137 stream->ps.period = urb->interval; iso_stream_init()
1501 u32 now, base, next, start, period, span, now2; iso_stream_schedule() local
1509 period = stream->uperiod; iso_stream_schedule()
1529 start = ((-(++ehci->random_frame)) << 3) & (period - 1); iso_stream_schedule()
1537 start += period; iso_stream_schedule()
1560 (stream->ps.period - 1); iso_stream_schedule()
1608 if (unlikely(!empty && start < period)) { iso_stream_schedule()
1610 urb, stream->next_uframe, base, period, mod); iso_stream_schedule()
1616 if (likely(!empty || start <= now2 + period)) { iso_stream_schedule()
1636 skip = (now2 - start + period - 1) & -period; iso_stream_schedule()
1639 urb, start + base, span - period, now2 + base, iso_stream_schedule()
1643 skip = span - period; iso_stream_schedule()
1653 urb->error_count = skip / period; iso_stream_schedule()
1660 start = next + ((start - next) & (period - 1)); iso_stream_schedule()
1664 if (unlikely(start + span - period >= mod + wrap)) { iso_stream_schedule()
1666 urb, start, span - period, mod + wrap); iso_stream_schedule()
2019 iso_sched->span = urb->number_of_packets * stream->ps.period; sitd_sched_init()
2332 if (urb->interval != stream->ps.period) { sitd_submit()
2334 stream->ps.period, urb->interval); sitd_submit()
438 tt_no_collision( struct ehci_hcd *ehci, unsigned period, struct usb_device *dev, unsigned frame, u32 uf_mask ) tt_no_collision() argument
H A Dsl811-hcd.c360 if (ep->period) start()
368 if (ep->period) start()
450 "deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
451 for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
464 -= ep->load / ep->period;
490 if (!ep->period) done()
763 static int balance(struct sl811 *sl811, u16 period, u16 load) balance() argument
767 /* search for the least loaded schedule branch of that period balance()
770 for (i = 0; i < period ; i++) { balance()
774 for (j = i; j < PERIODIC_SIZE; j += period) { balance()
875 ep->period = urb->interval; sl811h_urb_enqueue()
899 urb->interval = ep->period; sl811h_urb_enqueue()
911 retval = balance(sl811, ep->period, ep->load); sl811h_urb_enqueue()
919 /* sort each schedule branch by period (slow before fast) sl811h_urb_enqueue()
924 ep->period, ep, ep->branch); sl811h_urb_enqueue()
925 for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { sl811h_urb_enqueue()
930 if (ep->period > here->period) sl811h_urb_enqueue()
942 hcd->self.bandwidth_allocated += ep->load / ep->period; sl811h_urb_enqueue()
1475 ep->period, ep, sl811h_show()
H A Dfotg210-hcd.c236 "period=%s%s %s", dbg_command_buf()
558 p.qh->period, fill_periodic_buffer()
2887 qh->period = urb->interval >> 3; qh_make()
2888 if (qh->period == 0 && urb->interval != 1) { qh_make()
2894 } else if (qh->period > fotg210->periodic_size) { qh_make()
2895 qh->period = fotg210->periodic_size; qh_make()
2896 urb->interval = qh->period << 3; qh_make()
2918 qh->period = urb->interval; qh_make()
2919 if (qh->period > fotg210->periodic_size) { qh_make()
2920 qh->period = fotg210->periodic_size; qh_make()
2921 urb->interval = qh->period; qh_make()
3553 unsigned period, tt_no_collision()
3559 if (period == 0) /* error */ tt_no_collision()
3566 for (; frame < fotg210->periodic_size; frame += period) { tt_no_collision()
3646 unsigned period = qh->period; qh_link_periodic() local
3650 period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) qh_link_periodic()
3655 if (period == 0) qh_link_periodic()
3656 period = 1; qh_link_periodic()
3658 for (i = qh->start; i < fotg210->periodic_size; i += period) { qh_link_periodic()
3674 /* sorting each branch by period (slow-->fast) qh_link_periodic()
3678 if (qh->period > here.qh->period) qh_link_periodic()
3698 fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period qh_link_periodic()
3699 ? ((qh->usecs + qh->c_usecs) / qh->period) qh_link_periodic()
3713 unsigned period; qh_unlink_periodic() local
3731 period = qh->period; qh_unlink_periodic()
3732 if (!period) qh_unlink_periodic()
3733 period = 1; qh_unlink_periodic()
3735 for (i = qh->start; i < fotg210->periodic_size; i += period) qh_unlink_periodic()
3739 fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period qh_unlink_periodic()
3740 ? ((qh->usecs + qh->c_usecs) / qh->period) qh_unlink_periodic()
3745 qh->period, qh_unlink_periodic()
3839 unsigned period, check_period()
3854 * for period 0, check _every_ microframe in the schedule. check_period()
3856 if (unlikely(period == 0)) { check_period()
3866 /* just check the specified uframe, at that period */ check_period()
3872 } while ((frame += period) < fotg210->periodic_size); check_period()
3893 if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs)) check_intr_schedule()
3912 if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) { check_intr_schedule()
3914 qh->period, qh->c_usecs)) check_intr_schedule()
3917 qh->period, qh->c_usecs)) check_intr_schedule()
3933 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ qh_schedule()
3941 if (frame < qh->period) { qh_schedule()
3956 if (qh->period) { qh_schedule()
3959 for (i = qh->period; status && i > 0; --i) { qh_schedule()
3960 frame = ++fotg210->random_frame % qh->period; qh_schedule()
3970 /* qh->period == 0 means every uframe */ qh_schedule()
3982 hw->hw_info2 |= qh->period qh_schedule()
4341 u32 period itd_slot_ok()
4344 uframe %= period; itd_slot_ok()
4352 uframe += period; itd_slot_ok()
4377 u32 now, next, start, period, span; iso_stream_schedule() local
4382 period = urb->interval; iso_stream_schedule()
4415 excess = (stream->next_uframe - period - next) & (mod - 1); iso_stream_schedule()
4417 start = next + excess - mod + period * iso_stream_schedule()
4418 DIV_ROUND_UP(mod - excess, period); iso_stream_schedule()
4420 start = next + excess + period; iso_stream_schedule()
4423 urb, start - now - period, period, iso_stream_schedule()
4448 start += period; iso_stream_schedule()
4453 stream->usecs, period)) iso_stream_schedule()
4467 if (unlikely(start - now + span - period iso_stream_schedule()
4470 urb, start - now, span - period, iso_stream_schedule()
4589 "schedule devp %s ep%d%s-iso period %d start %d.%d\n", itd_link_urb()
3551 tt_no_collision( struct fotg210_hcd *fotg210, unsigned period, struct usb_device *dev, unsigned frame, u32 uf_mask ) tt_no_collision() argument
3835 check_period( struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe, unsigned period, unsigned usecs ) check_period() argument
H A Dfusbh200-hcd.c236 "period=%s%s %s", dbg_command_buf()
536 p.qh->period, fill_periodic_buffer()
2834 qh->period = urb->interval >> 3; qh_make()
2835 if (qh->period == 0 && urb->interval != 1) { qh_make()
2841 } else if (qh->period > fusbh200->periodic_size) { qh_make()
2842 qh->period = fusbh200->periodic_size; qh_make()
2843 urb->interval = qh->period << 3; qh_make()
2865 qh->period = urb->interval; qh_make()
2866 if (qh->period > fusbh200->periodic_size) { qh_make()
2867 qh->period = fusbh200->periodic_size; qh_make()
2868 urb->interval = qh->period; qh_make()
3495 unsigned period, tt_no_collision()
3501 if (period == 0) /* error */ tt_no_collision()
3508 for (; frame < fusbh200->periodic_size; frame += period) { tt_no_collision()
3587 unsigned period = qh->period; qh_link_periodic() local
3591 period, hc32_to_cpup(fusbh200, &qh->hw->hw_info2) qh_link_periodic()
3596 if (period == 0) qh_link_periodic()
3597 period = 1; qh_link_periodic()
3599 for (i = qh->start; i < fusbh200->periodic_size; i += period) { qh_link_periodic()
3615 /* sorting each branch by period (slow-->fast) qh_link_periodic()
3619 if (qh->period > here.qh->period) qh_link_periodic()
3639 fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated += qh->period qh_link_periodic()
3640 ? ((qh->usecs + qh->c_usecs) / qh->period) qh_link_periodic()
3653 unsigned period; qh_unlink_periodic() local
3671 if ((period = qh->period) == 0) qh_unlink_periodic()
3672 period = 1; qh_unlink_periodic()
3674 for (i = qh->start; i < fusbh200->periodic_size; i += period) qh_unlink_periodic()
3678 fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated -= qh->period qh_unlink_periodic()
3679 ? ((qh->usecs + qh->c_usecs) / qh->period) qh_unlink_periodic()
3684 qh->period, qh_unlink_periodic()
3775 unsigned period, check_period()
3790 * for period 0, check _every_ microframe in the schedule. check_period()
3792 if (unlikely (period == 0)) { check_period()
3801 /* just check the specified uframe, at that period */ check_period()
3807 } while ((frame += period) < fusbh200->periodic_size); check_period()
3828 if (!check_period (fusbh200, frame, uframe, qh->period, qh->usecs)) check_intr_schedule()
3847 if (tt_no_collision (fusbh200, qh->period, qh->dev, frame, mask)) { check_intr_schedule()
3849 qh->period, qh->c_usecs)) check_intr_schedule()
3852 qh->period, qh->c_usecs)) check_intr_schedule()
3868 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ qh_schedule()
3876 if (frame < qh->period) { qh_schedule()
3891 if (qh->period) { qh_schedule()
3894 for (i = qh->period; status && i > 0; --i) { qh_schedule()
3895 frame = ++fusbh200->random_frame % qh->period; qh_schedule()
3905 /* qh->period == 0 means every uframe */ qh_schedule()
3916 hw->hw_info2 |= qh->period qh_schedule()
4275 u32 period itd_slot_ok()
4278 uframe %= period; itd_slot_ok()
4286 uframe += period; itd_slot_ok()
4311 u32 now, next, start, period, span; iso_stream_schedule() local
4316 period = urb->interval; iso_stream_schedule()
4349 excess = (stream->next_uframe - period - next) & (mod - 1); iso_stream_schedule()
4351 start = next + excess - mod + period * iso_stream_schedule()
4352 DIV_ROUND_UP(mod - excess, period); iso_stream_schedule()
4354 start = next + excess + period; iso_stream_schedule()
4357 urb, start - now - period, period, iso_stream_schedule()
4382 start += period; iso_stream_schedule()
4387 stream->usecs, period)) iso_stream_schedule()
4401 if (unlikely(start - now + span - period iso_stream_schedule()
4404 urb, start - now, span - period, iso_stream_schedule()
4525 "schedule devp %s ep%d%s-iso period %d start %d.%d\n", itd_link_urb()
3493 tt_no_collision( struct fusbh200_hcd *fusbh200, unsigned period, struct usb_device *dev, unsigned frame, u32 uf_mask ) tt_no_collision() argument
3771 check_period( struct fusbh200_hcd *fusbh200, unsigned frame, unsigned uframe, unsigned period, unsigned usecs ) check_period() argument
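The iso_stream_schedule() arithmetic above (the excess / DIV_ROUND_UP dance) is a circular-buffer way of asking: what is the earliest microframe at or after `next` that keeps the stream's phase modulo its period? A simplified restatement under that reading; it ignores the driver's ASAP/slop handling and the names are invented:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Earliest slot at or after `next` with result == next_uframe (mod period),
     * all arithmetic in a circular space of `mod` microframes (a power of two,
     * e.g. periodic_size << 3). */
    static unsigned first_slot(unsigned next_uframe, unsigned next,
                               unsigned period, unsigned mod)
    {
        unsigned behind = (next - next_uframe) & (mod - 1); /* distance past the phase */
        unsigned skip = DIV_ROUND_UP(behind, period);       /* whole periods to jump */

        return (next_uframe + skip * period) & (mod - 1);
    }

    int main(void)
    {
        /* phase 5, period 8, schedule already at microframe 27 -> slot 29 */
        printf("%u\n", first_slot(5, 27, 8, 1024));
        return 0;
    }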
H A Disp116x-hcd.c322 DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
323 for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
335 ep->load / ep->period;
650 static int balance(struct isp116x *isp116x, u16 period, u16 load) balance() argument
654 /* search for the least loaded schedule branch of that period balance()
656 for (i = 0; i < period; i++) { balance()
660 for (j = i; j < PERIODIC_SIZE; j += period) { balance()
753 ep->period = urb->interval >> 1; isp116x_urb_enqueue()
774 urb->interval = ep->period; isp116x_urb_enqueue()
782 ep->branch = ret = balance(isp116x, ep->period, ep->load); isp116x_urb_enqueue()
790 /* sort each schedule branch by period (slow before fast) isp116x_urb_enqueue()
793 DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); isp116x_urb_enqueue()
794 for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { isp116x_urb_enqueue()
799 if (ep->period > here->period) isp116x_urb_enqueue()
810 hcd->self.bandwidth_allocated += ep->load / ep->period; isp116x_urb_enqueue()
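balance() in the isp116x hits picks, for a given period, the schedule branch whose frames are least loaded and can still absorb the new endpoint's load. A compilable sketch of that selection, with a made-up MAX_PERIODIC_LOAD budget:

    #include <stdio.h>

    #define PERIODIC_SIZE 32
    #define MAX_PERIODIC_LOAD 500   /* assumed per-frame budget, in us */

    static int frame_load[PERIODIC_SIZE];

    /* Pick the starting branch 0..period-1 that is least loaded and whose
     * worst frame still has room for `load`. */
    static int balance(int period, int load)
    {
        int i, j, best = -1;

        for (i = 0; i < period; i++) {
            if (best < 0 || frame_load[i] < frame_load[best]) {
                int worst = 0;

                for (j = i; j < PERIODIC_SIZE; j += period)
                    if (frame_load[j] > worst)
                        worst = frame_load[j];
                if (worst + load <= MAX_PERIODIC_LOAD)
                    best = i;
            }
        }
        return best;                 /* -1 when no branch has room */
    }

    int main(void)
    {
        frame_load[0] = 400;                     /* frame 0 is already busy */
        printf("branch %d\n", balance(4, 200));  /* picks a branch other than 0 */
        return 0;
    }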
/linux-4.1.27/include/trace/events/
H A Drcu.h42 * Tracepoint for grace-period events. Takes a string identifying the
43 * RCU flavor, the grace-period number, and a string identifying the
44 * grace-period-related event as follows:
48 * "newreq": Request a new grace period.
49 * "start": Start a grace period.
50 * "cpustart": CPU first notices a grace-period start.
54 * "reqwait": GP kthread sleeps waiting for grace-period request.
60 * "end": End a grace period.
61 * "cpuend": CPU first notices a grace-period end.
86 * Tracepoint for future grace-period events, including those for no-callbacks
91 * "Startleaf": Request a nocb grace period based on leaf-node data.
94 * "Startedroot": Requested a nocb grace period based on root-node data.
95 * "StartWait": Start waiting for the requested grace period.
138 * Tracepoint for grace-period-initialization events. These are
139 * distinguished by the type of RCU, the new grace-period number, the
220 * include SRCU), the grace-period number that the task is blocking
273 * distinguished by the type of RCU, the grace-period number, the
276 * whether there are any blocked tasks blocking the current grace period.
317 * These trace events include the type of RCU, the grace-period number
390 * done everything RCU requires for the current grace period. In this
392 * period in order to process the remainder of its callbacks.
H A Dwriteback.h436 unsigned long period,
442 dirtied, period, pause, start_time),
457 __field(unsigned long, period)
477 __entry->period = period * 1000 / HZ;
488 "paused=%lu pause=%ld period=%lu think=%ld",
501 __entry->period, /* ms */
/linux-4.1.27/arch/metag/kernel/perf/
H A Dperf_event.c225 s64 period = hwc->sample_period; metag_pmu_event_set_period() local
228 /* The period may have been changed */ metag_pmu_event_set_period()
229 if (unlikely(period != hwc->last_period)) metag_pmu_event_set_period()
230 left += period - hwc->last_period; metag_pmu_event_set_period()
232 if (unlikely(left <= -period)) { metag_pmu_event_set_period()
233 left = period; metag_pmu_event_set_period()
235 hwc->last_period = period; metag_pmu_event_set_period()
240 left += period; metag_pmu_event_set_period()
242 hwc->last_period = period; metag_pmu_event_set_period()
269 * We always have to reprogram the period, so ignore PERF_EF_RELOAD. metag_pmu_start()
277 * Reset the period. metag_pmu_start()
280 * the period, then we'll either: a) get an overflow too soon; metag_pmu_start()
772 /* Update the counts and reset the sample period */ metag_pmu_counter_overflow()
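The metag_pmu_event_set_period() lines show the sampling-period reload pattern that the arm and tile hits further down repeat almost verbatim: clamp the remaining count, fold the overshoot into the next period, and program the counter with the negated remainder. A standalone sketch of that logic; period_left and max_period here are plain variables, not perf's hwc fields:

    #include <stdio.h>
    #include <stdint.h>

    /* Work out how many events may still elapse before the next sample, then
     * return the value to program into a counter that overflows at zero. */
    static int64_t set_period(int64_t *period_left, int64_t period, int64_t max_period)
    {
        int64_t left = *period_left;

        if (left <= -period)          /* more than a whole period behind */
            left = period;
        if (left <= 0)                /* fold the overshoot into the next period */
            left += period;
        if (left > max_period)        /* respect the counter width */
            left = max_period;

        *period_left = left;
        return -left;                 /* counter counts up toward overflow at 0 */
    }

    int main(void)
    {
        int64_t left = -3;            /* we overshot the last period by 3 events */
        int64_t val = set_period(&left, 1000, (1LL << 31) - 1);
        printf("write %lld to the counter; next overflow after %lld events\n",
               (long long)val, (long long)left);
        return 0;
    }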
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/
H A Dfm10k_ptp.c325 struct ptp_clock_time *t = &rq->perout.period; fm10k_ptp_enable()
328 u64 period; fm10k_ptp_enable() local
341 * the period. fm10k_ptp_enable()
356 period = t->sec * 1000000000LL + t->nsec; fm10k_ptp_enable()
358 /* determine the minimum size for period */ fm10k_ptp_enable()
363 if ((period && (period < step)) || (period > U32_MAX)) fm10k_ptp_enable()
368 (u32)period); fm10k_ptp_enable()
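fm10k_ptp_enable() folds the requested periodic-output time into a single nanosecond count and rejects it if it is shorter than one clock step or does not fit the 32-bit register. A small sketch of that validation; step_ns is an assumed value, the driver derives it from its SYSTIME configuration:

    #include <stdio.h>
    #include <stdint.h>

    static int check_perout_period(int64_t sec, int64_t nsec, uint64_t step_ns)
    {
        uint64_t period = (uint64_t)sec * 1000000000ULL + (uint64_t)nsec;

        if ((period && period < step_ns) || period > UINT32_MAX)
            return -1;    /* the driver returns -ERANGE here */
        return 0;         /* period == 0 just disables the output */
    }

    int main(void)
    {
        printf("1s: %d, 10ns: %d\n",
               check_perout_period(1, 0, 40),    /* accepted */
               check_perout_period(0, 10, 40));  /* shorter than one step: rejected */
        return 0;
    }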
/linux-4.1.27/drivers/input/joystick/iforce/
H A Diforce-ff.c64 * Upload the component of an effect dealing with the period, phase and magnitude
69 __s16 magnitude, __s16 offset, u16 period, u16 phase) make_period_modifier()
73 period = TIME_SCALE(period); make_period_modifier()
93 data[5] = LO(period); make_period_modifier()
94 data[6] = HI(period); make_period_modifier()
285 return (old->u.periodic.period != new->u.periodic.period need_period_modifier()
372 effect->u.periodic.period, effect->u.periodic.phase); iforce_upload_periodic()
67 make_period_modifier(struct iforce* iforce, struct resource* mod_chunk, int no_alloc, __s16 magnitude, __s16 offset, u16 period, u16 phase) make_period_modifier() argument
/linux-4.1.27/arch/arm/kernel/
H A Dperf_event.c102 s64 period = hwc->sample_period; armpmu_event_set_period() local
105 if (unlikely(left <= -period)) { armpmu_event_set_period()
106 left = period; armpmu_event_set_period()
108 hwc->last_period = period; armpmu_event_set_period()
113 left += period; armpmu_event_set_period()
115 hwc->last_period = period; armpmu_event_set_period()
120 * Limit the maximum period to prevent the counter value armpmu_event_set_period()
188 * ARM pmu always has to reprogram the period, so ignore armpmu_start()
196 * Set the period again. Some counters can't be stopped, so when we armpmu_start()
/linux-4.1.27/drivers/ptp/
H A Dptp_sysfs.c139 &req.perout.period.sec, &req.perout.period.nsec); period_store()
145 enable = req.perout.period.sec || req.perout.period.nsec; period_store()
240 static DEVICE_ATTR(period, 0220, NULL, period_store);
/linux-4.1.27/drivers/cpufreq/
H A Dsa1110-cpufreq.c121 * Given a period in ns and frequency in khz, calculate the number of
122 * cycles of frequency in period. Note that we round up to the next
208 * Update the refresh period. We do this such that we always refresh
209 * the SDRAMs within their permissible period. The refresh period is
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dsumo_smc.c147 u32 period, unit, timer_value; sumo_enable_boost_timer() local
153 period = 100 * (xclk / 100 / sumo_power_of_4(unit)); sumo_enable_boost_timer()
155 timer_value = (period << 16) | (unit << 4); sumo_enable_boost_timer()
/linux-4.1.27/arch/mips/loongson1/common/
H A Dtime.c41 static inline void ls1x_pwmtimer_set_period(uint32_t period) ls1x_pwmtimer_set_period() argument
43 __raw_writel(period, timer_base + PWM_HRC); ls1x_pwmtimer_set_period()
44 __raw_writel(period, timer_base + PWM_LRC); ls1x_pwmtimer_set_period()
/linux-4.1.27/kernel/
H A Dhung_task.c31 * the RCU grace period. So it needs to be upper-bound.
131 * To avoid extending the RCU grace period for an unbounded amount of time,
135 * to exit the grace period. For classic RCU, a reschedule is required.
/linux-4.1.27/sound/oss/dmasound/
H A Ddmasound_paula.c39 * The minimum period for audio depends on htotal (for OCS/ECS/AGA)
47 * amiga_mksound() should be able to restore the period after beeping
359 int period, i; AmiInit() local
364 period = amiga_colorclock/dmasound.soft.speed-1; AmiInit()
366 period = amiga_audio_min_period; AmiInit()
370 if (period < amiga_audio_min_period) { AmiInit()
372 period = amiga_audio_min_period; AmiInit()
373 } else if (period > 65535) { AmiInit()
374 period = 65535; AmiInit()
376 dmasound.hard.speed = amiga_colorclock/(period+1); AmiInit()
379 custom.aud[i].audper = period; AmiInit()
380 amiga_audio_period = period; AmiInit()
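AmiInit() derives the Paula period register from the requested sample rate, clamps it to the hardware limits, and reports the rate actually obtained. Roughly, with illustrative constants (PAL colour clock, an assumed minimum period):

    #include <stdio.h>

    #define COLORCLOCK        3546895u   /* PAL colour clock, Hz (illustrative) */
    #define AUDIO_MIN_PERIOD  124u       /* assumed hardware minimum */

    static unsigned int amiga_period(unsigned int speed, unsigned int *real_speed)
    {
        unsigned int period = speed ? COLORCLOCK / speed - 1 : 65535;

        if (period < AUDIO_MIN_PERIOD)
            period = AUDIO_MIN_PERIOD;
        else if (period > 65535)
            period = 65535;

        *real_speed = COLORCLOCK / (period + 1);
        return period;
    }

    int main(void)
    {
        unsigned int real, p = amiga_period(22050, &real);
        printf("period %u -> actual rate %u Hz\n", p, real);
        return 0;
    }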
/linux-4.1.27/include/linux/mfd/
H A Dmax8997.h128 * @pwm_period: period in nano second for PWM device
140 * @pattern_signal_period: period of the waveform for the internal mode pattern
141 * [0 - 255]: available period
/linux-4.1.27/arch/mips/include/asm/mach-jz4740/
H A Dtimer.h89 static inline void jz4740_timer_set_period(unsigned int timer, uint16_t period) jz4740_timer_set_period() argument
91 writew(period, jz4740_timer_base + JZ_REG_TIMER_DFR(timer)); jz4740_timer_set_period()
/linux-4.1.27/drivers/usb/serial/
H A Dgeneric.c250 unsigned long period; usb_serial_generic_wait_until_sent() local
257 * Use a poll-period of roughly the time it takes to send one usb_serial_generic_wait_until_sent()
260 period = max_t(unsigned long, (10 * HZ / bps), 1); usb_serial_generic_wait_until_sent()
262 period = min_t(unsigned long, period, timeout); usb_serial_generic_wait_until_sent()
264 dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", usb_serial_generic_wait_until_sent()
266 jiffies_to_msecs(period)); usb_serial_generic_wait_until_sent()
269 schedule_timeout_interruptible(period); usb_serial_generic_wait_until_sent()
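usb_serial_generic_wait_until_sent() polls at roughly one character time (10 bits on the wire) at the current line speed, at least one jiffy, and never longer than the caller's timeout. A sketch of that choice with an assumed HZ:

    #include <stdio.h>

    #define HZ 250   /* assumed tick rate; the real value is a kernel config option */

    static unsigned long poll_period(unsigned int bps, unsigned long timeout)
    {
        unsigned long period = bps ? 10UL * HZ / bps : HZ;

        if (period < 1)
            period = 1;
        if (period > timeout)
            period = timeout;
        return period;   /* in jiffies */
    }

    int main(void)
    {
        printf("%lu jiffies\n", poll_period(115200, 2 * HZ));
        return 0;
    }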
/linux-4.1.27/net/core/
H A Dsecure_seq.c36 * Choosing a clock of 64 ns period is OK. (period of 274 s) seq_scale()
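(The parenthetical is easy to check: a 32-bit value stepping once every 64 ns wraps after 2^32 x 64 ns, which is about 274.9 s, matching the quoted "period of 274 s".)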
/linux-4.1.27/sound/core/
H A Dpcm_trace.h37 TP_printk("pcmC%dD%d%c/sub%d: %s: pos=%lu, old=%lu, base=%lu, period=%lu, buf=%lu",
72 TP_printk("pcmC%dD%d%c/sub%d: XRUN: old=%lu, base=%lu, period=%lu, buf=%lu",
/linux-4.1.27/drivers/mtd/nand/gpmi-nand/
H A Dgpmi-lib.c316 unsigned int period, unsigned int min) ns_to_cycles()
320 k = (time + period - 1) / period; ns_to_cycles()
378 * next-highest clock period to make sure we apply at least the gpmi_nfc_compute_hardware_timing()
393 * The clock's period affects the sample delay in a number of ways: gpmi_nfc_compute_hardware_timing()
395 * (1) The NFC HAL tells us the maximum clock period the sample delay gpmi_nfc_compute_hardware_timing()
396 * DLL can tolerate. If the clock period is greater than half that gpmi_nfc_compute_hardware_timing()
410 * RP is the reference period, in ns, which is a full clock period gpmi_nfc_compute_hardware_timing()
420 * The reference period is either the clock period or half that, so this gpmi_nfc_compute_hardware_timing()
430 * P is the clock period. gpmi_nfc_compute_hardware_timing()
741 /* ...and one less period for the delay time. */ gpmi_nfc_compute_hardware_timing()
778 * If you set 100MHz to gpmi nand controller, the GPMI-clock's period
779 * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
831 * tRP = (GPMI-clock-period) * DATA_SETUP
836 * RP : the DLL reference period.
837 * if (GPMI-clock-period > DLL_THRETHOLD)
838 * RP = GPMI-clock-period / 2;
840 * RP = GPMI-clock-period;
842 * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
315 ns_to_cycles(unsigned int time, unsigned int period, unsigned int min) ns_to_cycles() argument
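The ns_to_cycles() hit above is a ceiling division with a floor: convert a duration in nanoseconds to clock cycles of the given period, round up, and never return fewer than a minimum cycle count. Restated standalone:

    #include <stdio.h>

    static unsigned int ns_to_cycles(unsigned int time_ns, unsigned int period_ns,
                                     unsigned int min_cycles)
    {
        unsigned int k = (time_ns + period_ns - 1) / period_ns;   /* ceiling */

        return k > min_cycles ? k : min_cycles;
    }

    int main(void)
    {
        /* 25 ns of setup time on a 10 ns clock needs 3 cycles */
        printf("%u\n", ns_to_cycles(25, 10, 1));
        return 0;
    }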
/linux-4.1.27/drivers/net/irda/
H A Dirda-usb.h76 * Rx notification will only be done at the end of the USB frame period :
77 * OHCI : frame period = 1ms
78 * UHCI : frame period = 1ms, but notification can take 2 or 3 ms :-(
79 * EHCI : frame period = 125us */
/linux-4.1.27/drivers/input/
H A Dinput-compat.h41 __u16 period; member in struct:ff_periodic_effect_compat
/linux-4.1.27/arch/mips/include/asm/
H A Dhpet.h60 * Min HPET period is 10^5 femto sec just for safety. If it is less than this,
/linux-4.1.27/fs/nfsd/
H A Dcache.h74 /* Cache entries expire after this time period */
/linux-4.1.27/include/scsi/
H A Dscsi_transport_spi.h32 int period; /* value in the PPR/SDTR command */ member in struct:spi_transport_attrs
75 #define spi_period(x) (((struct spi_transport_attrs *)&(x)->starget_data)->period)
157 int spi_populate_sync_msg(unsigned char *msg, int period, int offset);
158 int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width,
/linux-4.1.27/include/linux/netfilter/ipset/
H A Dip_set_timeout.h16 /* Timeout period depending on the timeout value of the given set */
/linux-4.1.27/arch/powerpc/include/asm/
H A Dftrace.h72 * 32bit do not start with a period so the generic function will work. arch_syscall_match_sym_name()
/linux-4.1.27/arch/blackfin/kernel/
H A Dgptimers.c143 void set_gptimer_period(unsigned int timer_id, uint32_t period) set_gptimer_period() argument
146 bfin_write(&timer_regs[timer_id]->period, period); set_gptimer_period()
154 return bfin_read(&timer_regs[timer_id]->period); get_gptimer_period()
/linux-4.1.27/arch/arm/mach-s3c24xx/
H A Dcpufreq-utils.c42 * This should work for HCLK up to 133MHz and refresh period up s3c2410_cpufreq_setrefresh()
/linux-4.1.27/arch/arm/mach-omap1/include/mach/
H A Dmtd-xip.h54 * the system timer tick period. This should put the CPU into idle mode
/linux-4.1.27/sound/ppc/
H A Dbeep.c107 int period, ncycles, nsamples; snd_pmac_beep_event() local
149 period = srate * 256 / hz; /* fixed point */ snd_pmac_beep_event()
150 ncycles = BEEP_BUFLEN * 256 / period; snd_pmac_beep_event()
151 nsamples = (period * ncycles) >> 8; snd_pmac_beep_event()
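snd_pmac_beep_event() uses 8.8 fixed point to fit a whole number of tone cycles into the beep buffer so it loops without a phase glitch. The same arithmetic, standalone; the BEEP_BUFLEN value is an assumption:

    #include <stdio.h>

    #define BEEP_BUFLEN 512   /* assumed buffer length, in samples */

    static int beep_samples(int srate, int hz)
    {
        int period   = srate * 256 / hz;            /* samples per cycle, x256 */
        int ncycles  = BEEP_BUFLEN * 256 / period;  /* whole cycles that fit */
        int nsamples = (period * ncycles) >> 8;     /* back to integer samples */

        return nsamples;
    }

    int main(void)
    {
        printf("%d samples for a 1 kHz beep at 44100 Hz\n", beep_samples(44100, 1000));
        return 0;
    }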
/linux-4.1.27/sound/sh/
H A Daica.h46 /* Buffer and period size */
/linux-4.1.27/sound/usb/6fire/
H A Dpcm.h47 snd_pcm_uframes_t period_off; /* current position in current period */
/linux-4.1.27/sound/usb/usx2y/
H A Dusbusx2y.h71 int transfer_done; /* processed frames since last period update */
/linux-4.1.27/tools/perf/ui/gtk/
H A Dhists.c66 __HPP_COLOR_PERCENT_FN(overhead, period) __HPP_COLOR_PERCENT_FN()
71 __HPP_COLOR_ACC_PERCENT_FN(overhead_acc, period) __HPP_COLOR_PERCENT_FN()
255 h->stat_acc->period : h->stat.period;
/linux-4.1.27/sound/pci/emu10k1/
H A Dp16v.h23 * Corrected playback interrupts. Now interrupt per period, instead of half period.
90 #define PLAYBACK_LIST_ADDR 0x00 /* Base DMA address of a list of pointers to each period/size */
94 * One list entry for each period in the buffer.
97 #define PLAYBACK_LIST_PTR 0x02 /* Pointer to the current period being played */
100 #define PLAYBACK_PERIOD_SIZE 0x05 /* Playback period size. win2000 uses 0x04000000 */
101 #define PLAYBACK_POINTER 0x06 /* Playback period pointer. Used with PLAYBACK_LIST_PTR to determine buffer position currently in DAC */
/linux-4.1.27/arch/powerpc/perf/
H A Dcore-fsl-emb.c609 u64 period = event->hw.sample_period; record_and_restart() local
624 * See if the total period for this event has expired, record_and_restart()
625 * and update for the next period. record_and_restart()
629 if (period) { record_and_restart()
631 left += period; record_and_restart()
633 left = period; record_and_restart()
/linux-4.1.27/sound/pci/
H A Dsis7019.c71 * interrupted for the next period.
75 * channel to clock out virtual periods, and adjust the virtual period length
155 * that places our period/buffer range at 9-0xfff9 samples. That makes the
159 * We'll add a constraint upon open that limits the period and buffer sample
204 static void sis_update_sso(struct voice *voice, u16 period) sis_update_sso() argument
208 voice->sso += period; sis_update_sso()
227 /* If we've not hit the end of the virtual period, update sis_update_voice()
262 * a bit late. We'll adjust our next waiting period based sis_update_voice()
266 * it really is past a period when we get our interrupt -- sis_update_voice()
546 /* The baseline setup is for a single period per buffer, and sis_pcm_playback_prepare()
739 /* Set our initial buffer and period as large as we can given a sis_prepare_timing_voice()
747 * the period we're clocking out. 12 samples seems to give a good sis_prepare_timing_voice()
750 * We want to spread our interrupts throughout the virtual period, sis_prepare_timing_voice()
753 * clocking period size so that the last period is at least a fourth sis_prepare_timing_voice()
754 * of a full period. sis_prepare_timing_voice()
774 /* The initial period will fit inside the buffer, so we sis_prepare_timing_voice()
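The sis7019 comments describe slicing one long "virtual" period into hardware-sized chunks while keeping the final chunk at least a quarter of a full one. A deliberately simplified illustration of that policy only; it is not the driver's sis_prepare_timing_voice() logic:

    #include <stdio.h>

    /* Shrink the chunk size until the tail of the virtual period is at least
     * a quarter of a full chunk (or the chunk becomes trivially small). */
    static unsigned split_virtual_period(unsigned vperiod, unsigned max_chunk)
    {
        unsigned chunk = max_chunk;

        while (chunk > 4 && vperiod % chunk && vperiod % chunk < chunk / 4)
            chunk--;

        return chunk;
    }

    int main(void)
    {
        unsigned vperiod = 4100, chunk = split_virtual_period(vperiod, 1024);

        printf("chunk %u samples, final piece %u samples\n",
               chunk, vperiod % chunk ? vperiod % chunk : chunk);
        return 0;
    }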
/linux-4.1.27/include/uapi/sound/
H A Dasound.h259 #define SNDRV_PCM_INFO_MMAP_VALID 0x00000002 /* period data are valid during transfer */
272 #define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP 0x00800000 /* period wakeup can be disabled */
358 #define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP (1<<2) /* disable period wakeups */
691 unsigned long resolution; /* average period resolution in ns */
692 unsigned long resolution_min; /* minimal period resolution in ns */
693 unsigned long resolution_max; /* maximal period resolution in ns */
700 unsigned long period_num; /* requested precise period duration (in seconds) - numerator */
701 unsigned long period_den; /* requested precise period duration (in seconds) - denominator */
707 unsigned long resolution; /* current period resolution in ns */
708 unsigned long resolution_num; /* precise current period resolution (in seconds) - numerator */
709 unsigned long resolution_den; /* precise current period resolution (in seconds) - denominator */
724 unsigned long resolution; /* average period resolution in ns */
743 unsigned int resolution; /* current period resolution in ns */
/linux-4.1.27/drivers/net/phy/
H A Dicplus.c60 /* ensure no bus delays overlap reset period */ ip175c_config_init()
63 /* data sheet specifies reset period is 2 msec */ ip175c_config_init()
/linux-4.1.27/drivers/staging/lustre/lnet/selftest/
H A Dtimer.c49 * 2**STTIMER_MINPOLL (8) second period. The timers in each slot are
51 * to cover a time period of 1024 seconds into the future before wrapping.
/linux-4.1.27/arch/tile/kernel/
H A Dperf_event.c74 u64 max_period; /* max sampling period */
545 * Set the next IRQ period, based on the hwc->period_left value.
553 s64 period = hwc->sample_period; tile_event_set_period() local
559 if (unlikely(left <= -period)) { tile_event_set_period()
560 left = period; tile_event_set_period()
562 hwc->last_period = period; tile_event_set_period()
567 left += period; tile_event_set_period()
569 hwc->last_period = period; tile_event_set_period()
/linux-4.1.27/arch/sh/include/asm/
H A Dsh7760fb.h89 /* Disable output of HSYNC during VSYNC period */
92 /* Disable output of VSYNC during VSYNC period */
/linux-4.1.27/arch/c6x/platforms/
H A Dtimer64.c80 static void timer64_config(unsigned long period) timer64_config() argument
85 soc_writel(period - 1, &timer->prdlo); timer64_config()
/linux-4.1.27/net/atm/
H A Dlec.h97 * Within the period of time defined by this variable, the client will send
102 * If no traffic has been sent in this vcc for this period of time,
/linux-4.1.27/drivers/net/ethernet/moxa/
H A Dmoxart_ether.h147 #define TXINT_TIME_SEL BIT(15) /* TX cycle time period */
150 #define RXINT_TIME_SEL BIT(7) /* RX cycle time period */
155 #define TXPOLL_TIME_SEL BIT(12) /* TX poll time period */
158 #define RXPOLL_TIME_SEL BIT(4) /* RX poll time period */
/linux-4.1.27/drivers/staging/fbtft/
H A Dfb_ra8875.c87 /* pixel clock period */ init_display()
111 /* pixel clock period */ init_display()
135 /* pixel clock period */ init_display()
159 /* pixel clock period */ init_display()
/linux-4.1.27/drivers/edac/
H A Dedac_pci.c239 /* if we are on a one second period, then use round */ edac_pci_workq_function()
256 * passing in the new delay period in msec
288 * called with a new period value for the workq period
/linux-4.1.27/arch/mips/include/asm/dec/
H A Dkn02ca.h58 #define KN02CA_MSR_MS10EN (1<<25) /* 10/1ms IRQ period select */
/linux-4.1.27/arch/frv/include/asm/
H A Dmb93493-regs.h61 #define MB93493_VDC_RHDC 0x150 /* horizontal display period */
63 #define MB93493_VDC_RVDC 0x158 /* vertical display period */
101 #define MB93493_VCC_RVCC 0x118 /* vertical capture period */
102 #define MB93493_VCC_RVBC 0x11c /* vertical back porch period */
/linux-4.1.27/tools/perf/ui/stdio/
H A Dhist.c38 int depth, int depth_mask, int period, ipchain__fprintf_graph()
52 if (!period && i == depth - 1) { ipchain__fprintf_graph()
271 he->stat_acc->period : he->stat.period, hist_entry_callchain__fprintf()
37 ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, int depth_mask, int period, u64 total_samples, u64 hits, int left_margin) ipchain__fprintf_graph() argument
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/
H A Dixgbe_ptp.c41 * period of 6.4ns. In order to convert the scale counter into
48 * PeriodWidth: Number of bits to store the clock period
50 * Period: The clock period for the oscillator
55 * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns
56 * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns
58 * The period also changes based on the link speed:
59 * At 10Gb link or no link, the period remains the same.
60 * At 1Gb link, the period is multiplied by 10. (64ns)
61 * At 100Mb link, the period is multiplied by 100. (640ns)
67 * These diagrams are only for the 10Gb link period
101 /* half of a one second clock period, for use with PPS signal. We have to use
150 /* clock period (or pulse length) */ ixgbe_ptp_setup_sdp()
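The ixgbe_ptp.c comments give the SYSTIME increment as a 6.4 ns base period that is multiplied by 10 at 1Gb link and by 100 at 100Mb. Expressed as a tiny helper; attoseconds keep the 6.4 ns base integral, and the enum and function are this sketch's own, not the driver's:

    #include <stdio.h>
    #include <stdint.h>

    enum link_speed { LINK_10G, LINK_1G, LINK_100M };

    static uint64_t clock_period_as(enum link_speed speed)
    {
        const uint64_t base = 6400000000ULL;    /* 6.4 ns in attoseconds */

        switch (speed) {
        case LINK_1G:   return base * 10;       /* 64 ns  */
        case LINK_100M: return base * 100;      /* 640 ns */
        default:        return base;            /* 10Gb or no link */
        }
    }

    int main(void)
    {
        printf("100Mb period = %.1f ns\n", clock_period_as(LINK_100M) / 1e9);
        return 0;
    }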
/linux-4.1.27/drivers/staging/iio/trigger/
H A Diio-trig-bfin-timer.c124 unsigned int period = get_gptimer_period(st->t->id); iio_bfin_tmr_frequency_show() local
127 if (period == 0) iio_bfin_tmr_frequency_show()
237 * The interrupt will be generated at the end of the period, iio_bfin_tmr_trigger_probe()
