Searched for references to "ref" (results 1 - 200 of 966, sorted by relevance)


/linux-4.4.14/net/mac80211/
rate.h:37 struct rate_control_ref *ref = local->rate_ctrl; rate_control_tx_status() local
42 if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) rate_control_tx_status()
46 if (ref->ops->tx_status) rate_control_tx_status()
47 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); rate_control_tx_status()
49 ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info); rate_control_tx_status()
59 struct rate_control_ref *ref = local->rate_ctrl; rate_control_tx_status_noskb() local
63 if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) rate_control_tx_status_noskb()
66 if (WARN_ON_ONCE(!ref->ops->tx_status_noskb)) rate_control_tx_status_noskb()
70 ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info); rate_control_tx_status_noskb()
79 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, rate_control_alloc_sta() argument
83 return ref->ops->alloc_sta(ref->priv, &sta->sta, gfp); rate_control_alloc_sta()
88 struct rate_control_ref *ref = sta->rate_ctrl; rate_control_free_sta() local
92 ref->ops->free_sta(ref->priv, ista, priv_sta); rate_control_free_sta()
98 struct rate_control_ref *ref = sta->rate_ctrl; rate_control_add_sta_debugfs() local
99 if (ref && sta->debugfs.dir && ref->ops->add_sta_debugfs) rate_control_add_sta_debugfs()
100 ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv, rate_control_add_sta_debugfs()
108 struct rate_control_ref *ref = sta->rate_ctrl; rate_control_remove_sta_debugfs() local
109 if (ref && ref->ops->remove_sta_debugfs) rate_control_remove_sta_debugfs()
110 ref->ops->remove_sta_debugfs(ref->priv, sta->rate_ctrl_priv); rate_control_remove_sta_debugfs()
rate.c:35 struct rate_control_ref *ref = sta->rate_ctrl; rate_control_rate_init() local
43 if (!ref) rate_control_rate_init()
57 ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista, rate_control_rate_init()
68 struct rate_control_ref *ref = local->rate_ctrl; rate_control_rate_update() local
73 if (ref && ref->ops->rate_update) { rate_control_rate_update()
83 ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def, rate_control_rate_update()
188 struct rate_control_ref *ref = file->private_data; rcname_read() local
189 int len = strlen(ref->ops->name); rcname_read()
192 ref->ops->name, len); rcname_read()
206 struct rate_control_ref *ref; rate_control_alloc() local
208 ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); rate_control_alloc()
209 if (!ref) rate_control_alloc()
211 ref->local = local; rate_control_alloc()
212 ref->ops = ieee80211_rate_control_ops_get(name); rate_control_alloc()
213 if (!ref->ops) rate_control_alloc()
219 debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops); rate_control_alloc()
222 ref->priv = ref->ops->alloc(&local->hw, debugfsdir); rate_control_alloc()
223 if (!ref->priv) rate_control_alloc()
225 return ref; rate_control_alloc()
228 kfree(ref); rate_control_alloc()
834 struct rate_control_ref *ref = sdata->local->rate_ctrl; rate_control_get_rate() local
856 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc); rate_control_get_rate()
859 ref->ops->get_rate(ref->priv, NULL, NULL, txrc); rate_control_get_rate()
900 struct rate_control_ref *ref; ieee80211_init_rate_ctrl_alg() local
913 ref = rate_control_alloc(name, local); ieee80211_init_rate_ctrl_alg()
914 if (!ref) { ieee80211_init_rate_ctrl_alg()
921 local->rate_ctrl = ref; ieee80211_init_rate_ctrl_alg()
924 ref->ops->name); ieee80211_init_rate_ctrl_alg()
931 struct rate_control_ref *ref; rate_control_deinitialize() local
933 ref = local->rate_ctrl; rate_control_deinitialize()
935 if (!ref) rate_control_deinitialize()
939 rate_control_free(ref); rate_control_deinitialize()
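
The rate.c and rate.h hits above all funnel through ref->ops, the vtable of a pluggable rate-control algorithm. A minimal sketch of such an algorithm, assuming the 4.4-era struct rate_control_ops layout from include/net/mac80211.h (the "dummy" module itself is hypothetical):

    #include <net/mac80211.h>

    static void *dummy_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
    {
            return hw;                      /* per-device private data */
    }
    static void dummy_free(void *priv) { }
    static void *dummy_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
    {
            return sta;                     /* per-station private data */
    }
    static void dummy_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta) { }
    static void dummy_rate_init(void *priv, struct ieee80211_supported_band *sband,
                                struct cfg80211_chan_def *chandef,
                                struct ieee80211_sta *sta, void *priv_sta) { }
    static void dummy_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                               struct ieee80211_tx_rate_control *txrc)
    {
            struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);

            info->control.rates[0].idx = 0; /* always pick the lowest rate */
            info->control.rates[0].count = 1;
    }

    static const struct rate_control_ops dummy_ops = {
            .name           = "dummy",
            .alloc          = dummy_alloc,
            .free           = dummy_free,
            .alloc_sta      = dummy_alloc_sta,
            .free_sta       = dummy_free_sta,
            .rate_init      = dummy_rate_init,
            .get_rate       = dummy_get_rate,
    };

Registering it with ieee80211_rate_control_register(&dummy_ops) is what lets rate_control_alloc() above resolve an algorithm by name through ieee80211_rate_control_ops_get().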
key.h:158 #define key_mtx_dereference(local, ref) \
159 rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
/linux-4.4.14/lib/
percpu-refcount.c:10 * don't try to detect the ref hitting 0 - which means that get/put can just
21 * the ref hitting 0 on every put - this would require global synchronization
25 * the ref can't hit 0 before the user drops the initial ref, so as long as we
26 * convert to non percpu mode before the initial ref is dropped everything
38 static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref) percpu_count_ptr() argument
41 (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD); percpu_count_ptr()
46 * @ref: percpu_ref to initialize
51 * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
52 * refcount of 1; analogous to atomic_long_set(ref, 1). See the percpu_ref_init()
58 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, percpu_ref_init() argument
65 ref->percpu_count_ptr = (unsigned long) percpu_ref_init()
67 if (!ref->percpu_count_ptr) percpu_ref_init()
70 ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC; percpu_ref_init()
73 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; percpu_ref_init()
78 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; percpu_ref_init()
82 atomic_long_set(&ref->count, start_count); percpu_ref_init()
84 ref->release = release; percpu_ref_init()
91 * @ref: percpu_ref to exit
93 * This function exits @ref. The caller is responsible for ensuring that
94 * @ref is no longer in active use. The usual places to invoke this
95 * function from are the @ref->release() callback or in init failure path
99 void percpu_ref_exit(struct percpu_ref *ref) percpu_ref_exit() argument
101 unsigned long __percpu *percpu_count = percpu_count_ptr(ref); percpu_ref_exit()
105 ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD; percpu_ref_exit()
112 struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu); percpu_ref_call_confirm_rcu() local
114 ref->confirm_switch(ref); percpu_ref_call_confirm_rcu()
115 ref->confirm_switch = NULL; percpu_ref_call_confirm_rcu()
118 /* drop ref from percpu_ref_switch_to_atomic() */ percpu_ref_call_confirm_rcu()
119 percpu_ref_put(ref); percpu_ref_call_confirm_rcu()
124 struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu); percpu_ref_switch_to_atomic_rcu() local
125 unsigned long __percpu *percpu_count = percpu_count_ptr(ref); percpu_ref_switch_to_atomic_rcu()
133 atomic_long_read(&ref->count), (long)count); percpu_ref_switch_to_atomic_rcu()
137 * to &ref->count; since gets could be happening on one cpu while puts percpu_ref_switch_to_atomic_rcu()
139 * @ref->count to hit 0 before we've got a consistent value - but the percpu_ref_switch_to_atomic_rcu()
143 * &ref->count; we need the bias value to prevent &ref->count from percpu_ref_switch_to_atomic_rcu()
147 atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count); percpu_ref_switch_to_atomic_rcu()
149 WARN_ONCE(atomic_long_read(&ref->count) <= 0, percpu_ref_switch_to_atomic_rcu()
150 "percpu ref (%pf) <= 0 (%ld) after switching to atomic", percpu_ref_switch_to_atomic_rcu()
151 ref->release, atomic_long_read(&ref->count)); percpu_ref_switch_to_atomic_rcu()
153 /* @ref is viewed as dead on all CPUs, send out switch confirmation */ percpu_ref_switch_to_atomic_rcu()
157 static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref) percpu_ref_noop_confirm_switch() argument
161 static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref, __percpu_ref_switch_to_atomic() argument
164 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) { __percpu_ref_switch_to_atomic()
166 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; __percpu_ref_switch_to_atomic()
172 WARN_ON_ONCE(ref->confirm_switch); __percpu_ref_switch_to_atomic()
173 ref->confirm_switch = __percpu_ref_switch_to_atomic()
176 percpu_ref_get(ref); /* put after confirmation */ __percpu_ref_switch_to_atomic()
177 call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu); __percpu_ref_switch_to_atomic()
186 wait_event(percpu_ref_switch_waitq, !ref->confirm_switch); __percpu_ref_switch_to_atomic()
187 ref->confirm_switch = confirm_switch; __percpu_ref_switch_to_atomic()
189 percpu_ref_get(ref); /* put after confirmation */ __percpu_ref_switch_to_atomic()
190 call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu); __percpu_ref_switch_to_atomic()
196 * @ref: percpu_ref to switch to atomic mode
202 * Schedule switching of @ref to atomic mode. All its percpu counts will
207 * operations. Note that @ref will stay in atomic mode across kill/reinit
211 * but it may block if @confirm_kill is specified and @ref is already in
219 void percpu_ref_switch_to_atomic(struct percpu_ref *ref, percpu_ref_switch_to_atomic() argument
222 ref->force_atomic = true; percpu_ref_switch_to_atomic()
223 __percpu_ref_switch_to_atomic(ref, confirm_switch); percpu_ref_switch_to_atomic()
226 static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) __percpu_ref_switch_to_percpu() argument
228 unsigned long __percpu *percpu_count = percpu_count_ptr(ref); __percpu_ref_switch_to_percpu()
233 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) __percpu_ref_switch_to_percpu()
236 wait_event(percpu_ref_switch_waitq, !ref->confirm_switch); __percpu_ref_switch_to_percpu()
238 atomic_long_add(PERCPU_COUNT_BIAS, &ref->count); __percpu_ref_switch_to_percpu()
249 smp_store_release(&ref->percpu_count_ptr, __percpu_ref_switch_to_percpu()
250 ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC); __percpu_ref_switch_to_percpu()
255 * @ref: percpu_ref to switch to percpu mode
258 * To re-use an expired ref, use percpu_ref_reinit().
260 * Switch @ref to percpu mode. This function may be invoked concurrently
263 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
268 * but it may block if @ref is in the process of switching to atomic mode
271 void percpu_ref_switch_to_percpu(struct percpu_ref *ref) percpu_ref_switch_to_percpu() argument
273 ref->force_atomic = false; percpu_ref_switch_to_percpu()
275 /* a dying or dead ref can't be switched to percpu mode w/o reinit */ percpu_ref_switch_to_percpu()
276 if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) percpu_ref_switch_to_percpu()
277 __percpu_ref_switch_to_percpu(ref); percpu_ref_switch_to_percpu()
281 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
282 * @ref: percpu_ref to kill
287 * called after @ref is seen as dead from all CPUs at which point all
292 * but it may block if @confirm_kill is specified and @ref is in the
299 void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_kill_and_confirm() argument
302 WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD, percpu_ref_kill_and_confirm()
303 "%s called more than once on %pf!", __func__, ref->release); percpu_ref_kill_and_confirm()
305 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; percpu_ref_kill_and_confirm()
306 __percpu_ref_switch_to_atomic(ref, confirm_kill); percpu_ref_kill_and_confirm()
307 percpu_ref_put(ref); percpu_ref_kill_and_confirm()
313 * @ref: percpu_ref to re-initialize
315 * Re-initialize @ref so that it's in the same state as when it finished
316 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
319 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
322 void percpu_ref_reinit(struct percpu_ref *ref) percpu_ref_reinit() argument
324 WARN_ON_ONCE(!percpu_ref_is_zero(ref)); percpu_ref_reinit()
326 ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD; percpu_ref_reinit()
327 percpu_ref_get(ref); percpu_ref_reinit()
328 if (!ref->force_atomic) percpu_ref_reinit()
329 __percpu_ref_switch_to_percpu(ref); percpu_ref_reinit()
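
Taken together, percpu_ref_init(), percpu_ref_kill_and_confirm() and percpu_ref_exit() above define the whole lifecycle. A minimal usage sketch, assuming a hypothetical struct foo that embeds the ref (the API calls themselves are the ones shown in this file):

    struct foo {
            struct percpu_ref ref;
            /* ... payload ... */
    };

    static void foo_release(struct percpu_ref *ref)
    {
            struct foo *foo = container_of(ref, struct foo, ref);

            percpu_ref_exit(ref);   /* the release callback is a documented place for this */
            kfree(foo);
    }

    static struct foo *foo_create(void)
    {
            struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

            if (!foo)
                    return NULL;
            /* flags == 0: start live, in fast percpu mode, with a count of 1 */
            if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
                    kfree(foo);
                    return NULL;
            }
            return foo;
    }

Users then pair percpu_ref_get() with percpu_ref_put(); teardown calls percpu_ref_kill() to drop the initial reference and switch to atomic mode so the count can actually be observed hitting zero, at which point foo_release() runs.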
dma-debug.c:299 struct dma_debug_entry *ref, __hash_bucket_find()
306 if (!match(ref, entry)) __hash_bucket_find()
321 entry->size == ref->size ? ++match_lvl : 0; __hash_bucket_find()
322 entry->type == ref->type ? ++match_lvl : 0; __hash_bucket_find()
323 entry->direction == ref->direction ? ++match_lvl : 0; __hash_bucket_find()
324 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; __hash_bucket_find()
349 struct dma_debug_entry *ref) bucket_find_exact()
351 return __hash_bucket_find(bucket, ref, exact_match); bucket_find_exact()
355 struct dma_debug_entry *ref, bucket_find_contain()
359 unsigned int max_range = dma_get_max_seg_size(ref->dev); bucket_find_contain()
360 struct dma_debug_entry *entry, index = *ref; bucket_find_contain()
364 entry = __hash_bucket_find(*bucket, ref, containing_match); bucket_find_contain()
1074 static void check_unmap(struct dma_debug_entry *ref) check_unmap() argument
1080 bucket = get_hash_bucket(ref, &flags); check_unmap()
1081 entry = bucket_find_exact(bucket, ref); check_unmap()
1087 if (dma_mapping_error(ref->dev, ref->dev_addr)) { check_unmap()
1088 err_printk(ref->dev, NULL, check_unmap()
1092 err_printk(ref->dev, NULL, check_unmap()
1096 ref->dev_addr, ref->size); check_unmap()
1101 if (ref->size != entry->size) { check_unmap()
1102 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1106 ref->dev_addr, entry->size, ref->size); check_unmap()
1109 if (ref->type != entry->type) { check_unmap()
1110 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1114 ref->dev_addr, ref->size, check_unmap()
1115 type2name[entry->type], type2name[ref->type]); check_unmap()
1117 (phys_addr(ref) != phys_addr(entry))) { check_unmap()
1118 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1123 ref->dev_addr, ref->size, check_unmap()
1125 phys_addr(ref)); check_unmap()
1128 if (ref->sg_call_ents && ref->type == dma_debug_sg && check_unmap()
1129 ref->sg_call_ents != entry->sg_call_ents) { check_unmap()
1130 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1133 entry->sg_call_ents, ref->sg_call_ents); check_unmap()
1140 if (ref->direction != entry->direction) { check_unmap()
1141 err_printk(ref->dev, entry, "DMA-API: device driver frees " check_unmap()
1145 ref->dev_addr, ref->size, check_unmap()
1147 dir2name[ref->direction]); check_unmap()
1151 err_printk(ref->dev, entry, check_unmap()
1155 ref->dev_addr, ref->size, check_unmap()
1190 struct dma_debug_entry *ref, check_sync()
1197 bucket = get_hash_bucket(ref, &flags); check_sync()
1199 entry = bucket_find_contain(&bucket, ref, &flags); check_sync()
1205 (unsigned long long)ref->dev_addr, ref->size); check_sync()
1209 if (ref->size > entry->size) { check_sync()
1216 ref->size); check_sync()
1222 if (ref->direction != entry->direction) { check_sync()
1227 (unsigned long long)ref->dev_addr, entry->size, check_sync()
1229 dir2name[ref->direction]); check_sync()
1233 !(ref->direction == DMA_TO_DEVICE)) check_sync()
1238 (unsigned long long)ref->dev_addr, entry->size, check_sync()
1240 dir2name[ref->direction]); check_sync()
1243 !(ref->direction == DMA_FROM_DEVICE)) check_sync()
1248 (unsigned long long)ref->dev_addr, entry->size, check_sync()
1250 dir2name[ref->direction]); check_sync()
1252 if (ref->sg_call_ents && ref->type == dma_debug_sg && check_sync()
1253 ref->sg_call_ents != entry->sg_call_ents) { check_sync()
1254 err_printk(ref->dev, entry, "DMA-API: device driver syncs " check_sync()
1257 entry->sg_call_ents, ref->sg_call_ents); check_sync()
1305 struct dma_debug_entry ref; debug_dma_mapping_error() local
1313 ref.dev = dev; debug_dma_mapping_error()
1314 ref.dev_addr = dma_addr; debug_dma_mapping_error()
1315 bucket = get_hash_bucket(&ref, &flags); debug_dma_mapping_error()
1318 if (!exact_match(&ref, entry)) debug_dma_mapping_error()
1344 struct dma_debug_entry ref = { debug_dma_unmap_page() local
1356 ref.type = dma_debug_single; debug_dma_unmap_page()
1358 check_unmap(&ref); debug_dma_unmap_page()
1398 struct dma_debug_entry *ref) get_nr_mapped_entries()
1405 bucket = get_hash_bucket(ref, &flags); get_nr_mapped_entries()
1406 entry = bucket_find_exact(bucket, ref); get_nr_mapped_entries()
1427 struct dma_debug_entry ref = { for_each_sg() local
1442 mapped_ents = get_nr_mapped_entries(dev, &ref); for_each_sg()
1444 check_unmap(&ref); for_each_sg()
1479 struct dma_debug_entry ref = { debug_dma_free_coherent() local
1492 check_unmap(&ref); debug_dma_free_coherent()
1499 struct dma_debug_entry ref; debug_dma_sync_single_for_cpu() local
1504 ref.type = dma_debug_single; debug_dma_sync_single_for_cpu()
1505 ref.dev = dev; debug_dma_sync_single_for_cpu()
1506 ref.dev_addr = dma_handle; debug_dma_sync_single_for_cpu()
1507 ref.size = size; debug_dma_sync_single_for_cpu()
1508 ref.direction = direction; debug_dma_sync_single_for_cpu()
1509 ref.sg_call_ents = 0; debug_dma_sync_single_for_cpu()
1511 check_sync(dev, &ref, true); debug_dma_sync_single_for_cpu()
1519 struct dma_debug_entry ref; debug_dma_sync_single_for_device() local
1524 ref.type = dma_debug_single; debug_dma_sync_single_for_device()
1525 ref.dev = dev; debug_dma_sync_single_for_device()
1526 ref.dev_addr = dma_handle; debug_dma_sync_single_for_device()
1527 ref.size = size; debug_dma_sync_single_for_device()
1528 ref.direction = direction; debug_dma_sync_single_for_device()
1529 ref.sg_call_ents = 0; debug_dma_sync_single_for_device()
1531 check_sync(dev, &ref, false); debug_dma_sync_single_for_device()
1540 struct dma_debug_entry ref; debug_dma_sync_single_range_for_cpu() local
1545 ref.type = dma_debug_single; debug_dma_sync_single_range_for_cpu()
1546 ref.dev = dev; debug_dma_sync_single_range_for_cpu()
1547 ref.dev_addr = dma_handle; debug_dma_sync_single_range_for_cpu()
1548 ref.size = offset + size; debug_dma_sync_single_range_for_cpu()
1549 ref.direction = direction; debug_dma_sync_single_range_for_cpu()
1550 ref.sg_call_ents = 0; debug_dma_sync_single_range_for_cpu()
1552 check_sync(dev, &ref, true); debug_dma_sync_single_range_for_cpu()
1561 struct dma_debug_entry ref; debug_dma_sync_single_range_for_device() local
1566 ref.type = dma_debug_single; debug_dma_sync_single_range_for_device()
1567 ref.dev = dev; debug_dma_sync_single_range_for_device()
1568 ref.dev_addr = dma_handle; debug_dma_sync_single_range_for_device()
1569 ref.size = offset + size; debug_dma_sync_single_range_for_device()
1570 ref.direction = direction; debug_dma_sync_single_range_for_device()
1571 ref.sg_call_ents = 0; debug_dma_sync_single_range_for_device()
1573 check_sync(dev, &ref, false); debug_dma_sync_single_range_for_device()
1588 struct dma_debug_entry ref = { for_each_sg() local
1600 mapped_ents = get_nr_mapped_entries(dev, &ref); for_each_sg()
1605 check_sync(dev, &ref, true); for_each_sg()
1621 struct dma_debug_entry ref = { for_each_sg() local
1632 mapped_ents = get_nr_mapped_entries(dev, &ref); for_each_sg()
1637 check_sync(dev, &ref, false); for_each_sg()
298 __hash_bucket_find(struct hash_bucket *bucket, struct dma_debug_entry *ref, match_fn match) __hash_bucket_find() argument
348 bucket_find_exact(struct hash_bucket *bucket, struct dma_debug_entry *ref) bucket_find_exact() argument
354 bucket_find_contain(struct hash_bucket **bucket, struct dma_debug_entry *ref, unsigned long *flags) bucket_find_contain() argument
1189 check_sync(struct device *dev, struct dma_debug_entry *ref, bool to_cpu) check_sync() argument
1397 get_nr_mapped_entries(struct device *dev, struct dma_debug_entry *ref) get_nr_mapped_entries() argument
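
All of the check_unmap()/check_sync() machinery above compares a driver-supplied description (the on-stack ref) against the entry recorded at map time. What it validates is the stock DMA API pairing on the driver side; a minimal sketch:

    dma_addr_t handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, handle))     /* what debug_dma_mapping_error() tracks */
            return -ENOMEM;
    /* ... device performs DMA ... */
    /* size and direction must match the map call, or check_unmap() warns */
    dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);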
cpu_rmap.c:67 * @ref: kref to struct cpu_rmap
69 static void cpu_rmap_release(struct kref *ref) cpu_rmap_release() argument
71 struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount); cpu_rmap_release()
76 * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
85 * cpu_rmap_put - release ref on a cpu_rmap
266 * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
268 static void irq_cpu_rmap_release(struct kref *ref) irq_cpu_rmap_release() argument
271 container_of(ref, struct irq_glue, notify.kref); irq_cpu_rmap_release()
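
cpu_rmap_release() and irq_cpu_rmap_release() are both instances of the standard kref release pattern: container_of() recovers the enclosing object from the embedded kref. A generic sketch with a hypothetical struct widget:

    struct widget {
            struct kref ref;
            /* ... */
    };

    static void widget_release(struct kref *ref)
    {
            kfree(container_of(ref, struct widget, ref));
    }

    /* kref_init(&w->ref) at creation, kref_get(&w->ref) per user,
     * kref_put(&w->ref, widget_release): the last put runs the release */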
/linux-4.4.14/include/linux/
percpu-refcount.h:20 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
21 * issuing the appropriate barriers, and then marks the ref as shutting down so
22 * that percpu_ref_put() will check for the ref hitting 0. After it returns,
23 * it's safe to drop the initial ref.
35 * the initial ref with percpu_ref_put().
69 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
71 * with this flag, the ref will stay in atomic mode until
77 * Start dead w/ ref == 0 in atomic mode. Must be revived with
86 * The low bit of the pointer indicates whether the ref is in percpu
96 int __must_check percpu_ref_init(struct percpu_ref *ref,
99 void percpu_ref_exit(struct percpu_ref *ref);
100 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
102 void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
103 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
105 void percpu_ref_reinit(struct percpu_ref *ref);
108 * percpu_ref_kill - drop the initial ref
109 * @ref: percpu_ref to kill
111 * Must be used to drop the initial ref on a percpu refcount; must be called
114 * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
115 * percpu counters and dropping the initial ref.
117 static inline void percpu_ref_kill(struct percpu_ref *ref) percpu_ref_kill() argument
119 return percpu_ref_kill_and_confirm(ref, NULL); percpu_ref_kill()
126 * branches as it can't assume that @ref->percpu_count is not NULL.
128 static inline bool __ref_is_percpu(struct percpu_ref *ref, __ref_is_percpu() argument
134 * The value of @ref->percpu_count_ptr is tested for __ref_is_percpu()
146 percpu_ptr = lockless_dereference(ref->percpu_count_ptr); __ref_is_percpu()
163 * @ref: percpu_ref to get
168 * This function is safe to call as long as @ref is between init and exit.
170 static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) percpu_ref_get_many() argument
176 if (__ref_is_percpu(ref, &percpu_count)) percpu_ref_get_many()
179 atomic_long_add(nr, &ref->count); percpu_ref_get_many()
186 * @ref: percpu_ref to get
190 * This function is safe to call as long as @ref is between init and exit.
192 static inline void percpu_ref_get(struct percpu_ref *ref) percpu_ref_get() argument
194 percpu_ref_get_many(ref, 1); percpu_ref_get()
199 * @ref: percpu_ref to try-get
204 * This function is safe to call as long as @ref is between init and exit.
206 static inline bool percpu_ref_tryget(struct percpu_ref *ref) percpu_ref_tryget() argument
213 if (__ref_is_percpu(ref, &percpu_count)) { percpu_ref_tryget()
217 ret = atomic_long_inc_not_zero(&ref->count); percpu_ref_tryget()
227 * @ref: percpu_ref to try-get
238 * This function is safe to call as long as @ref is between init and exit.
240 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) percpu_ref_tryget_live() argument
247 if (__ref_is_percpu(ref, &percpu_count)) { percpu_ref_tryget_live()
250 } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) { percpu_ref_tryget_live()
251 ret = atomic_long_inc_not_zero(&ref->count); percpu_ref_tryget_live()
261 * @ref: percpu_ref to put
267 * This function is safe to call as long as @ref is between init and exit.
269 static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) percpu_ref_put_many() argument
275 if (__ref_is_percpu(ref, &percpu_count)) percpu_ref_put_many()
277 else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) percpu_ref_put_many()
278 ref->release(ref); percpu_ref_put_many()
285 * @ref: percpu_ref to put
290 * This function is safe to call as long as @ref is between init and exit.
292 static inline void percpu_ref_put(struct percpu_ref *ref) percpu_ref_put() argument
294 percpu_ref_put_many(ref, 1); percpu_ref_put()
299 * @ref: percpu_ref to test
301 * Returns %true if @ref is dying or dead.
303 * This function is safe to call as long as @ref is between init and exit
306 static inline bool percpu_ref_is_dying(struct percpu_ref *ref) percpu_ref_is_dying() argument
308 return ref->percpu_count_ptr & __PERCPU_REF_DEAD; percpu_ref_is_dying()
313 * @ref: percpu_ref to test
315 * Returns %true if @ref reached zero.
317 * This function is safe to call as long as @ref is between init and exit.
319 static inline bool percpu_ref_is_zero(struct percpu_ref *ref) percpu_ref_is_zero() argument
323 if (__ref_is_percpu(ref, &percpu_count)) percpu_ref_is_zero()
325 return !atomic_long_read(&ref->count); percpu_ref_is_zero()
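
The header above distinguishes percpu_ref_tryget(), which succeeds on any non-zero count, from percpu_ref_tryget_live(), which additionally fails once the ref has been killed. A lookup-path sketch (foo_lookup() and find_foo() are hypothetical helpers; hd_struct_try_get() in the genhd.h hits a few results further down is a real in-tree user of the same pattern):

    struct foo *foo_lookup(int id)
    {
            struct foo *foo = find_foo(id);         /* hypothetical table lookup */

            if (foo && !percpu_ref_tryget_live(&foo->ref))
                    foo = NULL;             /* already dying or dead: treat as gone */
            return foo;                     /* caller drops with percpu_ref_put() */
    }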
bsg.h:13 struct kref ref; member in struct:bsg_class_device
elf-fdpic.h:21 struct elfhdr hdr; /* ref copy of ELF header */
22 struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */
watchdog.h:29 * @ref: The ref operation for dyn. allocated watchdog_device structs
48 void (*ref)(struct watchdog_device *); member in struct:watchdog_ops
init.h:76 #define __ref __section(.ref.text) noinline
77 #define __refdata __section(.ref.data)
78 #define __refconst __constsection(.ref.rodata)
116 #define __REF .section ".ref.text", "ax"
117 #define __REFDATA .section ".ref.data", "aw"
118 #define __REFCONST .section ".ref.rodata", "a"
hw_random.h:49 struct kref ref; member in struct:hwrng
genhd.h:128 struct percpu_ref ref; member in struct:hd_struct
658 if (percpu_ref_init(&part->ref, __delete_partition, 0, hd_ref_init()
666 percpu_ref_get(&part->ref); hd_struct_get()
671 return percpu_ref_tryget_live(&part->ref); hd_struct_try_get()
676 percpu_ref_put(&part->ref); hd_struct_put()
681 percpu_ref_kill(&part->ref); hd_struct_kill()
688 percpu_ref_exit(&part->ref); hd_free_part()
/linux-4.4.14/fs/jffs2/
xattr.c:424 * verify_xattr_ref(c, ref)
427 * save_xattr_ref(c, ref)
432 * delete_xattr_ref(c, ref)
444 static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) verify_xattr_ref() argument
454 if (ref_flags(ref->node) != REF_UNCHECKED) verify_xattr_ref()
456 offset = ref_offset(ref->node); verify_xattr_ref()
482 ref->ino = je32_to_cpu(rr.ino); verify_xattr_ref()
483 ref->xid = je32_to_cpu(rr.xid); verify_xattr_ref()
484 ref->xseqno = je32_to_cpu(rr.xseqno); verify_xattr_ref()
485 if (ref->xseqno > c->highest_xseqno) verify_xattr_ref()
486 c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); verify_xattr_ref()
490 for (raw=ref->node; raw != (void *)ref; raw=raw->next_in_ino) { verify_xattr_ref()
497 raw->flash_offset = ref_offset(raw) | ((ref->node==raw) ? REF_PRISTINE : REF_NORMAL); verify_xattr_ref()
502 ref->ino, ref->xid, ref_offset(ref->node)); verify_xattr_ref()
506 static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) save_xattr_ref() argument
520 if (is_xattr_ref_dead(ref)) { save_xattr_ref()
522 rr.ino = cpu_to_je32(ref->ino); save_xattr_ref()
523 rr.xid = cpu_to_je32(ref->xid); save_xattr_ref()
525 rr.ino = cpu_to_je32(ref->ic->ino); save_xattr_ref()
526 rr.xid = cpu_to_je32(ref->xd->xid); save_xattr_ref()
542 ref->xseqno = xseqno; save_xattr_ref()
543 jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref); save_xattr_ref()
545 dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid); save_xattr_ref()
554 struct jffs2_xattr_ref *ref; create_xattr_ref() local
557 ref = jffs2_alloc_xattr_ref(); create_xattr_ref()
558 if (!ref) create_xattr_ref()
560 ref->ic = ic; create_xattr_ref()
561 ref->xd = xd; create_xattr_ref()
563 ret = save_xattr_ref(c, ref); create_xattr_ref()
565 jffs2_free_xattr_ref(ref); create_xattr_ref()
570 ref->next = ic->xref; create_xattr_ref()
571 ic->xref = ref; create_xattr_ref()
573 return ref; /* success */ create_xattr_ref()
576 static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) delete_xattr_ref() argument
581 xd = ref->xd; delete_xattr_ref()
582 ref->xseqno |= XREF_DELETE_MARKER; delete_xattr_ref()
583 ref->ino = ref->ic->ino; delete_xattr_ref()
584 ref->xid = ref->xd->xid; delete_xattr_ref()
586 ref->next = c->xref_dead_list; delete_xattr_ref()
587 c->xref_dead_list = ref; delete_xattr_ref()
591 ref->ino, ref->xid, ref->xseqno); delete_xattr_ref()
600 struct jffs2_xattr_ref *ref, *_ref; jffs2_xattr_delete_inode() local
606 for (ref = ic->xref; ref; ref = _ref) { jffs2_xattr_delete_inode()
607 _ref = ref->next; jffs2_xattr_delete_inode()
608 delete_xattr_ref(c, ref); jffs2_xattr_delete_inode()
618 struct jffs2_xattr_ref *ref, *_ref; jffs2_xattr_free_inode() local
621 for (ref = ic->xref; ref; ref = _ref) { jffs2_xattr_free_inode()
622 _ref = ref->next; jffs2_xattr_free_inode()
623 xd = ref->xd; jffs2_xattr_free_inode()
628 jffs2_free_xattr_ref(ref); jffs2_xattr_free_inode()
640 struct jffs2_xattr_ref *ref, *cmp, **pref, **pcmp; check_xattr_ref_inode() local
648 for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { check_xattr_ref_inode()
649 if (!ref->xd->xname) { check_xattr_ref_inode()
650 rc = load_xattr_datum(c, ref->xd); check_xattr_ref_inode()
652 *pref = ref->next; check_xattr_ref_inode()
653 delete_xattr_ref(c, ref); check_xattr_ref_inode()
658 for (cmp=ref->next, pcmp=&ref->next; cmp; pcmp=&cmp->next, cmp=cmp->next) { check_xattr_ref_inode()
660 ref->xd->flags |= JFFS2_XFLAGS_BIND; check_xattr_ref_inode()
662 ref->xd->flags &= ~JFFS2_XFLAGS_BIND; check_xattr_ref_inode()
670 if (ref->xd->xprefix == cmp->xd->xprefix check_xattr_ref_inode()
671 && !strcmp(ref->xd->xname, cmp->xd->xname)) { check_xattr_ref_inode()
672 if (ref->xseqno > cmp->xseqno) { check_xattr_ref_inode()
676 *pref = ref->next; check_xattr_ref_inode()
677 delete_xattr_ref(c, ref); check_xattr_ref_inode()
743 struct jffs2_xattr_ref *ref, *_ref; jffs2_clear_xattr_subsystem() local
746 for (ref=c->xref_temp; ref; ref = _ref) { jffs2_clear_xattr_subsystem()
747 _ref = ref->next; jffs2_clear_xattr_subsystem()
748 jffs2_free_xattr_ref(ref); jffs2_clear_xattr_subsystem()
751 for (ref=c->xref_dead_list; ref; ref = _ref) { jffs2_clear_xattr_subsystem()
752 _ref = ref->next; jffs2_clear_xattr_subsystem()
753 jffs2_free_xattr_ref(ref); jffs2_clear_xattr_subsystem()
777 struct jffs2_xattr_ref *ref, *_ref; jffs2_build_xattr_subsystem() local
790 for (ref=c->xref_temp; ref; ref=_ref) { jffs2_build_xattr_subsystem()
793 _ref = ref->next; jffs2_build_xattr_subsystem()
794 if (ref_flags(ref->node) != REF_PRISTINE) { jffs2_build_xattr_subsystem()
795 if (verify_xattr_ref(c, ref)) { jffs2_build_xattr_subsystem()
796 BUG_ON(ref->node->next_in_ino != (void *)ref); jffs2_build_xattr_subsystem()
797 ref->node->next_in_ino = NULL; jffs2_build_xattr_subsystem()
798 jffs2_mark_node_obsolete(c, ref->node); jffs2_build_xattr_subsystem()
799 jffs2_free_xattr_ref(ref); jffs2_build_xattr_subsystem()
804 i = (ref->ino ^ ref->xid) % XREF_TMPHASH_SIZE; jffs2_build_xattr_subsystem()
806 if (tmp->ino == ref->ino && tmp->xid == ref->xid) jffs2_build_xattr_subsystem()
810 raw = ref->node; jffs2_build_xattr_subsystem()
811 if (ref->xseqno > tmp->xseqno) { jffs2_build_xattr_subsystem()
812 tmp->xseqno = ref->xseqno; jffs2_build_xattr_subsystem()
819 jffs2_free_xattr_ref(ref); jffs2_build_xattr_subsystem()
822 ref->next = xref_tmphash[i]; jffs2_build_xattr_subsystem()
823 xref_tmphash[i] = ref; jffs2_build_xattr_subsystem()
830 for (ref=xref_tmphash[i]; ref; ref=_ref) { jffs2_build_xattr_subsystem()
832 _ref = ref->next; jffs2_build_xattr_subsystem()
833 if (is_xattr_ref_dead(ref)) { jffs2_build_xattr_subsystem()
834 ref->next = c->xref_dead_list; jffs2_build_xattr_subsystem()
835 c->xref_dead_list = ref; jffs2_build_xattr_subsystem()
839 /* At this point, ref->xid and ref->ino contain XID and inode number. jffs2_build_xattr_subsystem()
840 ref->xd and ref->ic are not valid yet. */ jffs2_build_xattr_subsystem()
841 xd = jffs2_find_xattr_datum(c, ref->xid); jffs2_build_xattr_subsystem()
842 ic = jffs2_get_ino_cache(c, ref->ino); jffs2_build_xattr_subsystem()
845 ref->ino, ref->xid, ref->xseqno); jffs2_build_xattr_subsystem()
846 ref->xseqno |= XREF_DELETE_MARKER; jffs2_build_xattr_subsystem()
847 ref->next = c->xref_dead_list; jffs2_build_xattr_subsystem()
848 c->xref_dead_list = ref; jffs2_build_xattr_subsystem()
852 ref->xd = xd; jffs2_build_xattr_subsystem()
853 ref->ic = ic; jffs2_build_xattr_subsystem()
855 ref->next = ic->xref; jffs2_build_xattr_subsystem()
856 ic->xref = ref; jffs2_build_xattr_subsystem()
967 struct jffs2_xattr_ref *ref, **pref; jffs2_listxattr() local
980 for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { jffs2_listxattr()
981 BUG_ON(ref->ic != ic); jffs2_listxattr()
982 xd = ref->xd; jffs2_listxattr()
993 *pref = ref->next; jffs2_listxattr()
994 delete_xattr_ref(c, ref); jffs2_listxattr()
1032 struct jffs2_xattr_ref *ref, **pref; do_jffs2_getxattr() local
1041 for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { do_jffs2_getxattr()
1042 BUG_ON(ref->ic!=ic); do_jffs2_getxattr()
1044 xd = ref->xd; do_jffs2_getxattr()
1057 *pref = ref->next; do_jffs2_getxattr()
1058 delete_xattr_ref(c, ref); do_jffs2_getxattr()
1094 struct jffs2_xattr_ref *ref, *newref, **pref; do_jffs2_setxattr() local
1113 for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { do_jffs2_setxattr()
1114 xd = ref->xd; do_jffs2_setxattr()
1120 *pref = ref->next; do_jffs2_setxattr()
1121 delete_xattr_ref(c, ref); do_jffs2_setxattr()
1132 ref->ino = ic->ino; do_jffs2_setxattr()
1133 ref->xid = xd->xid; do_jffs2_setxattr()
1134 ref->xseqno |= XREF_DELETE_MARKER; do_jffs2_setxattr()
1135 rc = save_xattr_ref(c, ref); do_jffs2_setxattr()
1137 *pref = ref->next; do_jffs2_setxattr()
1139 ref->next = c->xref_dead_list; do_jffs2_setxattr()
1140 c->xref_dead_list = ref; do_jffs2_setxattr()
1144 ref->ic = ic; do_jffs2_setxattr()
1145 ref->xd = xd; do_jffs2_setxattr()
1146 ref->xseqno &= ~XREF_DELETE_MARKER; do_jffs2_setxattr()
1182 if (ref) do_jffs2_setxattr()
1183 *pref = ref->next; do_jffs2_setxattr()
1186 if (ref) { do_jffs2_setxattr()
1187 ref->next = ic->xref; do_jffs2_setxattr()
1188 ic->xref = ref; do_jffs2_setxattr()
1192 } else if (ref) { do_jffs2_setxattr()
1193 delete_xattr_ref(c, ref); do_jffs2_setxattr()
1204 * jffs2_garbage_collect_xattr_ref(c, ref, raw)
1210 * jffs2_release_xattr_ref(c, ref)
1249 int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref, jffs2_garbage_collect_xattr_ref() argument
1256 BUG_ON(!ref->node); jffs2_garbage_collect_xattr_ref()
1258 if (ref->node != raw) jffs2_garbage_collect_xattr_ref()
1260 if (is_xattr_ref_dead(ref) && (raw->next_in_ino == (void *)ref)) jffs2_garbage_collect_xattr_ref()
1263 old_ofs = ref_offset(ref->node); jffs2_garbage_collect_xattr_ref()
1264 totlen = ref_totlen(c, c->gcblock, ref->node); jffs2_garbage_collect_xattr_ref()
1272 rc = save_xattr_ref(c, ref); jffs2_garbage_collect_xattr_ref()
1275 ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node)); jffs2_garbage_collect_xattr_ref()
1326 void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) jffs2_release_xattr_ref() argument
1331 if (ref->node != (void *)ref) jffs2_release_xattr_ref()
1335 if (ref == tmp) { jffs2_release_xattr_ref()
1340 jffs2_free_xattr_ref(ref); jffs2_release_xattr_ref()
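
A recurring idiom in xattr.c above is the pref/pcmp double pointer: the loops walk with both ref and a pointer to the link that points at ref, so *pref = ref->next unlinks the current node in O(1) with no head-of-list special case. The idiom in isolation (struct node, should_remove() and free_node() are hypothetical):

    struct node **pp = &head;

    while (*pp) {
            if (should_remove(*pp)) {
                    struct node *dead = *pp;

                    *pp = dead->next;       /* unlink without tracking 'prev' */
                    free_node(dead);
            } else {
                    pp = &(*pp)->next;
            }
    }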
malloc.c:204 struct jffs2_raw_node_ref **p, *ref; jffs2_prealloc_raw_node_refs() local
210 ref = *p; jffs2_prealloc_raw_node_refs()
215 if (ref && ref->flash_offset != REF_EMPTY_NODE) jffs2_prealloc_raw_node_refs()
216 ref++; jffs2_prealloc_raw_node_refs()
219 if (!ref) { jffs2_prealloc_raw_node_refs()
221 ref = *p = jffs2_alloc_refblock(); jffs2_prealloc_raw_node_refs()
222 if (!ref) jffs2_prealloc_raw_node_refs()
225 if (ref->flash_offset == REF_LINK_NODE) { jffs2_prealloc_raw_node_refs()
226 p = &ref->next_in_ino; jffs2_prealloc_raw_node_refs()
227 ref = *p; jffs2_prealloc_raw_node_refs()
231 ref++; jffs2_prealloc_raw_node_refs()
299 struct jffs2_xattr_ref *ref; jffs2_alloc_xattr_ref() local
300 ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL); jffs2_alloc_xattr_ref()
301 dbg_memalloc("%p\n", ref); jffs2_alloc_xattr_ref()
302 if (!ref) jffs2_alloc_xattr_ref()
305 ref->class = RAWNODE_CLASS_XATTR_REF; jffs2_alloc_xattr_ref()
306 ref->node = (void *)ref; jffs2_alloc_xattr_ref()
307 return ref; jffs2_alloc_xattr_ref()
310 void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref) jffs2_free_xattr_ref() argument
312 dbg_memalloc("%p\n", ref); jffs2_free_xattr_ref()
313 kmem_cache_free(xattr_ref_cache, ref); jffs2_free_xattr_ref()
nodelist.c:590 struct jffs2_raw_node_ref *ref; jffs2_link_node_ref() local
595 ref = jeb->last_node; jffs2_link_node_ref()
597 dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset, jffs2_link_node_ref()
598 ref->next_in_ino); jffs2_link_node_ref()
600 while (ref->flash_offset != REF_EMPTY_NODE) { jffs2_link_node_ref()
601 if (ref->flash_offset == REF_LINK_NODE) jffs2_link_node_ref()
602 ref = ref->next_in_ino; jffs2_link_node_ref()
604 ref++; jffs2_link_node_ref()
607 dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref, jffs2_link_node_ref()
608 ref->flash_offset, ofs, ref->next_in_ino, len); jffs2_link_node_ref()
610 ref->flash_offset = ofs; jffs2_link_node_ref()
613 jeb->first_node = ref; jffs2_link_node_ref()
614 BUG_ON(ref_offset(ref) != jeb->offset); jffs2_link_node_ref()
615 } else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) { jffs2_link_node_ref()
618 JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n", jffs2_link_node_ref()
619 ref, ref_offset(ref), ref_offset(ref)+len, jffs2_link_node_ref()
624 jeb->last_node = ref; jffs2_link_node_ref()
627 ref->next_in_ino = ic->nodes; jffs2_link_node_ref()
628 ic->nodes = ref; jffs2_link_node_ref()
630 ref->next_in_ino = NULL; jffs2_link_node_ref()
633 switch(ref_flags(ref)) { jffs2_link_node_ref()
655 ref->__totlen = len; jffs2_link_node_ref()
656 ref_totlen(c, jeb, ref); jffs2_link_node_ref()
658 return ref; jffs2_link_node_ref()
661 /* No locking, no reservation of 'ref'. Do not use on a live file system */ jffs2_scan_dirty_space()
694 struct jffs2_raw_node_ref *ref) __ref_totlen()
697 struct jffs2_raw_node_ref *next_ref = ref_next(ref); __ref_totlen()
703 jeb = &c->blocks[ref->flash_offset / c->sector_size]; __ref_totlen()
706 if (unlikely(ref != jeb->last_node)) { __ref_totlen()
707 pr_crit("ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n", __ref_totlen()
708 ref, ref_offset(ref), jeb->last_node, __ref_totlen()
715 return ref_end - ref_offset(ref); __ref_totlen()
719 struct jffs2_raw_node_ref *ref) __jffs2_ref_totlen()
723 ret = __ref_totlen(c, jeb, ref); __jffs2_ref_totlen()
726 if (unlikely(ret != ref->__totlen)) { __jffs2_ref_totlen()
728 jeb = &c->blocks[ref->flash_offset / c->sector_size]; __jffs2_ref_totlen()
730 pr_crit("Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n", __jffs2_ref_totlen()
731 ref, ref_offset(ref), ref_offset(ref) + ref->__totlen, __jffs2_ref_totlen()
732 ret, ref->__totlen); __jffs2_ref_totlen()
733 if (ref_next(ref)) { __jffs2_ref_totlen()
735 ref_next(ref), ref_offset(ref_next(ref)), __jffs2_ref_totlen()
736 ref_offset(ref_next(ref)) + ref->__totlen); __jffs2_ref_totlen()
738 pr_crit("No next ref. jeb->last_node is %p\n", __jffs2_ref_totlen()
751 ret = ref->__totlen; __jffs2_ref_totlen()
692 __ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_node_ref *ref) __ref_totlen() argument
718 __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_node_ref *ref) __jffs2_ref_totlen() argument
readinode.c:33 struct jffs2_raw_node_ref *ref = tn->fn->raw; check_node_data() local
43 ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); check_node_data()
53 ref_offset(ref), tn->csize, ofs); check_node_data()
62 ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); check_node_data()
109 ref_offset(ref), tn->data_crc, crc); check_node_data()
114 jeb = &c->blocks[ref->flash_offset / c->sector_size]; check_node_data()
115 len = ref_totlen(c, jeb, ref); check_node_data()
120 ref->flash_offset |= REF_PRISTINE; check_node_data()
567 /* Returns first valid node after 'ref'. May return 'ref' */ jffs2_first_valid_node()
568 static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) jffs2_first_valid_node() argument
570 while (ref && ref->next_in_ino) { jffs2_first_valid_node()
571 if (!ref_obsolete(ref)) jffs2_first_valid_node()
572 return ref; jffs2_first_valid_node()
573 dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)); jffs2_first_valid_node()
574 ref = ref->next_in_ino; jffs2_first_valid_node()
586 static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, read_direntry() argument
594 BUG_ON(ref_obsolete(ref)); read_direntry()
599 ref_offset(ref), je32_to_cpu(rd->node_crc), crc); read_direntry()
600 jffs2_mark_node_obsolete(c, ref); read_direntry()
605 if (ref_flags(ref) == REF_UNCHECKED) { read_direntry()
612 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); read_direntry()
613 jffs2_mark_node_obsolete(c, ref); read_direntry()
617 jeb = &c->blocks[ref->flash_offset / c->sector_size]; read_direntry()
618 len = ref_totlen(c, jeb, ref); read_direntry()
625 ref->flash_offset = ref_offset(ref) | dirent_node_state(rd); read_direntry()
633 fd->raw = ref; read_direntry()
661 err = jffs2_flash_read(c, (ref_offset(ref)) + read, read_direntry()
697 static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, read_dnode() argument
707 BUG_ON(ref_obsolete(ref)); read_dnode()
712 ref_offset(ref), je32_to_cpu(rd->node_crc), crc); read_dnode()
713 jffs2_mark_node_obsolete(c, ref); read_dnode()
727 if (ref_flags(ref) == REF_UNCHECKED) { read_dnode()
732 JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref)); read_dnode()
733 jffs2_dbg_dump_node(c, ref_offset(ref)); read_dnode()
734 jffs2_mark_node_obsolete(c, ref); read_dnode()
788 ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc)); read_dnode()
789 jffs2_mark_node_obsolete(c, ref); read_dnode()
803 jeb = &c->blocks[ref->flash_offset / c->sector_size]; read_dnode()
804 len = ref_totlen(c, jeb, ref); read_dnode()
811 ref->flash_offset = ref_offset(ref) | REF_NORMAL; read_dnode()
827 tn->fn->raw = ref; read_dnode()
841 ref_offset(ref), je32_to_cpu(rd->version), read_dnode()
872 static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un) read_unknown() argument
875 if (ref_flags(ref) == REF_UNCHECKED) { read_unknown()
877 ref_offset(ref)); read_unknown()
881 jffs2_mark_node_obsolete(c, ref); read_unknown()
891 je16_to_cpu(un->nodetype), ref_offset(ref)); read_unknown()
898 je16_to_cpu(un->nodetype), ref_offset(ref)); read_unknown()
904 je16_to_cpu(un->nodetype), ref_offset(ref)); read_unknown()
909 je16_to_cpu(un->nodetype), ref_offset(ref)); read_unknown()
910 jffs2_mark_node_obsolete(c, ref); read_unknown()
924 static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, read_more() argument
939 offs = ref_offset(ref) + *rdlen; read_more()
968 struct jffs2_raw_node_ref *ref, *valid_ref; jffs2_get_inode_nodes() local
992 they're in gets erased. So if we mark 'ref' obsolete while we're jffs2_get_inode_nodes()
994 we find the next valid node first, before processing 'ref'. jffs2_get_inode_nodes()
996 ref = valid_ref; jffs2_get_inode_nodes()
997 valid_ref = jffs2_first_valid_node(ref->next_in_ino); jffs2_get_inode_nodes()
1019 end = ref_offset(ref) + len; jffs2_get_inode_nodes()
1023 len = end - ref_offset(ref); jffs2_get_inode_nodes()
1026 dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref)); jffs2_get_inode_nodes()
1029 err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf); jffs2_get_inode_nodes()
1031 JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err); jffs2_get_inode_nodes()
1036 JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len); jffs2_get_inode_nodes()
1046 ref_offset(ref), je16_to_cpu(node->u.magic), jffs2_get_inode_nodes()
1050 jffs2_dbg_dump_node(c, ref_offset(ref)); jffs2_get_inode_nodes()
1051 jffs2_mark_node_obsolete(c, ref); jffs2_get_inode_nodes()
1057 je16_to_cpu(node->u.magic), ref_offset(ref)); jffs2_get_inode_nodes()
1058 jffs2_mark_node_obsolete(c, ref); jffs2_get_inode_nodes()
1068 err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf); jffs2_get_inode_nodes()
1073 err = read_direntry(c, ref, &node->d, retlen, rii); jffs2_get_inode_nodes()
1083 err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf); jffs2_get_inode_nodes()
1088 err = read_dnode(c, ref, &node->i, len, rii); jffs2_get_inode_nodes()
1097 err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf); jffs2_get_inode_nodes()
1102 err = read_unknown(c, ref, &node->u); jffs2_get_inode_nodes()
xattr.h:66 static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref) is_xattr_ref_dead() argument
68 return ((ref->xseqno & XREF_DELETE_MARKER) != 0); is_xattr_ref_dead()
86 extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref,
90 extern void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
erase.c:236 struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb) jffs2_remove_node_refs_from_ino_list()
241 prev = &ref->next_in_ino; jffs2_remove_node_refs_from_ino_list()
262 if (this == ref) jffs2_remove_node_refs_from_ino_list()
318 struct jffs2_raw_node_ref *block, *ref; jffs2_free_jeb_node_refs() local
322 block = ref = jeb->first_node; jffs2_free_jeb_node_refs()
324 while (ref) { jffs2_free_jeb_node_refs()
325 if (ref->flash_offset == REF_LINK_NODE) { jffs2_free_jeb_node_refs()
326 ref = ref->next_in_ino; jffs2_free_jeb_node_refs()
328 block = ref; jffs2_free_jeb_node_refs()
331 if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino) jffs2_free_jeb_node_refs()
332 jffs2_remove_node_refs_from_ino_list(c, ref, jeb); jffs2_free_jeb_node_refs()
335 ref++; jffs2_free_jeb_node_refs()
235 jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb) jffs2_remove_node_refs_from_ino_list() argument
nodelist.h:101 static inline struct jffs2_raw_node_ref *ref_next(struct jffs2_raw_node_ref *ref) ref_next() argument
103 ref++; ref_next()
106 if (ref->flash_offset == REF_LINK_NODE) { ref_next()
107 ref = ref->next_in_ino; ref_next()
108 if (!ref) ref_next()
109 return ref; ref_next()
113 if (ref->flash_offset == REF_EMPTY_NODE) ref_next()
116 return ref; ref_next()
136 #define ref_flags(ref) ((ref)->flash_offset & 3)
137 #define ref_offset(ref) ((ref)->flash_offset & ~3)
138 #define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE)
139 #define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
146 /* NB: REF_PRISTINE for an inode-less node (ref->next_in_ino == NULL) indicates
381 struct jffs2_raw_node_ref *ref);
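
The ref_flags()/ref_offset() macros above work because flash offsets are at least 4-byte aligned, leaving the low two bits of flash_offset free to carry the node state (REF_UNCHECKED, REF_OBSOLETE, REF_PRISTINE, REF_NORMAL, as used throughout this listing). The encode/decode in isolation (check_node() is hypothetical):

    ref->flash_offset = phys_ofs | REF_UNCHECKED;   /* offset plus 2-bit state */
    /* ... later, when the node gets checked ... */
    if (ref_flags(ref) == REF_UNCHECKED)            /* flash_offset & 3 */
            check_node(c, ref_offset(ref));         /* flash_offset & ~3 */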
nodemgmt.c:579 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref) jffs2_mark_node_obsolete() argument
588 if(unlikely(!ref)) { jffs2_mark_node_obsolete()
592 if (ref_obsolete(ref)) { jffs2_mark_node_obsolete()
594 __func__, ref_offset(ref)); jffs2_mark_node_obsolete()
597 blocknr = ref->flash_offset / c->sector_size; jffs2_mark_node_obsolete()
600 ref->flash_offset); jffs2_mark_node_obsolete()
618 freed_len = ref_totlen(c, jeb, ref); jffs2_mark_node_obsolete()
620 if (ref_flags(ref) == REF_UNCHECKED) { jffs2_mark_node_obsolete()
624 ref->flash_offset, jeb->used_size); jffs2_mark_node_obsolete()
628 ref_offset(ref), freed_len); jffs2_mark_node_obsolete()
635 ref->flash_offset, jeb->used_size); jffs2_mark_node_obsolete()
639 ref_offset(ref), freed_len); jffs2_mark_node_obsolete()
673 ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; jffs2_mark_node_obsolete()
753 the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet jffs2_mark_node_obsolete()
757 ref_offset(ref)); jffs2_mark_node_obsolete()
758 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); jffs2_mark_node_obsolete()
761 ref_offset(ref), ret); jffs2_mark_node_obsolete()
766 ref_offset(ref), retlen); jffs2_mark_node_obsolete()
770 pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", jffs2_mark_node_obsolete()
776 ref_offset(ref), je16_to_cpu(n.nodetype)); jffs2_mark_node_obsolete()
781 ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); jffs2_mark_node_obsolete()
784 ref_offset(ref), ret); jffs2_mark_node_obsolete()
789 ref_offset(ref), retlen); jffs2_mark_node_obsolete()
803 if (ref->next_in_ino) { jffs2_mark_node_obsolete()
809 ic = jffs2_raw_ref_to_ic(ref); jffs2_mark_node_obsolete()
810 for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino)) jffs2_mark_node_obsolete()
813 *p = ref->next_in_ino; jffs2_mark_node_obsolete()
814 ref->next_in_ino = NULL; jffs2_mark_node_obsolete()
scan.c:387 struct jffs2_xattr_ref *ref; jffs2_scan_xref_node() local
409 ref = jffs2_alloc_xattr_ref(); jffs2_scan_xref_node()
410 if (!ref) jffs2_scan_xref_node()
415 * ref->xid is used to store 32bit xid, xd is not used jffs2_scan_xref_node()
416 * ref->ino is used to store 32bit inode-number, ic is not used jffs2_scan_xref_node()
418 * are exclusive. In a similar way, ref->next is temporarily jffs2_scan_xref_node()
422 ref->ino = je32_to_cpu(rr->ino); jffs2_scan_xref_node()
423 ref->xid = je32_to_cpu(rr->xid); jffs2_scan_xref_node()
424 ref->xseqno = je32_to_cpu(rr->xseqno); jffs2_scan_xref_node()
425 if (ref->xseqno > c->highest_xseqno) jffs2_scan_xref_node()
426 c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); jffs2_scan_xref_node()
427 ref->next = c->xref_temp; jffs2_scan_xref_node()
428 c->xref_temp = ref; jffs2_scan_xref_node()
430 jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref); jffs2_scan_xref_node()
435 ofs, ref->xid, ref->ino); jffs2_scan_xref_node()
debug.c:357 /* This should work when we implement ref->__totlen elimination */ __jffs2_dbg_acct_paranoia_check_nolock()
404 struct jffs2_raw_node_ref *ref; __jffs2_dbg_dump_node_refs_nolock() local
414 for (ref = jeb->first_node; ; ref = ref_next(ref)) { __jffs2_dbg_dump_node_refs_nolock()
415 printk("%#08x", ref_offset(ref)); __jffs2_dbg_dump_node_refs_nolock()
417 printk("(%x)", ref->__totlen); __jffs2_dbg_dump_node_refs_nolock()
419 if (ref_next(ref)) __jffs2_dbg_dump_node_refs_nolock()
summary.c:402 /* Make sure there's a spare ref for dirty space */ jffs2_sum_process_sum_data()
523 struct jffs2_xattr_ref *ref; jffs2_sum_process_sum_data() local
532 ref = jffs2_alloc_xattr_ref(); jffs2_sum_process_sum_data()
533 if (!ref) { jffs2_sum_process_sum_data()
537 ref->next = c->xref_temp; jffs2_sum_process_sum_data()
538 c->xref_temp = ref; jffs2_sum_process_sum_data()
541 PAD(sizeof(struct jffs2_raw_xref)), (void *)ref); jffs2_sum_process_sum_data()
543 *pseudo_random += ref->node->flash_offset; jffs2_sum_process_sum_data()
/linux-4.4.14/drivers/clk/mxs/
clk-ref.c:39 struct clk_ref *ref = to_clk_ref(hw); clk_ref_enable() local
41 writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR); clk_ref_enable()
48 struct clk_ref *ref = to_clk_ref(hw); clk_ref_disable() local
50 writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET); clk_ref_disable()
56 struct clk_ref *ref = to_clk_ref(hw); clk_ref_recalc_rate() local
58 u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f; clk_ref_recalc_rate()
92 struct clk_ref *ref = to_clk_ref(hw); clk_ref_set_rate() local
96 u8 frac, shift = ref->idx * 8; clk_ref_set_rate()
109 val = readl_relaxed(ref->reg); clk_ref_set_rate()
112 writel_relaxed(val, ref->reg); clk_ref_set_rate()
130 struct clk_ref *ref; mxs_clk_ref() local
134 ref = kzalloc(sizeof(*ref), GFP_KERNEL); mxs_clk_ref()
135 if (!ref) mxs_clk_ref()
144 ref->reg = reg; mxs_clk_ref()
145 ref->idx = idx; mxs_clk_ref()
146 ref->hw.init = &init; mxs_clk_ref()
148 clk = clk_register(NULL, &ref->hw); mxs_clk_ref()
150 kfree(ref); mxs_clk_ref()
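
clk_ref_recalc_rate() above reads a 6-bit frac field per ref clock. On the i.MX23/28 parts this driver serves, each ref (PFD) output runs at parent_rate * 18 / frac, so a 480 MHz PLL with frac = 30 yields 288 MHz; that formula comes from the PFD scheme in those reference manuals rather than from the lines quoted here, so treat it as a stated assumption.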
/linux-4.4.14/lib/lz4/
lz4_decompress.c:58 const BYTE *ref; lz4_uncompress() local
100 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); lz4_uncompress()
104 if (unlikely(ref < (BYTE *const) dest)) lz4_uncompress()
118 if (unlikely((op - ref) < STEPSIZE)) { lz4_uncompress()
120 int dec64 = dec64table[op - ref]; lz4_uncompress()
124 op[0] = ref[0]; lz4_uncompress()
125 op[1] = ref[1]; lz4_uncompress()
126 op[2] = ref[2]; lz4_uncompress()
127 op[3] = ref[3]; lz4_uncompress()
129 ref += 4; lz4_uncompress()
130 ref -= dec32table[op-ref]; lz4_uncompress()
131 PUT4(ref, op); lz4_uncompress()
133 ref -= dec64; lz4_uncompress()
135 LZ4_COPYSTEP(ref, op); lz4_uncompress()
144 if ((ref + COPYLENGTH) > oend) lz4_uncompress()
146 if ((ref + COPYLENGTH) > oend || lz4_uncompress()
150 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); lz4_uncompress()
152 *op++ = *ref++; lz4_uncompress()
162 LZ4_SECURECOPY(ref, op, cpy); lz4_uncompress()
178 const BYTE *ref; lz4_uncompress_unknownoutputsize() local
226 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); lz4_uncompress_unknownoutputsize()
228 if (ref < (BYTE * const) dest) lz4_uncompress_unknownoutputsize()
250 if (unlikely((op - ref) < STEPSIZE)) { lz4_uncompress_unknownoutputsize()
252 int dec64 = dec64table[op - ref]; lz4_uncompress_unknownoutputsize()
256 op[0] = ref[0]; lz4_uncompress_unknownoutputsize()
257 op[1] = ref[1]; lz4_uncompress_unknownoutputsize()
258 op[2] = ref[2]; lz4_uncompress_unknownoutputsize()
259 op[3] = ref[3]; lz4_uncompress_unknownoutputsize()
261 ref += 4; lz4_uncompress_unknownoutputsize()
262 ref -= dec32table[op - ref]; lz4_uncompress_unknownoutputsize()
263 PUT4(ref, op); lz4_uncompress_unknownoutputsize()
265 ref -= dec64; lz4_uncompress_unknownoutputsize()
267 LZ4_COPYSTEP(ref, op); lz4_uncompress_unknownoutputsize()
274 if ((ref + COPYLENGTH) > oend) lz4_uncompress_unknownoutputsize()
276 if ((ref + COPYLENGTH) > oend || lz4_uncompress_unknownoutputsize()
280 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); lz4_uncompress_unknownoutputsize()
282 *op++ = *ref++; lz4_uncompress_unknownoutputsize()
292 LZ4_SECURECOPY(ref, op, cpy); lz4_uncompress_unknownoutputsize()
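
The dec32table/dec64table juggling at lines 118-135 exists because ref points back into output already produced: when the match offset is smaller than the word-copy step, a word copy would read bytes it has not yet written. The correctness baseline it accelerates is the plain byte loop, which naturally lets the output feed itself (this is how LZ4 represents run-length-style matches):

    /* reference match copy: safe for any offset, including offset 1 */
    while (length--)
            *op++ = *ref++;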
lz4_compress.c:92 const u8 *ref; lz4_compressctx() local
106 ref = base + hashtable[h]; lz4_compressctx()
108 } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); lz4_compressctx()
111 while ((ip > anchor) && (ref > (u8 *)source) && lz4_compressctx()
112 unlikely(ip[-1] == ref[-1])) { lz4_compressctx()
114 ref--; lz4_compressctx()
139 LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); lz4_compressctx()
144 ref += MINMATCH; lz4_compressctx()
148 u64 diff = A64(ref) ^ A64(ip); lz4_compressctx()
150 u32 diff = A32(ref) ^ A32(ip); lz4_compressctx()
154 ref += STEPSIZE; lz4_compressctx()
161 if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { lz4_compressctx()
163 ref += 4; lz4_compressctx()
166 if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { lz4_compressctx()
168 ref += 2; lz4_compressctx()
170 if ((ip < MATCHLIMIT) && (*ref == *ip)) lz4_compressctx()
203 ref = base + hashtable[LZ4_HASH_VALUE(ip)]; lz4_compressctx()
205 if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { lz4_compressctx()
273 const u8 *ref; lz4_compress64kctx() local
287 ref = base + hashtable[h]; lz4_compress64kctx()
289 } while (A32(ref) != A32(ip)); lz4_compress64kctx()
292 while ((ip > anchor) && (ref > (u8 *)source) lz4_compress64kctx()
293 && (ip[-1] == ref[-1])) { lz4_compress64kctx()
295 ref--; lz4_compress64kctx()
319 LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); lz4_compress64kctx()
324 ref += MINMATCH; lz4_compress64kctx()
329 u64 diff = A64(ref) ^ A64(ip); lz4_compress64kctx()
331 u32 diff = A32(ref) ^ A32(ip); lz4_compress64kctx()
336 ref += STEPSIZE; lz4_compress64kctx()
343 if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { lz4_compress64kctx()
345 ref += 4; lz4_compress64kctx()
348 if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { lz4_compress64kctx()
350 ref += 2; lz4_compress64kctx()
352 if ((ip < MATCHLIMIT) && (*ref == *ip)) lz4_compress64kctx()
386 ref = base + hashtable[LZ4_HASH64K_VALUE(ip)]; lz4_compress64kctx()
388 if (A32(ref) == A32(ip)) { lz4_compress64kctx()
lz4hc_compress.c:126 const u8 *ref; lz4hc_insertandfindbestmatch() local
138 ref = hashtable[HASH_VALUE(ip)] + base; lz4hc_insertandfindbestmatch()
141 if (ref >= ip-4) { lz4hc_insertandfindbestmatch()
143 if (A32(ref) == A32(ip)) { lz4hc_insertandfindbestmatch()
144 delta = (u16)(ip-ref); lz4hc_insertandfindbestmatch()
146 ref + MINMATCH, matchlimit) + MINMATCH; lz4hc_insertandfindbestmatch()
147 *matchpos = ref; lz4hc_insertandfindbestmatch()
149 ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; lz4hc_insertandfindbestmatch()
152 while ((ref >= ip - MAX_DISTANCE) && nbattempts) { lz4hc_insertandfindbestmatch()
154 if (*(ref + ml) == *(ip + ml)) { lz4hc_insertandfindbestmatch()
155 if (A32(ref) == A32(ip)) { lz4hc_insertandfindbestmatch()
158 ref + MINMATCH, matchlimit) + MINMATCH; lz4hc_insertandfindbestmatch()
161 *matchpos = ref; lz4hc_insertandfindbestmatch()
165 ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; lz4hc_insertandfindbestmatch()
201 const u8 *ref; lz4hc_insertandgetwidermatch() local
207 ref = hashtable[HASH_VALUE(ip)] + base; lz4hc_insertandgetwidermatch()
209 while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base) lz4hc_insertandgetwidermatch()
212 if (*(startlimit + longest) == *(ref - delta + longest)) { lz4hc_insertandgetwidermatch()
213 if (A32(ref) == A32(ip)) { lz4hc_insertandgetwidermatch()
214 const u8 *reft = ref + MINMATCH; lz4hc_insertandgetwidermatch()
248 reft = ref; lz4hc_insertandgetwidermatch()
264 ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; lz4hc_insertandgetwidermatch()
270 int ml, const u8 *ref) lz4_encodesequence()
291 LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref)); lz4_encodesequence()
331 const u8 *ref = NULL; lz4_compresshcctx() local
344 ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref)); lz4_compresshcctx()
352 ref0 = ref; lz4_compresshcctx()
362 lz4_encodesequence(&ip, &op, &anchor, ml, ref); lz4_compresshcctx()
370 ref = ref0; lz4_compresshcctx()
381 ref = ref2; lz4_compresshcctx()
418 /* ip & ref are known; Now for ml */ lz4_compresshcctx()
423 lz4_encodesequence(&ip, &op, &anchor, ml, ref); lz4_compresshcctx()
449 lz4_encodesequence(&ip, &op, &anchor, ml, ref); lz4_compresshcctx()
451 ref = ref3; lz4_compresshcctx()
468 * the first one ip & ref are known; Now for ml lz4_compresshcctx()
487 lz4_encodesequence(&ip, &op, &anchor, ml, ref); lz4_compresshcctx()
490 ref = ref2; lz4_compresshcctx()
269 lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor, int ml, const u8 *ref) lz4_encodesequence() argument
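The lz4_compressctx() and lz4hc hits above trace LZ4's greedy match finder: hash the next 4 bytes (A32), look up the last position stored under the same hash, verify the candidate with a 4-byte compare, extend the match backward past the anchor and forward a word at a time, then emit the offset as a little-endian u16. A minimal user-space sketch of the forward half of that scheme; the names (find_match, hash32, the table sizing) are invented for illustration and none of the kernel's alignment or bounds tricks are reproduced:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HASH_LOG     12
#define MAX_DISTANCE 65535	/* match offsets are stored in 16 bits */

static uint32_t read32(const uint8_t *p)
{
	uint32_t v;
	memcpy(&v, p, 4);
	return v;
}

static uint32_t hash32(uint32_t v)
{
	return (v * 2654435761u) >> (32 - HASH_LOG);
}

/*
 * Hash the 4 bytes at ip, look up the previous position with the same
 * hash, verify the candidate, extend forward.  Returns the match
 * length (0 if none); *ref receives the match start.
 */
static size_t find_match(const uint8_t *base, const uint8_t *ip,
			 const uint8_t *iend, const uint8_t **ref,
			 uint32_t table[1 << HASH_LOG])
{
	uint32_t h = hash32(read32(ip));
	const uint8_t *cand = base + table[h];

	table[h] = (uint32_t)(ip - base);	/* remember newest position */

	if (cand >= ip || ip - cand > MAX_DISTANCE ||
	    read32(cand) != read32(ip))
		return 0;

	const uint8_t *p = ip + 4, *q = cand + 4;
	while (p < iend && *p == *q) {		/* extend forward */
		p++;
		q++;
	}
	*ref = cand;
	return (size_t)(p - ip);
}

int main(void)
{
	static uint32_t table[1 << HASH_LOG];
	const uint8_t buf[] = "abcdefgh--abcdefgh";	/* repeat at offset 10 */
	const uint8_t *ref;

	find_match(buf, buf, buf + 18, &ref, table);	/* index first copy */
	size_t len = find_match(buf, buf + 10, buf + 18, &ref, table);
	printf("match of %zu bytes at distance %td\n", len, (buf + 10) - ref);
	return 0;
}

The single-slot table is what separates plain LZ4 from lz4hc above: the HC variant keeps older candidates reachable through chaintable[] instead of overwriting them, trading speed for better matches.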
/linux-4.4.14/fs/btrfs/
H A Ddelayed-ref.c23 #include "delayed-ref.h"
88 /* insert a new ref into the head ref rbtree */ htree_insert()
119 * find a head entry based on bytenr. This returns the delayed ref
184 struct btrfs_delayed_ref_node *ref) drop_delayed_ref()
186 if (btrfs_delayed_ref_is_head(ref)) { drop_delayed_ref()
187 head = btrfs_delayed_node_to_head(ref); drop_delayed_ref()
191 list_del(&ref->list); drop_delayed_ref()
193 ref->in_tree = 0; drop_delayed_ref()
194 btrfs_put_delayed_ref(ref); drop_delayed_ref()
203 struct btrfs_delayed_ref_node *ref, merge_ref()
217 if (next == ref) merge_ref()
223 if (next->type != ref->type) merge_ref()
226 if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY || merge_ref()
227 ref->type == BTRFS_SHARED_BLOCK_REF_KEY) && merge_ref()
228 comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref), merge_ref()
230 ref->type)) merge_ref()
232 if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY || merge_ref()
233 ref->type == BTRFS_SHARED_DATA_REF_KEY) && merge_ref()
234 comp_data_refs(btrfs_delayed_node_to_data_ref(ref), merge_ref()
238 if (ref->action == next->action) { merge_ref()
241 if (ref->ref_mod < next->ref_mod) { merge_ref()
242 swap(ref, next); merge_ref()
249 ref->ref_mod += mod; merge_ref()
250 if (ref->ref_mod == 0) { merge_ref()
251 drop_delayed_ref(trans, delayed_refs, head, ref); merge_ref()
255 * Can't have multiples of the same ref on a tree block. merge_ref()
257 WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY || merge_ref()
258 ref->type == BTRFS_SHARED_BLOCK_REF_KEY); merge_ref()
272 struct btrfs_delayed_ref_node *ref; btrfs_merge_delayed_refs() local
294 ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node, btrfs_merge_delayed_refs()
296 while (&ref->list != &head->ref_list) { btrfs_merge_delayed_refs()
297 if (seq && ref->seq >= seq) btrfs_merge_delayed_refs()
300 if (merge_ref(trans, delayed_refs, head, ref, seq)) { btrfs_merge_delayed_refs()
303 ref = list_first_entry(&head->ref_list, btrfs_merge_delayed_refs()
309 ref = list_next_entry(ref, list); btrfs_merge_delayed_refs()
395 struct btrfs_delayed_ref_node *ref) add_delayed_ref_tail_merge()
402 /* Check whether we can merge the tail node with ref */ add_delayed_ref_tail_merge()
408 if (exist->type != ref->type || exist->seq != ref->seq) add_delayed_ref_tail_merge()
414 btrfs_delayed_node_to_tree_ref(ref), add_delayed_ref_tail_merge()
415 ref->type)) add_delayed_ref_tail_merge()
420 btrfs_delayed_node_to_data_ref(ref))) add_delayed_ref_tail_merge()
425 if (exist->action == ref->action) { add_delayed_ref_tail_merge()
426 mod = ref->ref_mod; add_delayed_ref_tail_merge()
429 if (exist->ref_mod < ref->ref_mod) { add_delayed_ref_tail_merge()
430 exist->action = ref->action; add_delayed_ref_tail_merge()
432 exist->ref_mod = ref->ref_mod; add_delayed_ref_tail_merge()
434 mod = -ref->ref_mod; add_delayed_ref_tail_merge()
445 list_add_tail(&ref->list, &href->ref_list); add_delayed_ref_tail_merge()
453 * helper function to update the accounting in the head ref
462 struct btrfs_delayed_ref_head *ref; update_existing_head_ref() local
466 ref = btrfs_delayed_node_to_head(update); update_existing_head_ref()
467 BUG_ON(existing_ref->is_data != ref->is_data); update_existing_head_ref()
470 if (ref->must_insert_reserved) { update_existing_head_ref()
472 * reallocated before the delayed ref update_existing_head_ref()
474 * with an existing head ref without update_existing_head_ref()
478 existing_ref->must_insert_reserved = ref->must_insert_reserved; update_existing_head_ref()
488 if (ref->extent_op) { update_existing_head_ref()
490 existing_ref->extent_op = ref->extent_op; update_existing_head_ref()
492 if (ref->extent_op->update_key) { update_existing_head_ref()
494 &ref->extent_op->key, update_existing_head_ref()
495 sizeof(ref->extent_op->key)); update_existing_head_ref()
498 if (ref->extent_op->update_flags) { update_existing_head_ref()
500 ref->extent_op->flags_to_set; update_existing_head_ref()
503 btrfs_free_delayed_extent_op(ref->extent_op); update_existing_head_ref()
516 * If we are going from a positive ref mod to a negative or vice update_existing_head_ref()
536 struct btrfs_delayed_ref_node *ref, add_delayed_ref_head()
552 * the head node stores the sum of all the mods, so dropping a ref add_delayed_ref_head()
563 * if a later modification deletes the delayed ref without ever add_delayed_ref_head()
565 * ref->must_insert_reserved is the flag used to record add_delayed_ref_head()
578 /* first set the basic ref node struct up */ add_delayed_ref_head()
579 atomic_set(&ref->refs, 1); add_delayed_ref_head()
580 ref->bytenr = bytenr; add_delayed_ref_head()
581 ref->num_bytes = num_bytes; add_delayed_ref_head()
582 ref->ref_mod = count_mod; add_delayed_ref_head()
583 ref->type = 0; add_delayed_ref_head()
584 ref->action = 0; add_delayed_ref_head()
585 ref->is_head = 1; add_delayed_ref_head()
586 ref->in_tree = 1; add_delayed_ref_head()
587 ref->seq = 0; add_delayed_ref_head()
589 head_ref = btrfs_delayed_node_to_head(ref); add_delayed_ref_head()
618 trace_add_delayed_ref_head(ref, head_ref, action); add_delayed_ref_head()
625 update_existing_head_ref(delayed_refs, &existing->node, ref); add_delayed_ref_head()
627 * we've updated the existing ref, free the newly add_delayed_ref_head()
628 * allocated ref add_delayed_ref_head()
644 * helper to insert a delayed tree ref into the rbtree.
650 struct btrfs_delayed_ref_node *ref, u64 bytenr, add_delayed_tree_ref()
666 /* first set the basic ref node struct up */ add_delayed_tree_ref()
667 atomic_set(&ref->refs, 1); add_delayed_tree_ref()
668 ref->bytenr = bytenr; add_delayed_tree_ref()
669 ref->num_bytes = num_bytes; add_delayed_tree_ref()
670 ref->ref_mod = 1; add_delayed_tree_ref()
671 ref->action = action; add_delayed_tree_ref()
672 ref->is_head = 0; add_delayed_tree_ref()
673 ref->in_tree = 1; add_delayed_tree_ref()
674 ref->seq = seq; add_delayed_tree_ref()
676 full_ref = btrfs_delayed_node_to_tree_ref(ref); add_delayed_tree_ref()
680 ref->type = BTRFS_SHARED_BLOCK_REF_KEY; add_delayed_tree_ref()
682 ref->type = BTRFS_TREE_BLOCK_REF_KEY; add_delayed_tree_ref()
685 trace_add_delayed_tree_ref(ref, full_ref, action); add_delayed_tree_ref()
687 ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref); add_delayed_tree_ref()
698 * helper to insert a delayed data ref into the rbtree.
704 struct btrfs_delayed_ref_node *ref, u64 bytenr, add_delayed_data_ref()
721 /* first set the basic ref node struct up */ add_delayed_data_ref()
722 atomic_set(&ref->refs, 1); add_delayed_data_ref()
723 ref->bytenr = bytenr; add_delayed_data_ref()
724 ref->num_bytes = num_bytes; add_delayed_data_ref()
725 ref->ref_mod = 1; add_delayed_data_ref()
726 ref->action = action; add_delayed_data_ref()
727 ref->is_head = 0; add_delayed_data_ref()
728 ref->in_tree = 1; add_delayed_data_ref()
729 ref->seq = seq; add_delayed_data_ref()
731 full_ref = btrfs_delayed_node_to_data_ref(ref); add_delayed_data_ref()
735 ref->type = BTRFS_SHARED_DATA_REF_KEY; add_delayed_data_ref()
737 ref->type = BTRFS_EXTENT_DATA_REF_KEY; add_delayed_data_ref()
742 trace_add_delayed_data_ref(ref, full_ref, action); add_delayed_data_ref()
744 ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref); add_delayed_data_ref()
751 * add a delayed tree ref. This does all of the accounting required
752 * to make sure the delayed ref is eventually processed before this
761 struct btrfs_delayed_tree_ref *ref; btrfs_add_delayed_tree_ref() local
767 ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); btrfs_add_delayed_tree_ref()
768 if (!ref) btrfs_add_delayed_tree_ref()
787 * insert both the head node and the new ref without dropping btrfs_add_delayed_tree_ref()
793 add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, btrfs_add_delayed_tree_ref()
802 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); btrfs_add_delayed_tree_ref()
808 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
817 struct btrfs_delayed_data_ref *ref; btrfs_add_delayed_data_ref() local
823 ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS); btrfs_add_delayed_data_ref()
824 if (!ref) btrfs_add_delayed_data_ref()
829 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); btrfs_add_delayed_data_ref()
836 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); btrfs_add_delayed_data_ref()
849 * insert both the head node and the new ref without dropping btrfs_add_delayed_data_ref()
856 add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, btrfs_add_delayed_data_ref()
918 * It must be called with the delayed ref spinlock held, and it returns
181 drop_delayed_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_ref_node *ref) drop_delayed_ref() argument
200 merge_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_ref_node *ref, u64 seq) merge_ref() argument
392 add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *root, struct btrfs_delayed_ref_head *href, struct btrfs_delayed_ref_node *ref) add_delayed_ref_tail_merge() argument
534 add_delayed_ref_head(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *ref, struct btrfs_qgroup_extent_record *qrecord, u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved, int action, int is_data) add_delayed_ref_head() argument
647 add_delayed_tree_ref(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head_ref, struct btrfs_delayed_ref_node *ref, u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root, int level, int action) add_delayed_tree_ref() argument
701 add_delayed_data_ref(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head_ref, struct btrfs_delayed_ref_node *ref, u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root, u64 owner, u64 offset, int action) add_delayed_data_ref() argument
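The merge_ref() and add_delayed_ref_tail_merge() lines above implement the same accounting rule twice: when two queued refs for one extent carry the same action their ref_mod counts sum; when the actions differ the counts cancel, the larger side keeps its action, and a node whose ref_mod reaches zero is dropped entirely. A small user-space sketch of that arithmetic, with invented types (struct ref, enum action) standing in for btrfs_delayed_ref_node:

#include <stdio.h>

enum action { ADD_REF, DROP_REF };

struct ref {
	enum action action;
	int ref_mod;	/* how many references this node adds or drops */
	int live;	/* becomes 0 when the node cancels out */
};

/*
 * Fold 'next' into 'prev', mirroring the same-action/opposite-action
 * arithmetic visible in merge_ref() above.
 */
static void merge(struct ref *prev, struct ref *next)
{
	if (prev->action == next->action) {
		prev->ref_mod += next->ref_mod;		/* same direction: sum */
	} else {
		if (prev->ref_mod < next->ref_mod)
			prev->action = next->action;	/* bigger side wins */
		prev->ref_mod = prev->ref_mod > next->ref_mod ?
				prev->ref_mod - next->ref_mod :
				next->ref_mod - prev->ref_mod;
		if (prev->ref_mod == 0)
			prev->live = 0;			/* fully cancelled */
	}
	next->live = 0;					/* consumed */
}

int main(void)
{
	struct ref a = { ADD_REF, 2, 1 }, b = { DROP_REF, 2, 1 };

	merge(&a, &b);
	printf("after merge: live=%d ref_mod=%d\n", a.live, a.ref_mod);
	return 0;
}

Here a two-count add and a two-count drop annihilate, which is exactly the ref_mod == 0 case that drop_delayed_ref() handles above.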
H A Dinode-item.c29 struct btrfs_inode_ref *ref; find_name_in_backref() local
40 ref = (struct btrfs_inode_ref *)(ptr + cur_offset); find_name_in_backref()
41 len = btrfs_inode_ref_name_len(leaf, ref); find_name_in_backref()
42 name_ptr = (unsigned long)(ref + 1); find_name_in_backref()
43 cur_offset += len + sizeof(*ref); find_name_in_backref()
47 *ref_ret = ref; find_name_in_backref()
74 * we'll return success and the inode ref object. btrfs_find_name_in_ext_backref()
172 * Common case only one ref in the item, remove the btrfs_del_inode_extref()
200 struct btrfs_inode_ref *ref; btrfs_del_inode_ref() local
208 int del_len = name_len + sizeof(*ref); btrfs_del_inode_ref()
228 if (!find_name_in_backref(path, name, name_len, &ref)) { btrfs_del_inode_ref()
237 *index = btrfs_inode_ref_index(leaf, ref); btrfs_del_inode_ref()
243 ptr = (unsigned long)ref; btrfs_del_inode_ref()
244 sub_item_len = name_len + sizeof(*ref); btrfs_del_inode_ref()
255 * name in our ref array. Find and remove the extended btrfs_del_inode_ref()
256 * inode ref then. btrfs_del_inode_ref()
266 * btrfs_insert_inode_extref() - Inserts an extended inode ref into a tree.
333 struct btrfs_inode_ref *ref; btrfs_insert_inode_ref() local
336 int ins_len = name_len + sizeof(*ref); btrfs_insert_inode_ref()
353 if (find_name_in_backref(path, name, name_len, &ref)) btrfs_insert_inode_ref()
358 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_insert_inode_ref()
360 ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); btrfs_insert_inode_ref()
361 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); btrfs_insert_inode_ref()
362 btrfs_set_inode_ref_index(path->nodes[0], ref, index); btrfs_insert_inode_ref()
363 ptr = (unsigned long)(ref + 1); btrfs_insert_inode_ref()
367 if (find_name_in_backref(path, name, name_len, &ref)) btrfs_insert_inode_ref()
374 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_insert_inode_ref()
376 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); btrfs_insert_inode_ref()
377 btrfs_set_inode_ref_index(path->nodes[0], ref, index); btrfs_insert_inode_ref()
378 ptr = (unsigned long)(ref + 1); btrfs_insert_inode_ref()
388 /* We ran out of space in the ref array. Need to btrfs_insert_inode_ref()
389 * add an extended ref. */ btrfs_insert_inode_ref()
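find_name_in_backref() above walks an inode ref item as a sequence of variable-length records: a fixed header whose name immediately follows it (ref + 1), with the cursor advancing by len + sizeof(*ref) each step. A self-contained sketch of that packed-record scan, assuming a hypothetical struct rec layout rather than the on-disk btrfs_inode_ref:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical packed record: fixed header immediately followed by
 * 'name_len' bytes of name, records laid out back to back.
 */
struct rec {
	uint16_t name_len;
	uint64_t index;
} __attribute__((packed));

/* Scan the packed region for a record whose name matches. */
static const struct rec *find_name(const uint8_t *item, size_t item_len,
				   const char *name, size_t name_len)
{
	size_t off = 0;

	while (off + sizeof(struct rec) <= item_len) {
		const struct rec *r = (const struct rec *)(item + off);
		const char *rname = (const char *)(r + 1); /* name follows */

		if (r->name_len == name_len && !memcmp(rname, name, name_len))
			return r;
		off += sizeof(struct rec) + r->name_len;   /* next record */
	}
	return NULL;
}

int main(void)
{
	uint8_t item[64];
	struct rec r1 = { 3, 11 }, r2 = { 4, 22 };
	size_t n = 0;

	memcpy(item + n, &r1, sizeof(r1)); n += sizeof(r1);
	memcpy(item + n, "foo", 3);        n += 3;
	memcpy(item + n, &r2, sizeof(r2)); n += sizeof(r2);
	memcpy(item + n, "barz", 4);       n += 4;

	const struct rec *hit = find_name(item, n, "barz", 4);
	printf("found index %llu\n", hit ? (unsigned long long)hit->index : 0ULL);
	return 0;
}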
H A Ddelayed-ref.h25 #define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
28 * XXX: Qu: I really hate the design that ref_head and tree/data ref share the
30 * Ref_head is in a higher logic level than tree/data ref, and duplicated
33 * This gets more disgusting after we use list to store tree/data ref in
43 /*data/tree ref use list, stored in ref_head->ref_list. */
55 /* ref count on this data structure */
65 * parent to have more than one ref on an extent
109 * with this head ref, this is not adjusted as delayed refs are run,
128 * until the delayed ref is processed. must_insert_reserved is
129 * used to flag a delayed ref so the accounting can be updated
158 /* head ref rbtree */
167 /* how many delayed ref updates we've queued, used by the
219 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref) btrfs_put_delayed_ref() argument
221 WARN_ON(atomic_read(&ref->refs) == 0); btrfs_put_delayed_ref()
222 if (atomic_dec_and_test(&ref->refs)) { btrfs_put_delayed_ref()
223 WARN_ON(ref->in_tree); btrfs_put_delayed_ref()
224 switch (ref->type) { btrfs_put_delayed_ref()
227 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); btrfs_put_delayed_ref()
231 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); btrfs_put_delayed_ref()
234 kmem_cache_free(btrfs_delayed_ref_head_cachep, ref); btrfs_put_delayed_ref()
283 * a node might live in a head or a regular ref, this lets you
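btrfs_put_delayed_ref() above is the manual refcount-put idiom: atomically drop the count, and let the thread that hits zero free the object through a switch on its type (each type has its own kmem cache). A user-space rendering of the shape using C11 atomics in place of the kernel's atomic_t; all names here are invented:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

enum node_type { NODE_TREE, NODE_DATA };

struct node {
	atomic_int refs;
	enum node_type type;
};

static struct node *node_get(struct node *n)
{
	atomic_fetch_add(&n->refs, 1);
	return n;
}

/*
 * Drop one reference; the last dropper frees, dispatching on type the
 * way btrfs_put_delayed_ref() picks a kmem cache.
 */
static void node_put(struct node *n)
{
	if (atomic_fetch_sub(&n->refs, 1) == 1) {	/* count is now 0 */
		switch (n->type) {
		case NODE_TREE: printf("freeing tree node\n"); break;
		case NODE_DATA: printf("freeing data node\n"); break;
		}
		free(n);
	}
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));

	atomic_init(&n->refs, 1);	/* creation reference */
	n->type = NODE_DATA;

	node_get(n);			/* a second user */
	node_put(n);
	node_put(n);			/* last put frees */
	return 0;
}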
H A Dbackref.c25 #include "delayed-ref.h"
95 * from the shared data ref, we only have the leaf but we need find_extent_in_eb()
199 struct __prelim_ref *ref; __add_prelim_ref() local
204 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask); __add_prelim_ref()
205 if (!ref) __add_prelim_ref()
208 ref->root_id = root_id; __add_prelim_ref()
210 ref->key_for_search = *key; __add_prelim_ref()
230 if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY && __add_prelim_ref()
231 ref->key_for_search.offset >= LLONG_MAX) __add_prelim_ref()
232 ref->key_for_search.offset = 0; __add_prelim_ref()
234 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); __add_prelim_ref()
237 ref->inode_list = NULL; __add_prelim_ref()
238 ref->level = level; __add_prelim_ref()
239 ref->count = count; __add_prelim_ref()
240 ref->parent = parent; __add_prelim_ref()
241 ref->wanted_disk_byte = wanted_disk_byte; __add_prelim_ref()
242 list_add_tail(&ref->list, head); __add_prelim_ref()
248 struct ulist *parents, struct __prelim_ref *ref, add_all_parents()
256 struct btrfs_key *key_for_search = &ref->key_for_search; add_all_parents()
260 u64 wanted_disk_byte = ref->wanted_disk_byte; add_all_parents()
340 struct __prelim_ref *ref, __resolve_indirect_ref()
349 int level = ref->level; __resolve_indirect_ref()
352 root_key.objectid = ref->root_id; __resolve_indirect_ref()
385 ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path, __resolve_indirect_ref()
388 ret = btrfs_search_old_slot(root, &ref->key_for_search, path, __resolve_indirect_ref()
394 pr_debug("search slot in root %llu (level %d, ref count %d) returned " __resolve_indirect_ref()
396 ref->root_id, level, ref->count, ret, __resolve_indirect_ref()
397 ref->key_for_search.objectid, ref->key_for_search.type, __resolve_indirect_ref()
398 ref->key_for_search.offset); __resolve_indirect_ref()
412 ret = add_all_parents(root, path, parents, ref, level, time_seq, __resolve_indirect_ref()
431 struct __prelim_ref *ref; __resolve_indirect_refs() local
445 * we're also allowed to re-assign ref during iteration. __resolve_indirect_refs()
447 list_for_each_entry_safe(ref, ref_safe, head, list) { list_for_each_entry_safe()
448 if (ref->parent) /* already direct */ list_for_each_entry_safe()
450 if (ref->count == 0) list_for_each_entry_safe()
452 if (root_objectid && ref->root_id != root_objectid) { list_for_each_entry_safe()
456 err = __resolve_indirect_ref(fs_info, path, time_seq, ref, list_for_each_entry_safe()
470 /* we put the first parent into the ref at hand */ list_for_each_entry_safe()
473 ref->parent = node ? node->val : 0; list_for_each_entry_safe()
474 ref->inode_list = node ? list_for_each_entry_safe()
485 memcpy(new_ref, ref, sizeof(*ref)); list_for_each_entry_safe()
489 list_add(&new_ref->list, &ref->list); list_for_each_entry_safe()
527 struct __prelim_ref *ref; list_for_each() local
528 ref = list_entry(pos, struct __prelim_ref, list); list_for_each()
530 if (ref->parent) list_for_each()
532 if (ref->key_for_search.type) list_for_each()
534 BUG_ON(!ref->wanted_disk_byte); list_for_each()
535 eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte, list_for_each()
545 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); list_for_each()
547 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); list_for_each()
652 struct btrfs_delayed_tree_ref *ref; __add_delayed_refs() local
654 ref = btrfs_delayed_node_to_tree_ref(node); __add_delayed_refs()
655 ret = __add_prelim_ref(prefs, ref->root, &op_key, __add_delayed_refs()
656 ref->level + 1, 0, node->bytenr, __add_delayed_refs()
661 struct btrfs_delayed_tree_ref *ref; __add_delayed_refs() local
663 ref = btrfs_delayed_node_to_tree_ref(node); __add_delayed_refs()
665 ref->level + 1, ref->parent, __add_delayed_refs()
671 struct btrfs_delayed_data_ref *ref; __add_delayed_refs() local
672 ref = btrfs_delayed_node_to_data_ref(node); __add_delayed_refs()
674 key.objectid = ref->objectid; __add_delayed_refs()
676 key.offset = ref->offset; __add_delayed_refs()
682 if (inum && ref->objectid != inum) { __add_delayed_refs()
687 ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0, __add_delayed_refs()
693 struct btrfs_delayed_data_ref *ref; __add_delayed_refs() local
695 ref = btrfs_delayed_node_to_data_ref(node); __add_delayed_refs()
697 ref->parent, node->bytenr, __add_delayed_refs()
942 struct __prelim_ref *ref; local
968 * grab both a lock on the path and a lock on the delayed ref head.
987 * look if there are updates for this ref queued and lock the
1062 ref = list_first_entry(&prefs, struct __prelim_ref, list);
1063 WARN_ON(ref->count < 0);
1064 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1065 if (root_objectid && ref->root_id != root_objectid) {
1071 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1075 if (ref->count && ref->parent) {
1076 if (extent_item_pos && !ref->inode_list &&
1077 ref->level == 0) {
1081 ref->parent, 0);
1098 ref->inode_list = eie;
1100 ret = ulist_add_merge_ptr(refs, ref->parent,
1101 ref->inode_list,
1113 eie->next = ref->inode_list;
1117 list_del(&ref->list);
1118 kmem_cache_free(btrfs_prelim_ref_cache, ref);
1124 ref = list_first_entry(&prefs, struct __prelim_ref, list);
1125 list_del(&ref->list);
1126 kmem_cache_free(btrfs_prelim_ref_cache, ref);
1129 ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
1131 list_del(&ref->list);
1132 kmem_cache_free(btrfs_prelim_ref_cache, ref);
1260 * shared but do not need a ref count.
1359 * Check that we're still looking at an extended ref key for btrfs_find_one_extref()
1544 * next ref. after the last ref was processed, 1 is returned.
1623 /* we can treat both ref types equally here */ tree_backref_for_extent()
1650 pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), " iterate_leaf_refs()
1809 pr_debug("following ref at offset %u for inode %llu in " iterate_inode_refs()
247 add_all_parents(struct btrfs_root *root, struct btrfs_path *path, struct ulist *parents, struct __prelim_ref *ref, int level, u64 time_seq, const u64 *extent_item_pos, u64 total_refs) add_all_parents() argument
338 __resolve_indirect_ref(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 time_seq, struct __prelim_ref *ref, struct ulist *parents, const u64 *extent_item_pos, u64 total_refs) __resolve_indirect_ref() argument
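__resolve_indirect_refs() above iterates its prelim-ref list with list_for_each_entry_safe() precisely because it inserts clones mid-walk: the first resolved parent is written into the ref at hand and each further parent gets a copied node added after it. A sketch of that clone-while-iterating pattern on a plain singly linked list (struct pref and expand() are invented; error handling is omitted):

#include <stdio.h>
#include <stdlib.h>

struct pref {
	int parent;		/* 0 means not yet resolved to a parent */
	struct pref *next;
};

/* Resolve each indirect node to one or more parents, cloning in place. */
static void expand(struct pref *head, const int *parents, int n)
{
	for (struct pref *p = head; p; p = p->next) {
		if (p->parent)
			continue;		/* already direct, skip */
		p->parent = parents[0];		/* first parent reuses node */
		for (int i = n - 1; i >= 1; i--) {
			struct pref *clone = malloc(sizeof(*clone));

			*clone = *p;
			clone->parent = parents[i];
			clone->next = p->next;	/* insert after current */
			p->next = clone;
		}
	}
}

int main(void)
{
	struct pref a = { 0, NULL };
	int parents[] = { 10, 11, 12 };

	expand(&a, parents, 3);
	for (struct pref *p = &a; p; p = p->next)
		printf("parent %d\n", p->parent);
	return 0;
}

Skipping nodes that already have a parent is what keeps the walk from reprocessing its own freshly inserted clones.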
H A Droot-tree.c364 struct btrfs_root_ref *ref; btrfs_del_root_ref() local
383 ref = btrfs_item_ptr(leaf, path->slots[0], btrfs_del_root_ref()
386 WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid); btrfs_del_root_ref()
387 WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len); btrfs_del_root_ref()
388 ptr = (unsigned long)(ref + 1); btrfs_del_root_ref()
390 *sequence = btrfs_root_ref_sequence(leaf, ref); btrfs_del_root_ref()
420 * For a forward ref, the root_id is the id of the tree referencing
423 * For a back ref the root_id is the id of the subvol or snapshot and
436 struct btrfs_root_ref *ref; btrfs_add_root_ref() local
449 sizeof(*ref) + name_len); btrfs_add_root_ref()
457 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); btrfs_add_root_ref()
458 btrfs_set_root_ref_dirid(leaf, ref, dirid); btrfs_add_root_ref()
459 btrfs_set_root_ref_sequence(leaf, ref, sequence); btrfs_add_root_ref()
460 btrfs_set_root_ref_name_len(leaf, ref, name_len); btrfs_add_root_ref()
461 ptr = (unsigned long)(ref + 1); btrfs_add_root_ref()
H A Dprint-tree.c47 struct btrfs_extent_data_ref *ref) print_extent_data_ref()
51 btrfs_extent_data_ref_root(eb, ref), print_extent_data_ref()
52 btrfs_extent_data_ref_objectid(eb, ref), print_extent_data_ref()
53 btrfs_extent_data_ref_offset(eb, ref), print_extent_data_ref()
54 btrfs_extent_data_ref_count(eb, ref)); print_extent_data_ref()
144 printk("\t\textent back ref root %llu gen %llu " print_extent_ref_v0()
46 print_extent_data_ref(struct extent_buffer *eb, struct btrfs_extent_data_ref *ref) print_extent_data_ref() argument
H A Dqgroup.h23 #include "delayed-ref.h"
85 * called from everywhere, can't provide good trace for delayed ref case.
H A Dhash.h34 * Figure the key offset of an extended inode ref
H A Dextent-tree.c799 * the head node for delayed ref is used to store the sum of all the
1000 * The extent ref structure for the implicit back refs has fields for:
1010 * The extent ref structure for the full back refs has field for:
1140 struct btrfs_extent_data_ref *ref) hash_extent_data_ref_item()
1142 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref), hash_extent_data_ref_item()
1143 btrfs_extent_data_ref_objectid(leaf, ref), hash_extent_data_ref_item()
1144 btrfs_extent_data_ref_offset(leaf, ref)); hash_extent_data_ref_item()
1148 struct btrfs_extent_data_ref *ref, match_extent_data_ref()
1151 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid || match_extent_data_ref()
1152 btrfs_extent_data_ref_objectid(leaf, ref) != owner || match_extent_data_ref()
1153 btrfs_extent_data_ref_offset(leaf, ref) != offset) match_extent_data_ref()
1166 struct btrfs_extent_data_ref *ref; lookup_extent_data_ref() local
1227 ref = btrfs_item_ptr(leaf, path->slots[0], lookup_extent_data_ref()
1230 if (match_extent_data_ref(leaf, ref, root_objectid, lookup_extent_data_ref()
1276 struct btrfs_shared_data_ref *ref; insert_extent_data_ref() local
1277 ref = btrfs_item_ptr(leaf, path->slots[0], insert_extent_data_ref()
1280 btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add); insert_extent_data_ref()
1282 num_refs = btrfs_shared_data_ref_count(leaf, ref); insert_extent_data_ref()
1284 btrfs_set_shared_data_ref_count(leaf, ref, num_refs); insert_extent_data_ref()
1287 struct btrfs_extent_data_ref *ref; insert_extent_data_ref() local
1289 ref = btrfs_item_ptr(leaf, path->slots[0], insert_extent_data_ref()
1291 if (match_extent_data_ref(leaf, ref, root_objectid, insert_extent_data_ref()
1303 ref = btrfs_item_ptr(leaf, path->slots[0], insert_extent_data_ref()
1306 btrfs_set_extent_data_ref_root(leaf, ref, insert_extent_data_ref()
1308 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); insert_extent_data_ref()
1309 btrfs_set_extent_data_ref_offset(leaf, ref, offset); insert_extent_data_ref()
1310 btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add); insert_extent_data_ref()
1312 num_refs = btrfs_extent_data_ref_count(leaf, ref); insert_extent_data_ref()
1314 btrfs_set_extent_data_ref_count(leaf, ref, num_refs); insert_extent_data_ref()
1518 * look for inline back ref. if back ref is found, *ref_ret is set
1519 * to the address of inline back ref, and 0 is returned.
1521 * if back ref isn't found, *ref_ret is set to the address where it
1527 * NOTE: inline back refs are ordered in the same way that back ref
1699 * To add new inline back ref, we have to make sure lookup_inline_extent_backref()
1700 * there is no corresponding back ref item. lookup_inline_extent_backref()
1702 * ref if there is any kind of item for this block lookup_inline_extent_backref()
1721 * helper to add new inline back ref
1813 * helper to update/remove inline back ref
2120 /* this will setup the path even if it fails to insert the back ref */ __btrfs_inc_extent_ref()
2130 * inline extent ref, so just update the reference count and add a __btrfs_inc_extent_ref()
2164 struct btrfs_delayed_data_ref *ref; run_delayed_data_ref() local
2174 ref = btrfs_delayed_node_to_data_ref(node); run_delayed_data_ref()
2175 trace_run_delayed_data_ref(node, ref, node->action); run_delayed_data_ref()
2178 parent = ref->parent; run_delayed_data_ref()
2179 ref_root = ref->root; run_delayed_data_ref()
2186 ref->objectid, ref->offset, run_delayed_data_ref()
2190 ref_root, ref->objectid, run_delayed_data_ref()
2191 ref->offset, node->ref_mod, run_delayed_data_ref()
2195 ref_root, ref->objectid, run_delayed_data_ref()
2196 ref->offset, node->ref_mod, run_delayed_data_ref()
2322 struct btrfs_delayed_tree_ref *ref; run_delayed_tree_ref() local
2329 ref = btrfs_delayed_node_to_tree_ref(node); run_delayed_tree_ref()
2330 trace_run_delayed_tree_ref(node, ref, node->action); run_delayed_tree_ref()
2333 parent = ref->parent; run_delayed_tree_ref()
2334 ref_root = ref->root; run_delayed_tree_ref()
2338 ins.offset = ref->level; run_delayed_tree_ref()
2352 ref->level, &ins); run_delayed_tree_ref()
2356 ref->level, 0, 1, run_delayed_tree_ref()
2361 ref->level, 0, 1, extent_op); run_delayed_tree_ref()
2368 /* helper function to actually process a single delayed ref entry */ run_one_delayed_ref()
2429 struct btrfs_delayed_ref_node *ref; select_delayed_ref() local
2435 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first. select_delayed_ref()
2436 * This is to prevent a ref count from going down to zero, which deletes select_delayed_ref()
2440 list_for_each_entry(ref, &head->ref_list, list) { select_delayed_ref()
2441 if (ref->action == BTRFS_ADD_DELAYED_REF) select_delayed_ref()
2442 return ref; select_delayed_ref()
2458 struct btrfs_delayed_ref_node *ref; __btrfs_run_delayed_refs() local
2499 * We need to try and merge add/drops of the same ref since we __btrfs_run_delayed_refs()
2500 * can run into issues with relocate dropping the implicit ref __btrfs_run_delayed_refs()
2503 * get a good ref. __btrfs_run_delayed_refs()
2516 * node back for any delayed ref updates __btrfs_run_delayed_refs()
2518 ref = select_delayed_ref(locked_ref); __btrfs_run_delayed_refs()
2520 if (ref && ref->seq && __btrfs_run_delayed_refs()
2521 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) { __btrfs_run_delayed_refs()
2544 if (!ref) { __btrfs_run_delayed_refs()
2551 ref = &locked_ref->node; __btrfs_run_delayed_refs()
2561 ref, extent_op); __btrfs_run_delayed_refs()
2582 * Need to drop our head ref lock and re-acquire the __btrfs_run_delayed_refs()
2583 * delayed ref lock and then re-check to make sure __btrfs_run_delayed_refs()
2595 ref->in_tree = 0; __btrfs_run_delayed_refs()
2602 ref->in_tree = 0; __btrfs_run_delayed_refs()
2603 list_del(&ref->list); __btrfs_run_delayed_refs()
2607 if (!btrfs_delayed_ref_is_head(ref)) { __btrfs_run_delayed_refs()
2609 * when we play the delayed ref, also correct the __btrfs_run_delayed_refs()
2612 switch (ref->action) { __btrfs_run_delayed_refs()
2615 locked_ref->node.ref_mod -= ref->ref_mod; __btrfs_run_delayed_refs()
2618 locked_ref->node.ref_mod += ref->ref_mod; __btrfs_run_delayed_refs()
2626 ret = run_one_delayed_ref(trans, root, ref, extent_op, __btrfs_run_delayed_refs()
2633 btrfs_put_delayed_ref(ref); __btrfs_run_delayed_refs()
2644 if (btrfs_delayed_ref_is_head(ref)) { __btrfs_run_delayed_refs()
2648 delayed_refs->pending_csums -= ref->num_bytes; __btrfs_run_delayed_refs()
2654 btrfs_put_delayed_ref(ref); __btrfs_run_delayed_refs()
2660 * We don't want to include ref heads since we can have empty ref heads __btrfs_run_delayed_refs()
2947 struct btrfs_delayed_ref_node *ref; btrfs_run_delayed_refs() local
2949 ref = &head->node; btrfs_run_delayed_refs()
2950 atomic_inc(&ref->refs); btrfs_run_delayed_refs()
2960 btrfs_put_delayed_ref(ref); btrfs_run_delayed_refs()
3009 struct btrfs_delayed_ref_node *ref; check_delayed_ref() local
3040 list_for_each_entry(ref, &head->ref_list, list) { check_delayed_ref()
3041 /* If it's a shared ref we know a cross reference exists */ check_delayed_ref()
3042 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { check_delayed_ref()
3047 data_ref = btrfs_delayed_node_to_data_ref(ref); check_delayed_ref()
3050 * If our ref doesn't match the one we're currently looking at check_delayed_ref()
3072 struct btrfs_extent_data_ref *ref; check_committed_ref() local
3122 ref = (struct btrfs_extent_data_ref *)(&iref->offset); check_committed_ref()
3124 btrfs_extent_data_ref_count(leaf, ref) || check_committed_ref()
3125 btrfs_extent_data_ref_root(leaf, ref) != check_committed_ref()
3127 btrfs_extent_data_ref_objectid(leaf, ref) != objectid || check_committed_ref()
3128 btrfs_extent_data_ref_offset(leaf, ref) != offset) check_committed_ref()
6546 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu", __btrfs_free_extent()
6617 * In the case of inline back ref, reference count will __btrfs_free_extent()
6682 * delayed ref for that extent as well. This searches the delayed ref tree for
6727 * we don't take a ref on the node because we're removing it from the check_ref_cleanup()
6728 * tree, so we just steal the ref the tree was holding. check_ref_cleanup()
7662 struct btrfs_shared_data_ref *ref; alloc_reserved_file_extent() local
7663 ref = (struct btrfs_shared_data_ref *)(iref + 1); alloc_reserved_file_extent()
7665 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); alloc_reserved_file_extent()
7667 struct btrfs_extent_data_ref *ref; alloc_reserved_file_extent() local
7668 ref = (struct btrfs_extent_data_ref *)(&iref->offset); alloc_reserved_file_extent()
7669 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); alloc_reserved_file_extent()
7670 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); alloc_reserved_file_extent()
7671 btrfs_set_extent_data_ref_offset(leaf, ref, offset); alloc_reserved_file_extent()
7672 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); alloc_reserved_file_extent()
8114 * These may not be seen by the usual inc/dec ref code so we have to
10101 /* One for the block groups ref */ btrfs_remove_block_group()
10111 /* One for our lookup ref */ btrfs_remove_block_group()
1139 hash_extent_data_ref_item(struct extent_buffer *leaf, struct btrfs_extent_data_ref *ref) hash_extent_data_ref_item() argument
1147 match_extent_data_ref(struct extent_buffer *leaf, struct btrfs_extent_data_ref *ref, u64 root_objectid, u64 owner, u64 offset) match_extent_data_ref() argument
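select_delayed_ref() above encodes an ordering policy, not just a scan: among the pending refs for one extent it runs BTRFS_ADD_DELAYED_REF entries before any drop, so the reference count can never transiently hit zero and free an extent that a later add still needs. The policy in isolation, with invented names:

#include <stddef.h>
#include <stdio.h>

enum action { DELAYED_ADD, DELAYED_DROP };

struct pending { enum action action; int id; };

/*
 * Prefer an ADD over any DROP, mirroring select_delayed_ref(): running
 * a drop first could push the count to zero and free a still-needed
 * extent.  Falls back to the first entry when no ADD is queued.
 */
static const struct pending *select_ref(const struct pending *list, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (list[i].action == DELAYED_ADD)
			return &list[i];
	return n ? &list[0] : NULL;
}

int main(void)
{
	struct pending q[] = {
		{ DELAYED_DROP, 1 }, { DELAYED_DROP, 2 }, { DELAYED_ADD, 3 },
	};
	const struct pending *p = select_ref(q, 3);

	printf("run ref %d first\n", p->id);	/* picks the ADD, id 3 */
	return 0;
}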
H A Dexport.c159 struct btrfs_root_ref *ref; btrfs_get_parent() local
199 ref = btrfs_item_ptr(leaf, path->slots[0], btrfs_get_parent()
201 key.objectid = btrfs_root_ref_dirid(leaf, ref); btrfs_get_parent()
H A Dsend.c1100 * ref/hardlink, this is ignored.
1806 * Used by process_recorded_refs to determine if a new ref would overwrite an
1807 * already existing ref. In case it detects an overwrite, it returns the
1811 * Orphanizing is however only required for the first ref of an inode.
1859 * Check if the overwritten ref was already processed. If yes, the ref will_overwrite_ref()
1880 * Checks if the ref was overwritten by an already processed inode. This is
1881 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1903 /* check if the ref was overwritten by another ref */ did_overwrite_ref()
1942 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
2092 * Used by get_cur_path for each ref up to the root.
2151 * send_root or parent_root for ref lookup. __get_cur_name_and_parent()
2163 * Check if the ref was overwritten by an inode's ref that was processed __get_cur_name_and_parent()
2211 * Magic happens here. This function returns the first ref to an inode as it
2226 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2227 * of an unprocessed inode. If in that case the first ref would be
2309 struct btrfs_root_ref *ref; send_subvol_begin() local
2344 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); send_subvol_begin()
2345 namelen = btrfs_root_ref_name_len(leaf, ref); send_subvol_begin()
2346 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); send_subvol_begin()
2726 * This function is a helper to record one ref.
2731 struct recorded_ref *ref; __record_ref() local
2733 ref = kmalloc(sizeof(*ref), GFP_NOFS); __record_ref()
2734 if (!ref) __record_ref()
2737 ref->dir = dir; __record_ref()
2738 ref->dir_gen = dir_gen; __record_ref()
2739 ref->full_path = path; __record_ref()
2741 ref->name = (char *)kbasename(ref->full_path->start); __record_ref()
2742 ref->name_len = ref->full_path->end - ref->name; __record_ref()
2743 ref->dir_path = ref->full_path->start; __record_ref()
2744 if (ref->name == ref->full_path->start) __record_ref()
2745 ref->dir_path_len = 0; __record_ref()
2747 ref->dir_path_len = ref->full_path->end - __record_ref()
2748 ref->full_path->start - 1 - ref->name_len; __record_ref()
2750 list_add_tail(&ref->list, head); __record_ref()
2754 static int dup_ref(struct recorded_ref *ref, struct list_head *list) dup_ref() argument
2758 new = kmalloc(sizeof(*ref), GFP_NOFS); dup_ref()
2762 new->dir = ref->dir; dup_ref()
2763 new->dir_gen = ref->dir_gen; dup_ref()
2790 * ref of an unprocessed inode gets overwritten and for all non empty
3541 * This should never happen as the root dir always has the same ref process_recorded_refs()
3554 * First, check if the first ref of the current inode was overwritten process_recorded_refs()
3557 * get the path of the first ref as it would like while receiving at process_recorded_refs()
3561 * The first ref is stored in valid_path and will be updated if it process_recorded_refs()
3627 * Check if this new ref would overwrite the first ref of process_recorded_refs()
3629 * overwritten inode. If we find an overwritten ref that is process_recorded_refs()
3630 * not the first ref, simply unlink it. process_recorded_refs()
3694 * link/move the ref to the new place. If we have an orphan process_recorded_refs()
3711 * ref. The deleted ref is ignored later. process_recorded_refs()
3793 * happen when a previous inode did overwrite the first ref process_recorded_refs()
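__record_ref() above derives three views of one allocated path: the full path, the final name component via kbasename(), and the directory prefix whose length is computed as full_path->end - start - 1 - name_len (zero when the name is the whole path). A sketch of the same split in plain C; basename_ is an invented stand-in for kbasename():

#include <stdio.h>
#include <string.h>

/* Return the component after the last '/', or the whole path. */
static const char *basename_(const char *path)
{
	const char *slash = strrchr(path, '/');

	return slash ? slash + 1 : path;
}

int main(void)
{
	const char *full = "a/b/c";
	const char *name = basename_(full);
	/* directory prefix excludes the separating '/' before the name */
	size_t dirlen = (name == full) ? 0 : (size_t)(name - full - 1);

	printf("name=%s dir=%.*s\n", name, (int)dirlen, full);
	return 0;
}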
/linux-4.4.14/drivers/gpu/drm/
H A Ddrm_global.c66 int drm_global_item_ref(struct drm_global_reference *ref) drm_global_item_ref() argument
69 struct drm_global_item *item = &glob[ref->global_type]; drm_global_item_ref()
73 item->object = kzalloc(ref->size, GFP_KERNEL); drm_global_item_ref()
79 ref->object = item->object; drm_global_item_ref()
80 ret = ref->init(ref); drm_global_item_ref()
86 ref->object = item->object; drm_global_item_ref()
96 void drm_global_item_unref(struct drm_global_reference *ref) drm_global_item_unref() argument
98 struct drm_global_item *item = &glob[ref->global_type]; drm_global_item_unref()
102 BUG_ON(ref->object != item->object); drm_global_item_unref()
104 ref->release(ref); drm_global_item_unref()
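drm_global_item_ref()/unref() above manage a lazily created, refcounted global: the first caller allocates and initializes the shared object under a lock, later callers only bump the count, and the last unref destroys it. A user-space sketch of the pattern with a pthread mutex; struct global_item and the calloc step are invented simplifications (the kernel version runs ref->init() and ref->release() hooks instead):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct global_item {
	pthread_mutex_t lock;
	int refcount;
	void *object;
	size_t size;
};

static struct global_item item = {
	.lock = PTHREAD_MUTEX_INITIALIZER, .size = 64,
};

static void *item_ref(void)
{
	pthread_mutex_lock(&item.lock);
	if (item.refcount++ == 0)
		item.object = calloc(1, item.size);	/* first user creates */
	pthread_mutex_unlock(&item.lock);
	return item.object;
}

static void item_unref(void)
{
	pthread_mutex_lock(&item.lock);
	if (--item.refcount == 0) {			/* last user destroys */
		free(item.object);
		item.object = NULL;
	}
	pthread_mutex_unlock(&item.lock);
}

int main(void)
{
	void *a = item_ref(), *b = item_ref();

	printf("same object: %s\n", a == b ? "yes" : "no");
	item_unref();
	item_unref();
	return 0;
}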
H A Ddrm_drv.c564 * The initial ref-count of the object is 1. Use drm_dev_ref() and
565 * drm_dev_unref() to take and drop further ref-counts.
582 kref_init(&dev->ref); drm_dev_alloc()
653 static void drm_dev_release(struct kref *ref) drm_dev_release() argument
655 struct drm_device *dev = container_of(ref, struct drm_device, ref); drm_dev_release()
677 * This increases the ref-count of @dev by one. You *must* already own a
688 kref_get(&dev->ref); drm_dev_ref()
696 * This decreases the ref-count of @dev by one. The device is destroyed if the
697 * ref-count drops to zero.
702 kref_put(&dev->ref, drm_dev_release); drm_dev_unref()
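The drm_dev lines above show the canonical kref lifecycle: kref_init() at allocation gives a count of 1, drm_dev_ref() is kref_get(), and drm_dev_unref() is kref_put() with a release callback that recovers the containing drm_device via container_of(). A self-contained user-space re-creation of the shape; the kref_*_ helpers and struct device_ here are local stand-ins, not the <linux/kref.h> API:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct kref { atomic_int refcount; };

static void kref_init_(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get_(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }

static void kref_put_(struct kref *k, void (*release)(struct kref *))
{
	if (atomic_fetch_sub(&k->refcount, 1) == 1)	/* count hit zero */
		release(k);
}

#define container_of_(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* An object with an embedded kref, like struct drm_device above. */
struct device_ {
	int id;
	struct kref ref;
};

static void device_release(struct kref *k)
{
	/* recover the outer object from the embedded kref */
	struct device_ *dev = container_of_(k, struct device_, ref);

	printf("device %d released\n", dev->id);
	free(dev);
}

int main(void)
{
	struct device_ *dev = malloc(sizeof(*dev));

	dev->id = 7;
	kref_init_(&dev->ref);		/* initial ref-count of 1 */
	kref_get_(&dev->ref);		/* a second holder */
	kref_put_(&dev->ref, device_release);
	kref_put_(&dev->ref, device_release);	/* last put releases */
	return 0;
}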
H A Ddrm_prime.c46 * dma_buf (which in turn holds a ref to the exporting GEM object).
372 /* normally the created dma-buf takes ownership of the ref, export_and_register_object()
373 * but if that fails then drop the ref export_and_register_object()
385 /* Grab a new ref since the callers is now used by the dma-buf */ export_and_register_object()
442 /* normally the created dma-buf takes ownership of the ref, drm_gem_prime_handle_to_fd()
443 * but if that fails then drop the ref drm_gem_prime_handle_to_fd()
/linux-4.4.14/drivers/iio/adc/
H A Dti-adc081c.c19 struct regulator *ref; member in struct:adc081c
41 err = regulator_get_voltage(adc->ref); adc081c_read_raw()
85 adc->ref = devm_regulator_get(&client->dev, "vref"); adc081c_probe()
86 if (IS_ERR(adc->ref)) adc081c_probe()
87 return PTR_ERR(adc->ref); adc081c_probe()
89 err = regulator_enable(adc->ref); adc081c_probe()
110 regulator_disable(adc->ref); adc081c_probe()
121 regulator_disable(adc->ref); adc081c_remove()
/linux-4.4.14/drivers/gpu/drm/ttm/
H A Dttm_object.c55 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
56 * for fast lookup of ref objects given a base object.
104 * @head: List entry for the per-file list of ref-objects.
108 * @obj: Base object this ref object is referencing.
110 * @ref_type: Type of ref object.
115 * a particular ttm_object_file. It also carries a ref count to avoid creating
116 * multiple ref objects if a ttm_object_file references the same base
274 * ttm_ref_object_exists - Check whether a caller has a valid ref object
288 struct ttm_ref_object *ref; ttm_ref_object_exists() local
295 * Verify that the ref object is really pointing to our base object. ttm_ref_object_exists()
296 * Our base object could actually be dead, and the ref object pointing ttm_ref_object_exists()
299 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ttm_ref_object_exists()
300 if (unlikely(base != ref->obj)) ttm_ref_object_exists()
304 * Verify that the ref->obj pointer was actually valid! ttm_ref_object_exists()
307 if (unlikely(atomic_read(&ref->kref.refcount) == 0)) ttm_ref_object_exists()
324 struct ttm_ref_object *ref; ttm_ref_object_add() local
340 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ttm_ref_object_add()
341 if (kref_get_unless_zero(&ref->kref)) { ttm_ref_object_add()
348 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), ttm_ref_object_add()
352 ref = kmalloc(sizeof(*ref), GFP_KERNEL); ttm_ref_object_add()
353 if (unlikely(ref == NULL)) { ttm_ref_object_add()
354 ttm_mem_global_free(mem_glob, sizeof(*ref)); ttm_ref_object_add()
358 ref->hash.key = base->hash.key; ttm_ref_object_add()
359 ref->obj = base; ttm_ref_object_add()
360 ref->tfile = tfile; ttm_ref_object_add()
361 ref->ref_type = ref_type; ttm_ref_object_add()
362 kref_init(&ref->kref); ttm_ref_object_add()
365 ret = drm_ht_insert_item_rcu(ht, &ref->hash); ttm_ref_object_add()
368 list_add_tail(&ref->head, &tfile->ref_list); ttm_ref_object_add()
379 ttm_mem_global_free(mem_glob, sizeof(*ref)); ttm_ref_object_add()
380 kfree(ref); ttm_ref_object_add()
389 struct ttm_ref_object *ref = ttm_ref_object_release() local
391 struct ttm_base_object *base = ref->obj; ttm_ref_object_release()
392 struct ttm_object_file *tfile = ref->tfile; ttm_ref_object_release()
396 ht = &tfile->ref_hash[ref->ref_type]; ttm_ref_object_release()
397 (void)drm_ht_remove_item_rcu(ht, &ref->hash); ttm_ref_object_release()
398 list_del(&ref->head); ttm_ref_object_release()
401 if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) ttm_ref_object_release()
402 base->ref_obj_release(base, ref->ref_type); ttm_ref_object_release()
404 ttm_base_object_unref(&ref->obj); ttm_ref_object_release()
405 ttm_mem_global_free(mem_glob, sizeof(*ref)); ttm_ref_object_release()
406 kfree_rcu(ref, rcu_head); ttm_ref_object_release()
414 struct ttm_ref_object *ref; ttm_ref_object_base_unref() local
424 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ttm_ref_object_base_unref()
425 kref_put(&ref->kref, ttm_ref_object_release); ttm_ref_object_base_unref()
433 struct ttm_ref_object *ref; ttm_object_file_release() local
448 ref = list_entry(list, struct ttm_ref_object, head); ttm_object_file_release()
449 ttm_ref_object_release(&ref->kref); ttm_object_file_release()
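ttm_ref_object_add() above shows the lockless-lookup guard: after finding a ref object in the RCU-protected hash table it takes a reference with kref_get_unless_zero(), because an object whose count has already fallen to zero is mid-teardown and must not be revived, so the caller allocates a fresh one instead. A sketch of that conditional increment as a C11 compare-exchange loop; get_unless_zero is an invented name:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int refcount; };

/*
 * Take a reference only if the object is still live (count > 0) --
 * the guard ttm_ref_object_add() relies on after its hash lookup.
 */
static bool get_unless_zero(struct obj *o)
{
	int c = atomic_load(&o->refcount);

	while (c != 0) {
		/* try to bump c -> c+1; on failure c is reloaded, retry */
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;
	}
	return false;	/* object is being torn down; caller reallocates */
}

int main(void)
{
	struct obj live, dying;

	atomic_init(&live.refcount, 1);
	atomic_init(&dying.refcount, 0);

	printf("live:  %s\n", get_unless_zero(&live)  ? "got ref" : "miss");
	printf("dying: %s\n", get_unless_zero(&dying) ? "got ref" : "miss");
	return 0;
}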
/linux-4.4.14/drivers/gpu/host1x/
H A Dintr.h74 * @ref must be passed if cancellation is possible, else NULL
80 struct host1x_waitlist *waiter, void **ref);
84 * You must call this if you passed non-NULL as ref.
85 * @ref the ref returned from host1x_intr_add_action()
87 void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
H A Dintr.c214 struct host1x_waitlist *waiter, void **ref) host1x_intr_add_action()
227 if (ref) host1x_intr_add_action()
252 if (ref) host1x_intr_add_action()
253 *ref = waiter; host1x_intr_add_action()
257 void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref) host1x_intr_put_ref() argument
259 struct host1x_waitlist *waiter = ref; host1x_intr_put_ref()
212 host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, enum host1x_intr_action action, void *data, struct host1x_waitlist *waiter, void **ref) host1x_intr_add_action() argument
H A Djob.c58 kref_init(&job->ref); host1x_job_alloc()
82 kref_get(&job->ref); host1x_job_get()
87 static void job_free(struct kref *ref) job_free() argument
89 struct host1x_job *job = container_of(ref, struct host1x_job, ref); job_free()
96 kref_put(&job->ref, job_free); host1x_job_put()
H A Dsyncpt.c192 void *ref; host1x_syncpt_wait() local
230 &wq, waiter, &ref); host1x_syncpt_wait()
268 host1x_intr_put_ref(sp->host, sp->id, ref); host1x_syncpt_wait()
/linux-4.4.14/drivers/xen/
H A Dgrant-table.c98 void (*update_entry)(grant_ref_t ref, domid_t domid,
108 int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
116 unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
123 int (*query_foreign_access)(grant_ref_t ref);
152 int ref, rc = 0; get_free_entries() local
163 ref = head = gnttab_free_head; get_free_entries()
172 return ref; get_free_entries()
201 static void put_free_entry(grant_ref_t ref) put_free_entry() argument
205 gnttab_entry(ref) = gnttab_free_head; put_free_entry()
206 gnttab_free_head = ref; put_free_entry()
223 static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid, gnttab_update_entry_v1() argument
226 gnttab_shared.v1[ref].domid = domid; gnttab_update_entry_v1()
227 gnttab_shared.v1[ref].frame = frame; gnttab_update_entry_v1()
229 gnttab_shared.v1[ref].flags = flags; gnttab_update_entry_v1()
235 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, gnttab_grant_foreign_access_ref() argument
238 gnttab_interface->update_entry(ref, domid, frame, gnttab_grant_foreign_access_ref()
246 int ref; gnttab_grant_foreign_access() local
248 ref = get_free_entries(1); gnttab_grant_foreign_access()
249 if (unlikely(ref < 0)) gnttab_grant_foreign_access()
252 gnttab_grant_foreign_access_ref(ref, domid, frame, readonly); gnttab_grant_foreign_access()
254 return ref; gnttab_grant_foreign_access()
258 static int gnttab_query_foreign_access_v1(grant_ref_t ref) gnttab_query_foreign_access_v1() argument
260 return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing); gnttab_query_foreign_access_v1()
263 int gnttab_query_foreign_access(grant_ref_t ref) gnttab_query_foreign_access() argument
265 return gnttab_interface->query_foreign_access(ref); gnttab_query_foreign_access()
269 static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) gnttab_end_foreign_access_ref_v1() argument
274 pflags = &gnttab_shared.v1[ref].flags; gnttab_end_foreign_access_ref_v1()
285 static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) _gnttab_end_foreign_access_ref() argument
287 return gnttab_interface->end_foreign_access_ref(ref, readonly); _gnttab_end_foreign_access_ref()
290 int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) gnttab_end_foreign_access_ref() argument
292 if (_gnttab_end_foreign_access_ref(ref, readonly)) gnttab_end_foreign_access_ref()
294 pr_warn("WARNING: g.e. %#x still in use!\n", ref); gnttab_end_foreign_access_ref()
301 grant_ref_t ref; member in struct:deferred_entry
326 if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) { gnttab_handle_deferred()
327 put_free_entry(entry->ref); gnttab_handle_deferred()
330 entry->ref, page_to_pfn(entry->page)); gnttab_handle_deferred()
333 pr_info("freeing g.e. %#x\n", entry->ref); gnttab_handle_deferred()
338 pr_info("g.e. %#x still pending\n", entry->ref); gnttab_handle_deferred()
355 static void gnttab_add_deferred(grant_ref_t ref, bool readonly, gnttab_add_deferred() argument
364 entry->ref = ref; gnttab_add_deferred()
378 what, ref, page ? page_to_pfn(page) : -1); gnttab_add_deferred()
381 void gnttab_end_foreign_access(grant_ref_t ref, int readonly, gnttab_end_foreign_access() argument
384 if (gnttab_end_foreign_access_ref(ref, readonly)) { gnttab_end_foreign_access()
385 put_free_entry(ref); gnttab_end_foreign_access()
389 gnttab_add_deferred(ref, readonly, gnttab_end_foreign_access()
396 int ref; gnttab_grant_foreign_transfer() local
398 ref = get_free_entries(1); gnttab_grant_foreign_transfer()
399 if (unlikely(ref < 0)) gnttab_grant_foreign_transfer()
401 gnttab_grant_foreign_transfer_ref(ref, domid, pfn); gnttab_grant_foreign_transfer()
403 return ref; gnttab_grant_foreign_transfer()
407 void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, gnttab_grant_foreign_transfer_ref() argument
410 gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer); gnttab_grant_foreign_transfer_ref()
414 static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref) gnttab_end_foreign_transfer_ref_v1() argument
420 pflags = &gnttab_shared.v1[ref].flags; gnttab_end_foreign_transfer_ref_v1()
439 frame = gnttab_shared.v1[ref].frame; gnttab_end_foreign_transfer_ref_v1()
445 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) gnttab_end_foreign_transfer_ref() argument
447 return gnttab_interface->end_foreign_transfer_ref(ref); gnttab_end_foreign_transfer_ref()
451 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) gnttab_end_foreign_transfer() argument
453 unsigned long frame = gnttab_end_foreign_transfer_ref(ref); gnttab_end_foreign_transfer()
454 put_free_entry(ref); gnttab_end_foreign_transfer()
459 void gnttab_free_grant_reference(grant_ref_t ref) gnttab_free_grant_reference() argument
461 put_free_entry(ref); gnttab_free_grant_reference()
467 grant_ref_t ref; gnttab_free_grant_references() local
473 ref = head; gnttab_free_grant_references()
474 while (gnttab_entry(ref) != GNTTAB_LIST_END) { gnttab_free_grant_references()
475 ref = gnttab_entry(ref); gnttab_free_grant_references()
478 gnttab_entry(ref) = gnttab_free_head; gnttab_free_grant_references()
849 foreign->gref = map_ops[i].ref; gnttab_map_refs()
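get_free_entries()/put_free_entry() above manage free grant references as a free list threaded through the table itself: for a free slot, gnttab_entry(ref) stores the index of the next free slot, and gnttab_free_head points at the first. A sketch of that intrusive free list over a small array; the sizes and names are invented:

#include <stdio.h>

#define NR_ENTRIES 8
#define LIST_END   (-1)

/* entry[i] of a free slot holds the index of the next free slot. */
static int entry[NR_ENTRIES];
static int free_head;

static void freelist_init(void)
{
	for (int i = 0; i < NR_ENTRIES - 1; i++)
		entry[i] = i + 1;
	entry[NR_ENTRIES - 1] = LIST_END;
	free_head = 0;
}

static int get_free_entry(void)
{
	int ref = free_head;

	if (ref == LIST_END)
		return -1;		/* table exhausted */
	free_head = entry[ref];		/* pop the head */
	return ref;
}

static void put_free_entry_(int ref)
{
	entry[ref] = free_head;		/* push back on the head */
	free_head = ref;
}

int main(void)
{
	freelist_init();
	int a = get_free_entry(), b = get_free_entry();

	printf("got %d and %d\n", a, b);
	put_free_entry_(a);
	printf("reused: %d\n", get_free_entry());	/* returns a again */
	return 0;
}

Storing the links inside the unused entries means the allocator needs no memory beyond the table and a single head index.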
/linux-4.4.14/drivers/android/
H A Dbinder.c257 /* node + proc => ref (transaction) */
258 /* desc + proc => ref (transaction, inc/dec ref) */
1009 struct binder_ref *ref; binder_get_ref() local
1012 ref = rb_entry(n, struct binder_ref, rb_node_desc); binder_get_ref()
1014 if (desc < ref->desc) binder_get_ref()
1016 else if (desc > ref->desc) binder_get_ref()
1019 return ref; binder_get_ref()
1030 struct binder_ref *ref, *new_ref; binder_get_ref_for_node() local
1034 ref = rb_entry(parent, struct binder_ref, rb_node_node); binder_get_ref_for_node()
1036 if (node < ref->node) binder_get_ref_for_node()
1038 else if (node > ref->node) binder_get_ref_for_node()
1041 return ref; binder_get_ref_for_node()
1043 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); binder_get_ref_for_node()
1055 ref = rb_entry(n, struct binder_ref, rb_node_desc); binder_get_ref_for_node()
1056 if (ref->desc > new_ref->desc) binder_get_ref_for_node()
1058 new_ref->desc = ref->desc + 1; binder_get_ref_for_node()
1064 ref = rb_entry(parent, struct binder_ref, rb_node_desc); binder_get_ref_for_node()
1066 if (new_ref->desc < ref->desc) binder_get_ref_for_node()
1068 else if (new_ref->desc > ref->desc) binder_get_ref_for_node()
1079 "%d new ref %d desc %d for node %d\n", binder_get_ref_for_node()
1084 "%d new ref %d desc %d for dead node\n", binder_get_ref_for_node()
1090 static void binder_delete_ref(struct binder_ref *ref) binder_delete_ref() argument
1093 "%d delete ref %d desc %d for node %d\n", binder_delete_ref()
1094 ref->proc->pid, ref->debug_id, ref->desc, binder_delete_ref()
1095 ref->node->debug_id); binder_delete_ref()
1097 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); binder_delete_ref()
1098 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); binder_delete_ref()
1099 if (ref->strong) binder_delete_ref()
1100 binder_dec_node(ref->node, 1, 1); binder_delete_ref()
1101 hlist_del(&ref->node_entry); binder_delete_ref()
1102 binder_dec_node(ref->node, 0, 1); binder_delete_ref()
1103 if (ref->death) { binder_delete_ref()
1105 "%d delete ref %d desc %d has death notification\n", binder_delete_ref()
1106 ref->proc->pid, ref->debug_id, ref->desc); binder_delete_ref()
1107 list_del(&ref->death->work.entry); binder_delete_ref()
1108 kfree(ref->death); binder_delete_ref()
1111 kfree(ref); binder_delete_ref()
1115 static int binder_inc_ref(struct binder_ref *ref, int strong, binder_inc_ref() argument
1121 if (ref->strong == 0) { binder_inc_ref()
1122 ret = binder_inc_node(ref->node, 1, 1, target_list); binder_inc_ref()
1126 ref->strong++; binder_inc_ref()
1128 if (ref->weak == 0) { binder_inc_ref()
1129 ret = binder_inc_node(ref->node, 0, 1, target_list); binder_inc_ref()
1133 ref->weak++; binder_inc_ref()
1139 static int binder_dec_ref(struct binder_ref *ref, int strong) binder_dec_ref() argument
1142 if (ref->strong == 0) { binder_dec_ref()
1143 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", binder_dec_ref()
1144 ref->proc->pid, ref->debug_id, binder_dec_ref()
1145 ref->desc, ref->strong, ref->weak); binder_dec_ref()
1148 ref->strong--; binder_dec_ref()
1149 if (ref->strong == 0) { binder_dec_ref()
1152 ret = binder_dec_node(ref->node, strong, 1); binder_dec_ref()
1157 if (ref->weak == 0) { binder_dec_ref()
1158 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", binder_dec_ref()
1159 ref->proc->pid, ref->debug_id, binder_dec_ref()
1160 ref->desc, ref->strong, ref->weak); binder_dec_ref()
1163 ref->weak--; binder_dec_ref()
1165 if (ref->strong == 0 && ref->weak == 0) binder_dec_ref()
1166 binder_delete_ref(ref); binder_dec_ref()
1289 struct binder_ref *ref = binder_get_ref(proc, fp->handle); binder_transaction_buffer_release() local
1291 if (ref == NULL) { binder_transaction_buffer_release()
1297 " ref %d desc %d (node %d)\n", binder_transaction_buffer_release()
1298 ref->debug_id, ref->desc, ref->node->debug_id); binder_transaction_buffer_release()
1299 binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); binder_transaction_buffer_release()
1381 struct binder_ref *ref; binder_transaction() local
1383 ref = binder_get_ref(proc, tr->target.handle); binder_transaction()
1384 if (ref == NULL) { binder_transaction()
1390 target_node = ref->node; binder_transaction()
1540 struct binder_ref *ref; binder_transaction() local
1565 ref = binder_get_ref_for_node(target_proc, node); binder_transaction()
1566 if (ref == NULL) { binder_transaction()
1574 fp->handle = ref->desc; binder_transaction()
1575 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, binder_transaction()
1578 trace_binder_transaction_node_to_ref(t, node, ref); binder_transaction()
1580 " node %d u%016llx -> ref %d desc %d\n", binder_transaction()
1582 ref->debug_id, ref->desc); binder_transaction()
1586 struct binder_ref *ref = binder_get_ref(proc, fp->handle); binder_transaction() local
1588 if (ref == NULL) { binder_transaction()
1600 if (ref->node->proc == target_proc) { binder_transaction()
1605 fp->binder = ref->node->ptr; binder_transaction()
1606 fp->cookie = ref->node->cookie; binder_transaction()
1607 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); binder_transaction()
1608 trace_binder_transaction_ref_to_node(t, ref); binder_transaction()
1610 " ref %d desc %d -> node %d u%016llx\n", binder_transaction()
1611 ref->debug_id, ref->desc, ref->node->debug_id, binder_transaction()
1612 (u64)ref->node->ptr); binder_transaction()
1616 new_ref = binder_get_ref_for_node(target_proc, ref->node); binder_transaction()
1623 trace_binder_transaction_ref_to_ref(t, ref, binder_transaction()
1626 " ref %d desc %d -> ref %d desc %d (node %d)\n", binder_transaction()
1627 ref->debug_id, ref->desc, new_ref->debug_id, binder_transaction()
1628 new_ref->desc, ref->node->debug_id); binder_transaction()
1781 struct binder_ref *ref; binder_thread_write() local
1789 ref = binder_get_ref_for_node(proc, binder_thread_write()
1791 if (ref->desc != target) { binder_thread_write()
1794 ref->desc); binder_thread_write()
1797 ref = binder_get_ref(proc, target); binder_thread_write()
1798 if (ref == NULL) { binder_thread_write()
1799 binder_user_error("%d:%d refcount change on invalid ref %d\n", binder_thread_write()
1806 binder_inc_ref(ref, 0, NULL); binder_thread_write()
1810 binder_inc_ref(ref, 1, NULL); binder_thread_write()
1814 binder_dec_ref(ref, 1); binder_thread_write()
1819 binder_dec_ref(ref, 0); binder_thread_write()
1823 "%d:%d %s ref %d desc %d s %d w %d for node %d\n", binder_thread_write()
1824 proc->pid, thread->pid, debug_string, ref->debug_id, binder_thread_write()
1825 ref->desc, ref->strong, ref->weak, ref->node->debug_id); binder_thread_write()
1984 struct binder_ref *ref; binder_thread_write() local
1993 ref = binder_get_ref(proc, target); binder_thread_write()
1994 if (ref == NULL) { binder_thread_write()
1995 binder_user_error("%d:%d %s invalid ref %d\n", binder_thread_write()
2005 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", binder_thread_write()
2010 (u64)cookie, ref->debug_id, ref->desc, binder_thread_write()
2011 ref->strong, ref->weak, ref->node->debug_id); binder_thread_write()
2014 if (ref->death) { binder_thread_write()
2030 ref->death = death; binder_thread_write()
2031 if (ref->node->proc == NULL) { binder_thread_write()
2032 ref->death->work.type = BINDER_WORK_DEAD_BINDER; binder_thread_write()
2034 list_add_tail(&ref->death->work.entry, &thread->todo); binder_thread_write()
2036 list_add_tail(&ref->death->work.entry, &proc->todo); binder_thread_write()
2041 if (ref->death == NULL) { binder_thread_write()
2046 death = ref->death; binder_thread_write()
2054 ref->death = NULL; binder_thread_write()
3021 struct binder_ref *ref; binder_node_release() local
3039 hlist_for_each_entry(ref, &node->refs, node_entry) { binder_node_release()
3042 if (!ref->death) binder_node_release()
3047 if (list_empty(&ref->death->work.entry)) { binder_node_release()
3048 ref->death->work.type = BINDER_WORK_DEAD_BINDER; binder_node_release()
3049 list_add_tail(&ref->death->work.entry, binder_node_release()
3050 &ref->proc->todo); binder_node_release()
3051 wake_up_interruptible(&ref->proc->wait); binder_node_release()
3105 struct binder_ref *ref; binder_deferred_release() local
3107 ref = rb_entry(n, struct binder_ref, rb_node_desc); binder_deferred_release()
3109 binder_delete_ref(ref); binder_deferred_release()
3161 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", binder_deferred_release()
3326 struct binder_ref *ref; print_binder_node() local
3331 hlist_for_each_entry(ref, &node->refs, node_entry) print_binder_node()
3341 hlist_for_each_entry(ref, &node->refs, node_entry) print_binder_node()
3342 seq_printf(m, " %d", ref->proc->pid); print_binder_node()
3350 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) print_binder_ref() argument
3352 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", print_binder_ref()
3353 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", print_binder_ref()
3354 ref->node->debug_id, ref->strong, ref->weak, ref->death); print_binder_ref()
3442 "ref",
3507 struct binder_ref *ref = rb_entry(n, struct binder_ref, print_binder_proc_stats() local
3510 strong += ref->strong; print_binder_proc_stats()
3511 weak += ref->weak; print_binder_proc_stats()
H A Dbinder_trace.h149 struct binder_ref *ref),
150 TP_ARGS(t, node, ref),
163 __entry->ref_debug_id = ref->debug_id;
164 __entry->ref_desc = ref->desc;
173 TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
174 TP_ARGS(t, ref),
185 __entry->ref_debug_id = ref->debug_id;
186 __entry->ref_desc = ref->desc;
187 __entry->node_debug_id = ref->node->debug_id;
188 __entry->node_ptr = ref->node->ptr;
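
Editor's note: the binder_transaction() fragments above rewrite object references as a transaction crosses process boundaries: a handle whose underlying node lives in the target process collapses back into a direct binder object (ptr/cookie), while a handle to a foreign node gets a ref created in the target process and is passed on as that ref's desc. Below is a minimal standalone sketch of just that branch; the types and the lookup helper are simplified stand-ins for the driver's rbtree-backed binder_ref/binder_node, and the binder_inc_ref()/binder_inc_node() bookkeeping is omitted.

#include <stdio.h>

struct binder_proc { int pid; };
struct binder_node { struct binder_proc *proc; unsigned long ptr, cookie; };
struct binder_ref  { int desc; struct binder_node *node; };

/* Toy stand-in: the real binder_get_ref_for_node() walks/extends an rbtree. */
static struct binder_ref *get_ref_for_node(struct binder_proc *target,
					   struct binder_node *node)
{
	static struct binder_ref new_ref;	/* placeholder for an rbtree insert */

	new_ref.desc = 1;
	new_ref.node = node;
	return &new_ref;
}

static void translate_handle(struct binder_ref *ref, struct binder_proc *target)
{
	if (ref->node->proc == target) {
		/* Node lives in the target: hand back the raw binder object. */
		printf("BINDER ptr=%lx cookie=%lx\n",
		       ref->node->ptr, ref->node->cookie);
	} else {
		/* Foreign node: create/find a ref in the target, pass its desc. */
		struct binder_ref *new_ref = get_ref_for_node(target, ref->node);

		printf("HANDLE desc=%d\n", new_ref->desc);
	}
}

int main(void)
{
	struct binder_proc a = { 1 }, b = { 2 };
	struct binder_node n = { &a, 0xb0, 0xc0 };
	struct binder_ref r = { 7, &n };

	translate_handle(&r, &a);	/* node's own proc -> BINDER */
	translate_handle(&r, &b);	/* different proc  -> HANDLE */
	return 0;
}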
/linux-4.4.14/arch/powerpc/kvm/
H A De500_mmu_host.c191 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; inval_gtlbe_on_host() local
194 if (!(ref->flags & E500_TLB_VALID)) { inval_gtlbe_on_host()
195 WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), inval_gtlbe_on_host()
196 "%s: flags %x\n", __func__, ref->flags); inval_gtlbe_on_host()
200 if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { inval_gtlbe_on_host()
218 ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); inval_gtlbe_on_host()
222 if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { inval_gtlbe_on_host()
228 ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); inval_gtlbe_on_host()
235 if (ref->flags & E500_TLB_VALID) inval_gtlbe_on_host()
239 ref->flags = 0; inval_gtlbe_on_host()
247 static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, kvmppc_e500_ref_setup() argument
251 ref->pfn = pfn; kvmppc_e500_ref_setup()
252 ref->flags = E500_TLB_VALID; kvmppc_e500_ref_setup()
255 ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; kvmppc_e500_ref_setup()
264 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) kvmppc_e500_ref_release() argument
266 if (ref->flags & E500_TLB_VALID) { kvmppc_e500_ref_release()
268 trace_kvm_booke206_ref_release(ref->pfn, ref->flags); kvmppc_e500_ref_release()
269 ref->flags = 0; kvmppc_e500_ref_release()
290 struct tlbe_ref *ref = clear_tlb_privs() local
291 &vcpu_e500->gtlb_priv[tlbsel][i].ref; clear_tlb_privs()
292 kvmppc_e500_ref_release(ref); clear_tlb_privs()
309 int tsize, struct tlbe_ref *ref, u64 gvaddr, kvmppc_e500_setup_stlbe()
312 pfn_t pfn = ref->pfn; kvmppc_e500_setup_stlbe()
315 BUG_ON(!(ref->flags & E500_TLB_VALID)); kvmppc_e500_setup_stlbe()
319 stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); kvmppc_e500_setup_stlbe()
327 struct tlbe_ref *ref) kvmppc_e500_shadow_map()
495 kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); kvmppc_e500_shadow_map()
498 ref, gvaddr, stlbe); kvmppc_e500_shadow_map()
517 struct tlbe_ref *ref; kvmppc_e500_tlb0_map() local
523 ref = &vcpu_e500->gtlb_priv[0][esel].ref; kvmppc_e500_tlb0_map()
527 gtlbe, 0, stlbe, ref); kvmppc_e500_tlb0_map()
537 struct tlbe_ref *ref, kvmppc_e500_tlb1_map_tlb1()
550 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; kvmppc_e500_tlb1_map_tlb1()
553 WARN_ON(!(ref->flags & E500_TLB_VALID)); kvmppc_e500_tlb1_map_tlb1()
565 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; kvmppc_e500_tlb1_map() local
570 ref); kvmppc_e500_tlb1_map()
576 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; kvmppc_e500_tlb1_map()
582 sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); kvmppc_e500_tlb1_map()
604 if (!(priv->ref.flags & E500_TLB_VALID)) { kvmppc_mmu_map()
608 &priv->ref, eaddr, &stlbe); kvmppc_mmu_map()
306 kvmppc_e500_setup_stlbe( struct kvm_vcpu *vcpu, struct kvm_book3e_206_tlb_entry *gtlbe, int tsize, struct tlbe_ref *ref, u64 gvaddr, struct kvm_book3e_206_tlb_entry *stlbe) kvmppc_e500_setup_stlbe() argument
324 kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, struct tlbe_ref *ref) kvmppc_e500_shadow_map() argument
536 kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, struct tlbe_ref *ref, int esel) kvmppc_e500_tlb1_map_tlb1() argument
/linux-4.4.14/drivers/gpu/drm/armada/
H A Darmada_510.c64 uint32_t rate, ref, div; armada510_crtc_compute_clock() local
67 ref = clk_round_rate(clk, rate); armada510_crtc_compute_clock()
68 div = DIV_ROUND_UP(ref, rate); armada510_crtc_compute_clock()
72 clk_set_rate(clk, ref); armada510_crtc_compute_clock()
/linux-4.4.14/include/linux/phy/
H A Dphy-qcom-ufs.h22 * ref clock.
31 * ref clock.
38 * ref clock.
45 * ref clock.
/linux-4.4.14/drivers/staging/lustre/lustre/include/
H A Dlu_ref.h117 static inline void lu_ref_init(struct lu_ref *ref) lu_ref_init() argument
121 static inline void lu_ref_fini(struct lu_ref *ref) lu_ref_fini() argument
125 static inline struct lu_ref_link *lu_ref_add(struct lu_ref *ref, lu_ref_add() argument
132 static inline struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref, lu_ref_add_atomic() argument
139 static inline void lu_ref_add_at(struct lu_ref *ref, lu_ref_add_at() argument
146 static inline void lu_ref_del(struct lu_ref *ref, const char *scope, lu_ref_del() argument
151 static inline void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link, lu_ref_set_at() argument
157 static inline void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link, lu_ref_del_at() argument
171 static inline void lu_ref_print(const struct lu_ref *ref) lu_ref_print() argument
/linux-4.4.14/drivers/gpu/drm/vmwgfx/
H A Dvmwgfx_ttm_glue.c46 static int vmw_ttm_mem_global_init(struct drm_global_reference *ref) vmw_ttm_mem_global_init() argument
49 return ttm_mem_global_init(ref->object); vmw_ttm_mem_global_init()
52 static void vmw_ttm_mem_global_release(struct drm_global_reference *ref) vmw_ttm_mem_global_release() argument
54 ttm_mem_global_release(ref->object); vmw_ttm_mem_global_release()
76 global_ref = &dev_priv->bo_global_ref.ref; vmw_ttm_global_init()
96 drm_global_item_unref(&dev_priv->bo_global_ref.ref); vmw_ttm_global_release()
/linux-4.4.14/tools/perf/util/
H A Dthread-stack.c69 * @ref: external reference (e.g. db_id of sample)
77 u64 ref; member in struct:thread_stack_entry
197 u64 timestamp, u64 ref, bool no_return) thread_stack__call_return()
212 cr.call_ref = tse->ref; thread_stack__call_return()
213 cr.return_ref = ref; thread_stack__call_return()
470 u64 timestamp, u64 ref, struct call_path *cp, thread_stack__push_cp()
485 tse->ref = ref; thread_stack__push_cp()
494 u64 ret_addr, u64 timestamp, u64 ref, thread_stack__pop_cp()
507 timestamp, ref, false); thread_stack__pop_cp()
512 timestamp, ref, false); thread_stack__pop_cp()
523 timestamp, ref, thread_stack__pop_cp()
529 timestamp, ref, false); thread_stack__pop_cp()
539 struct addr_location *to_al, u64 ref) thread_stack__bottom()
561 return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp, thread_stack__bottom()
569 struct addr_location *to_al, u64 ref) thread_stack__no_call_return()
580 sample->time, ref, thread_stack__no_call_return()
593 return thread_stack__push_cp(ts, 0, sample->time, ref, thread_stack__no_call_return()
600 sample->time, ref, thread_stack__no_call_return()
618 err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp, thread_stack__no_call_return()
623 return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref, thread_stack__no_call_return()
629 u64 ref) thread_stack__trace_begin()
641 timestamp, ref, false); thread_stack__trace_begin()
650 struct perf_sample *sample, u64 ref) thread_stack__trace_end()
657 if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref)) thread_stack__trace_end()
667 return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp, thread_stack__trace_end()
674 struct addr_location *to_al, u64 ref, thread_stack__process()
709 ref); thread_stack__process()
734 err = thread_stack__push_cp(ts, ret_addr, sample->time, ref, thread_stack__process()
741 sample->time, ref, from_al->sym); thread_stack__process()
746 from_al, to_al, ref); thread_stack__process()
749 err = thread_stack__trace_begin(thread, ts, sample->time, ref); thread_stack__process()
751 err = thread_stack__trace_end(ts, sample, ref); thread_stack__process()
195 thread_stack__call_return(struct thread *thread, struct thread_stack *ts, size_t idx, u64 timestamp, u64 ref, bool no_return) thread_stack__call_return() argument
469 thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr, u64 timestamp, u64 ref, struct call_path *cp, bool no_call) thread_stack__push_cp() argument
493 thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts, u64 ret_addr, u64 timestamp, u64 ref, struct symbol *sym) thread_stack__pop_cp() argument
536 thread_stack__bottom(struct thread *thread, struct thread_stack *ts, struct perf_sample *sample, struct addr_location *from_al, struct addr_location *to_al, u64 ref) thread_stack__bottom() argument
565 thread_stack__no_call_return(struct thread *thread, struct thread_stack *ts, struct perf_sample *sample, struct addr_location *from_al, struct addr_location *to_al, u64 ref) thread_stack__no_call_return() argument
627 thread_stack__trace_begin(struct thread *thread, struct thread_stack *ts, u64 timestamp, u64 ref) thread_stack__trace_begin() argument
649 thread_stack__trace_end(struct thread_stack *ts, struct perf_sample *sample, u64 ref) thread_stack__trace_end() argument
671 thread_stack__process(struct thread *thread, struct comm *comm, struct perf_sample *sample, struct addr_location *from_al, struct addr_location *to_al, u64 ref, struct call_return_processor *crp) thread_stack__process() argument
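
Editor's note: in the thread-stack.c fragments, @ref is an opaque external id (e.g. the db_id of a sample) carried on every pushed frame; when a return pops the frame, cr.call_ref/cr.return_ref pair the call's id with the return's id. A toy model of that pairing follows, assuming cleanly matched call/return samples (the real code also handles no-return calls, trace begin/end, and unmatched returns).

#include <stdio.h>

#define STACK_MAX 64

struct entry { unsigned long long ret_addr, ref; };

static struct entry stack[STACK_MAX];
static int cnt;

/* Push one frame on a call, remembering the external ref (e.g. a db_id). */
static void push(unsigned long long ret_addr, unsigned long long ref)
{
	if (cnt < STACK_MAX)
		stack[cnt++] = (struct entry){ ret_addr, ref };
}

/* On a return that matches the top frame, pop it and pair the two ids,
 * the way thread_stack__call_return() fills cr.call_ref / cr.return_ref. */
static void pop(unsigned long long ret_addr, unsigned long long ref)
{
	if (cnt && stack[cnt - 1].ret_addr == ret_addr) {
		cnt--;
		printf("call_ref=%llu return_ref=%llu\n", stack[cnt].ref, ref);
	}
}

int main(void)
{
	push(0x400123, 1);	/* call sample, db_id 1 */
	pop(0x400123, 2);	/* return sample, db_id 2 -> pairs 1 with 2 */
	return 0;
}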
H A Dprobe-finder.c158 struct probe_trace_arg_ref *ref; alloc_trace_arg_ref() local
159 ref = zalloc(sizeof(struct probe_trace_arg_ref)); alloc_trace_arg_ref()
160 if (ref != NULL) alloc_trace_arg_ref()
161 ref->offset = offs; alloc_trace_arg_ref()
162 return ref; alloc_trace_arg_ref()
181 bool ref = false; convert_variable_location() local
233 tvar->ref = alloc_trace_arg_ref((long)offs); convert_variable_location()
234 if (tvar->ref == NULL) convert_variable_location()
243 ref = true; convert_variable_location()
251 ref = true; convert_variable_location()
257 ref = true; convert_variable_location()
280 if (ref) { convert_variable_location()
281 tvar->ref = alloc_trace_arg_ref((long)offs); convert_variable_location()
282 if (tvar->ref == NULL) convert_variable_location()
294 struct probe_trace_arg_ref **ref_ptr = &tvar->ref; convert_variable_type()
398 struct probe_trace_arg_ref *ref = *ref_ptr; convert_variable_fields() local
424 ref = zalloc(sizeof(struct probe_trace_arg_ref)); convert_variable_fields()
425 if (ref == NULL) convert_variable_fields()
428 (*ref_ptr)->next = ref; convert_variable_fields()
430 *ref_ptr = ref; convert_variable_fields()
432 ref->offset += dwarf_bytesize(&type) * field->index; convert_variable_fields()
439 if (!field->ref) { convert_variable_fields()
457 ref = zalloc(sizeof(struct probe_trace_arg_ref)); convert_variable_fields()
458 if (ref == NULL) convert_variable_fields()
461 (*ref_ptr)->next = ref; convert_variable_fields()
463 *ref_ptr = ref; convert_variable_fields()
477 if (field->ref && dwarf_diename(vr_die)) { convert_variable_fields()
482 if (!ref) { convert_variable_fields()
506 ref->offset += (long)offs; convert_variable_fields()
511 &ref, die_mem); convert_variable_fields()
517 field->next, &ref, die_mem); convert_variable_fields()
542 pf->pvar->field, &pf->tvar->ref, convert_variable()
H A Dprobe-event.h41 struct probe_trace_arg_ref *ref; /* Referencing offset */ member in struct:probe_trace_arg
70 bool ref; /* Referencing flag */ member in struct:perf_probe_arg_field
H A Dprobe-event.c1392 (*fieldp)->ref = true; parse_perf_probe_arg()
1404 (*fieldp)->ref = false; parse_perf_probe_arg()
1407 (*fieldp)->ref = true; parse_perf_probe_arg()
1421 pr_debug("%s(%d), ", (*fieldp)->name, (*fieldp)->ref); parse_perf_probe_arg()
1430 pr_debug("%s(%d)\n", (*fieldp)->name, (*fieldp)->ref); parse_perf_probe_arg()
1641 field->ref ? "->" : ".", field->name); synthesize_perf_probe_arg()
1738 static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref, __synthesize_probe_trace_arg_ref() argument
1743 if (ref->next) { __synthesize_probe_trace_arg_ref()
1744 depth = __synthesize_probe_trace_arg_ref(ref->next, buf, __synthesize_probe_trace_arg_ref()
1750 ret = e_snprintf(*buf, *buflen, "%+ld(", ref->offset); __synthesize_probe_trace_arg_ref()
1765 struct probe_trace_arg_ref *ref = arg->ref; synthesize_probe_trace_arg() local
1780 if (arg->value[0] == '@' && arg->ref) synthesize_probe_trace_arg()
1781 ref = ref->next; synthesize_probe_trace_arg()
1784 if (ref) { synthesize_probe_trace_arg()
1785 depth = __synthesize_probe_trace_arg_ref(ref, &buf, synthesize_probe_trace_arg()
1792 if (arg->value[0] == '@' && arg->ref) synthesize_probe_trace_arg()
1794 arg->ref->offset); synthesize_probe_trace_arg()
2026 struct probe_trace_arg_ref *ref, *next; clear_probe_trace_event() local
2038 ref = tev->args[i].ref; clear_probe_trace_event()
2039 while (ref) { clear_probe_trace_event()
2040 next = ref->next; clear_probe_trace_event()
2041 free(ref); clear_probe_trace_event()
2042 ref = next; clear_probe_trace_event()
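
Editor's note: __synthesize_probe_trace_arg_ref() above builds the nested-dereference syntax by recursing to the tail of the ref list first, so later (appended field) refs print outermost while the list head stays innermost, next to the base register; the caller then closes one paren per depth. A self-contained imitation of that recursion follows; the helper names and buffer handling are mine, not perf's.

#include <stdio.h>
#include <string.h>

struct ref { long offset; struct ref *next; };

/* Recurse to the tail first so appended (outer) refs print before this
 * one; each level emits its "+off(" prefix and the depth accumulates. */
static int synth_ref(struct ref *r, char *buf, size_t len)
{
	int depth = 1;

	if (r->next)
		depth = 1 + synth_ref(r->next, buf, len);
	snprintf(buf + strlen(buf), len - strlen(buf), "%+ld(", r->offset);
	return depth;
}

int main(void)
{
	struct ref field = { 16, NULL };	/* appended field ref: outermost */
	struct ref base  = { 8, &field };	/* list head: innermost deref   */
	char buf[64] = "";
	int depth = synth_ref(&base, buf, sizeof(buf));

	snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%%ax");
	while (depth--)
		strcat(buf, ")");
	printf("%s\n", buf);	/* +16(+8(%ax)): deref %ax+8 first, then +16 */
	return 0;
}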
/linux-4.4.14/fs/cifs/
H A Dcifs_dfs_ref.c128 * @ref: server's referral
139 const struct dfs_info3_param *ref, cifs_compose_mount_options()
154 if (strlen(fullpath) - ref->path_consumed) cifs_compose_mount_options()
155 prepath = fullpath + ref->path_consumed; cifs_compose_mount_options()
157 *devname = cifs_build_devname(ref->node_name, prepath); cifs_compose_mount_options()
217 /* copy new IP and ref share name */ cifs_compose_mount_options()
242 * @ref: server's referral
245 const char *fullpath, const struct dfs_info3_param *ref) cifs_dfs_do_refmount()
253 fullpath + 1, ref, &devname); cifs_dfs_do_refmount()
265 static void dump_referral(const struct dfs_info3_param *ref) dump_referral() argument
267 cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name); dump_referral()
268 cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name); dump_referral()
270 ref->flags, ref->server_type); dump_referral()
272 ref->ref_flag, ref->path_consumed); dump_referral()
137 cifs_compose_mount_options(const char *sb_mountdata, const char *fullpath, const struct dfs_info3_param *ref, char **devname) cifs_compose_mount_options() argument
244 cifs_dfs_do_refmount(struct cifs_sb_info *cifs_sb, const char *fullpath, const struct dfs_info3_param *ref) cifs_dfs_do_refmount() argument
/linux-4.4.14/drivers/misc/mic/host/
H A Dmic_smpt.c78 static void mic_add_smpt_entry(int spt, s64 *ref, u64 addr, mic_add_smpt_entry() argument
91 smpt_info->entry[i].ref_count += ref[i - spt]; mic_add_smpt_entry()
100 int entries, s64 *ref, size_t size) mic_smpt_op()
138 mic_add_smpt_entry(spt, ref, dma_addr, entries, mdev); mic_smpt_op()
151 size_t size, s64 *ref, u64 *smpt_start) mic_get_smpt_ref_count()
158 ref[i++] = min(mic_smpt_align_high(mdev, start + 1), mic_get_smpt_ref_count()
210 s64 *ref; mic_map() local
216 ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC); mic_map()
217 if (!ref) mic_map()
221 ref, &smpt_start); mic_map()
224 mic_addr = mic_smpt_op(mdev, smpt_start, num_entries, ref, size); mic_map()
226 kfree(ref); mic_map()
257 s64 *ref; mic_unmap() local
273 ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC); mic_unmap()
274 if (!ref) mic_unmap()
277 /* Get number of smpt entries to be mapped, ref count array */ mic_unmap()
278 num_smpt = mic_get_smpt_ref_count(mdev, mic_addr, size, ref, NULL); mic_unmap()
285 smpt_info->entry[i].ref_count -= ref[i - spt]; mic_unmap()
288 "ref count for entry %d is negative\n", i); mic_unmap()
291 kfree(ref); mic_unmap()
401 "nodeid %d SMPT ref count %lld map %lld unmap %lld\n", mic_smpt_uninit()
412 "ref count for entry %d is not zero\n", i); mic_smpt_uninit()
99 mic_smpt_op(struct mic_device *mdev, u64 dma_addr, int entries, s64 *ref, size_t size) mic_smpt_op() argument
150 mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr, size_t size, s64 *ref, u64 *smpt_start) mic_get_smpt_ref_count() argument
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/sw/
H A Dnv04.c35 atomic_t ref; member in struct:nv04_sw_chan
52 args->v0.ref = atomic_read(&chan->ref); nv04_nvsw_mthd_get_ref()
93 atomic_set(&chan->ref, data); nv04_sw_chan_mthd()
115 atomic_set(&chan->ref, 0); nv04_sw_chan_new()
/linux-4.4.14/arch/x86/tools/
H A Dgen-insn-attr-x86.awk127 ref = ""
129 ref = ref $i
130 eid = escape[ref]
268 ref = ""
270 ref = ref $i
271 if (ref in escape)
272 semantic_error("Redefine escape (" ref ")")
273 escape[ref] = geid
275 table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
/linux-4.4.14/tools/perf/util/intel-pt-decoder/
H A Dgen-insn-attr-x86.awk127 ref = ""
129 ref = ref $i
130 eid = escape[ref]
268 ref = ""
270 ref = ref $i
271 if (ref in escape)
272 semantic_error("Redefine escape (" ref ")")
273 escape[ref] = geid
275 table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
/linux-4.4.14/drivers/staging/rdma/ipath/
H A Dipath_mmap.c44 * @ref: a pointer to the kref within struct ipath_mmap_info
46 void ipath_release_mmap_info(struct kref *ref) ipath_release_mmap_info() argument
49 container_of(ref, struct ipath_mmap_info, ref); ipath_release_mmap_info()
68 kref_get(&ip->ref); ipath_vma_open()
75 kref_put(&ip->ref, ipath_release_mmap_info); ipath_vma_close()
154 kref_init(&ip->ref); ipath_create_mmap_info()
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/
H A Dlibcfs_lock.c161 atomic_t *ref; cfs_percpt_atomic_alloc() local
164 refs = cfs_percpt_alloc(cptab, sizeof(*ref)); cfs_percpt_atomic_alloc()
168 cfs_percpt_for_each(ref, i, refs) cfs_percpt_atomic_alloc()
169 atomic_set(ref, init_val); cfs_percpt_atomic_alloc()
178 atomic_t *ref; cfs_percpt_atomic_summary() local
182 cfs_percpt_for_each(ref, i, refs) cfs_percpt_atomic_summary()
183 val += atomic_read(ref); cfs_percpt_atomic_summary()
/linux-4.4.14/drivers/infiniband/hw/qib/
H A Dqib_mmap.c44 * @ref: a pointer to the kref within struct qib_mmap_info
46 void qib_release_mmap_info(struct kref *ref) qib_release_mmap_info() argument
49 container_of(ref, struct qib_mmap_info, ref); qib_release_mmap_info()
68 kref_get(&ip->ref); qib_vma_open()
75 kref_put(&ip->ref, qib_release_mmap_info); qib_vma_close()
154 kref_init(&ip->ref); qib_create_mmap_info()
/linux-4.4.14/scripts/dtc/
H A Dtreesource.c74 fprintf(f, "%s: ", m->ref); write_propval_string()
115 fprintf(f, "%s: ", m->ref); write_propval_string()
133 fprintf(f, " %s:", m->ref); for_each_marker_of_type()
148 fprintf(f, "%s: ", m->ref); write_propval_cells()
162 fprintf(f, " %s:", m->ref); for_each_marker_of_type()
177 fprintf(f, "%s: ", m->ref); write_propval_bytes()
190 fprintf(f, " %s:", m->ref); for_each_marker_of_type()
H A Ddata.c30 free(m->ref); data_free()
240 struct data data_add_marker(struct data d, enum markertype type, char *ref) data_add_marker() argument
247 m->ref = ref; data_add_marker()
H A Dlivetree.c419 if (streq(m->ref, label)) for_each_property()
512 struct node *get_node_by_ref(struct node *tree, const char *ref) get_node_by_ref() argument
514 if (streq(ref, "/")) get_node_by_ref()
516 else if (ref[0] == '/') get_node_by_ref()
517 return get_node_by_path(tree, ref); get_node_by_ref()
519 return get_node_by_label(tree, ref); get_node_by_ref()
H A Ddtc.h80 char *ref; member in struct:marker
119 struct data data_add_marker(struct data d, enum markertype type, char *ref);
216 struct node *get_node_by_ref(struct node *tree, const char *ref);
H A Dchecks.c359 check_duplicate_label(c, dt, m->ref, node, prop, m); check_duplicate_label_prop()
384 if (node != get_node_by_ref(root, m->ref)) for_each_marker_of_type()
467 refnode = get_node_by_ref(dt, m->ref); for_each_marker_of_type()
470 m->ref); for_each_marker_of_type()
491 refnode = get_node_by_ref(dt, m->ref); for_each_marker_of_type()
494 m->ref); for_each_marker_of_type()
/linux-4.4.14/arch/mips/ath79/
H A Dclock.c81 ath79_add_sys_clkdev("ref", ref_rate); ar71xx_clocks_init()
117 ath79_add_sys_clkdev("ref", ref_rate); ar724x_clocks_init()
150 ath79_add_sys_clkdev("ref", ref_rate); ar913x_clocks_init()
212 ath79_add_sys_clkdev("ref", ref_rate); ar933x_clocks_init()
218 clk_add_alias("uart", NULL, "ref", NULL); ar933x_clocks_init()
221 static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac, ar934x_get_pll_freq() argument
227 t = ref; ar934x_get_pll_freq()
232 t = ref; ar934x_get_pll_freq()
346 ath79_add_sys_clkdev("ref", ref_rate); ar934x_clocks_init()
351 clk_add_alias("wdt", NULL, "ref", NULL); ar934x_clocks_init()
352 clk_add_alias("uart", NULL, "ref", NULL); ar934x_clocks_init()
433 ath79_add_sys_clkdev("ref", ref_rate); qca955x_clocks_init()
438 clk_add_alias("wdt", NULL, "ref", NULL); qca955x_clocks_init()
439 clk_add_alias("uart", NULL, "ref", NULL); qca955x_clocks_init()
/linux-4.4.14/drivers/gpu/drm/nouveau/
H A Dnouveau_bo.h51 nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) nouveau_bo_ref() argument
59 *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL; nouveau_bo_ref()
H A Dnouveau_ttm.c279 nouveau_ttm_mem_global_init(struct drm_global_reference *ref) nouveau_ttm_mem_global_init() argument
281 return ttm_mem_global_init(ref->object); nouveau_ttm_mem_global_init()
285 nouveau_ttm_mem_global_release(struct drm_global_reference *ref) nouveau_ttm_mem_global_release() argument
287 ttm_mem_global_release(ref->object); nouveau_ttm_mem_global_release()
310 global_ref = &drm->ttm.bo_global_ref.ref; nouveau_ttm_global_init()
333 drm_global_item_unref(&drm->ttm.bo_global_ref.ref); nouveau_ttm_global_release()
387 drm->ttm.bo_global_ref.ref.object, nouveau_ttm_init()
/linux-4.4.14/include/xen/
H A Dgrant_table.h93 int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
101 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
106 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
107 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
109 int gnttab_query_foreign_access(grant_ref_t ref);
116 void gnttab_free_grant_reference(grant_ref_t ref);
131 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
136 grant_ref_t ref, domid_t domid, gnttab_page_grant_foreign_access_ref_one()
139 gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page), gnttab_page_grant_foreign_access_ref_one()
148 uint32_t flags, grant_ref_t ref, domid_t domid) gnttab_set_map_op()
158 map->ref = ref; gnttab_set_map_op()
135 gnttab_page_grant_foreign_access_ref_one( grant_ref_t ref, domid_t domid, struct page *page, int readonly) gnttab_page_grant_foreign_access_ref_one() argument
147 gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) gnttab_set_map_op() argument
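
Editor's note: the declarations above make up the grant lifecycle a frontend follows: claim a free reference from a pre-allocated pool, grant the backend domain access to a frame, then end the access when the backend is done; the xen-netfront fragments further down are the canonical user. A hedged kernel-context sketch of that sequence (error handling elided; the pool is assumed to have been filled beforehand with gnttab_alloc_grant_references()):

#include <linux/bug.h>
#include <xen/grant_table.h>
#include <xen/page.h>

/* Sketch only: claim a ref from 'gref_head', grant 'otherend' (the
 * backend domid from xenbus) read/write access to 'page'. */
static grant_ref_t share_page_with_backend(grant_ref_t *gref_head,
					   domid_t otherend, struct page *page)
{
	grant_ref_t ref = gnttab_claim_grant_reference(gref_head);

	BUG_ON((signed short)ref < 0);	/* same sanity check netfront uses */
	gnttab_grant_foreign_access_ref(ref, otherend,
					xen_page_to_gfn(page), 0 /* rw */);
	return ref;
}

static void unshare_page(grant_ref_t ref, struct page *page)
{
	/* Revoke the grant; the page stays referenced until access ends. */
	gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}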
/linux-4.4.14/drivers/macintosh/
H A Dwindfarm_smu_sat.c37 struct kref ref; member in struct:wf_sat
180 static void wf_sat_release(struct kref *ref) wf_sat_release() argument
182 struct wf_sat *sat = container_of(ref, struct wf_sat, ref); wf_sat_release()
195 kref_put(&sat->ref, wf_sat_release); wf_sat_sensor_release()
223 kref_init(&sat->ref); wf_sat_probe()
296 kref_get(&sat->ref); wf_sat_probe()
323 kref_get(&sat->ref); wf_sat_probe()
346 kref_put(&sat->ref, wf_sat_release); wf_sat_remove()
H A Dwindfarm_ad7417_sensor.c28 struct kref ref; member in struct:wf_ad7417_priv
153 static void wf_ad7417_release(struct kref *ref) wf_ad7417_release() argument
155 struct wf_ad7417_priv *pv = container_of(ref, wf_ad7417_release()
156 struct wf_ad7417_priv, ref); wf_ad7417_release()
165 kref_put(&pv->ref, wf_ad7417_release); wf_ad7417_sensor_release()
188 kref_get(&pv->ref); wf_ad7417_add_sensor()
269 kref_init(&pv->ref); wf_ad7417_probe()
305 kref_put(&pv->ref, wf_ad7417_release); wf_ad7417_remove()
H A Dwindfarm_core.c152 struct wf_control *ct = container_of(kref, struct wf_control, ref); wf_control_release()
223 kref_init(&new_ct->ref); wf_register_control()
253 kref_put(&ct->ref, wf_control_release); wf_unregister_control()
261 kref_get(&ct->ref); wf_get_control()
269 kref_put(&ct->ref, wf_control_release); wf_put_control()
282 struct wf_sensor *sr = container_of(kref, struct wf_sensor, ref); wf_sensor_release()
318 kref_init(&new_sr->ref); wf_register_sensor()
356 kref_get(&sr->ref); wf_get_sensor()
364 kref_put(&sr->ref, wf_sensor_release); wf_put_sensor()
H A Dwindfarm_fcu_controls.c57 struct kref ref; member in struct:wf_fcu_priv
72 static void wf_fcu_release(struct kref *ref) wf_fcu_release() argument
74 struct wf_fcu_priv *pv = container_of(ref, struct wf_fcu_priv, ref); wf_fcu_release()
83 kref_put(&fan->fcu_priv->ref, wf_fcu_release); wf_fcu_fan_release()
398 kref_get(&pv->ref); wf_fcu_add_fan()
529 kref_init(&pv->ref); wf_fcu_probe()
576 kref_put(&pv->ref, wf_fcu_release); wf_fcu_remove()
H A Dwindfarm.h42 struct kref ref; member in struct:wf_control
110 struct kref ref; member in struct:wf_sensor
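
Editor's note: every windfarm file above repeats the same kref idiom: embed a struct kref, kref_init() it on creation, kref_get() for each extra user, and kref_put() with a release callback that container_of()s back to the enclosing object and frees it. A self-contained user-space rendition of the pattern (plain int instead of the kernel's atomic counter, and a local container_of so it compiles anywhere):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int refcount; };	/* the kernel's counter is atomic */

static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k)  { k->refcount++; }
static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->refcount == 0)
		release(k);
}

struct wf_sat_like { int nr; struct kref ref; };	/* stand-in object */

static void sat_release(struct kref *ref)
{
	struct wf_sat_like *sat = container_of(ref, struct wf_sat_like, ref);

	printf("releasing sat %d\n", sat->nr);
	free(sat);
}

int main(void)
{
	struct wf_sat_like *sat = malloc(sizeof(*sat));

	sat->nr = 0;
	kref_init(&sat->ref);			/* creator's reference */
	kref_get(&sat->ref);			/* a sensor takes one  */
	kref_put(&sat->ref, sat_release);	/* sensor done (2 -> 1) */
	kref_put(&sat->ref, sat_release);	/* creator done: frees  */
	return 0;
}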
/linux-4.4.14/fs/nfs/
H A Dpnfs_dev.c87 if (atomic_read(&d->ref)) _lookup_deviceid()
180 if (d != NULL && !atomic_inc_not_zero(&d->ref)) __nfs4_find_get_deviceid()
210 atomic_inc(&new->ref); nfs4_find_get_deviceid()
243 /* balance the initial ref set in pnfs_insert_deviceid */ nfs4_delete_deviceid()
258 atomic_set(&d->ref, 1); nfs4_init_deviceid_node()
269 * Note that since the test for d->ref == 0 is sufficient to establish
276 if (atomic_add_unless(&d->ref, -1, 2)) nfs4_put_deviceid_node()
280 if (!atomic_dec_and_test(&d->ref)) nfs4_put_deviceid_node()
320 if (d->nfs_client == clp && atomic_read(&d->ref)) { _deviceid_purge_client()
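
Editor's note: the pnfs_dev.c lookup above only hands out a device entry if it can raise the count from nonzero (atomic_inc_not_zero), so an entry already headed to zero can never be revived by a racing lookup. A user-space rendition of that primitive with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the object is not already dying. */
static bool inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;	/* reference taken */
	return false;			/* count already hit 0: entry is dying */
}

int main(void)
{
	atomic_int ref;

	atomic_init(&ref, 1);
	return inc_not_zero(&ref) ? 0 : 1;	/* succeeds while alive */
}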
H A Dcallback_proc.c376 struct referring_call *ref; referring_call_exists() local
393 ref = &rclist->rcl_refcalls[j]; referring_call_exists()
401 ref->rc_sequenceid, ref->rc_slotid); referring_call_exists()
404 status = (test_bit(ref->rc_slotid, tbl->used_slots) && referring_call_exists()
405 tbl->slots[ref->rc_slotid].seq_nr == referring_call_exists()
406 ref->rc_sequenceid); referring_call_exists()
H A Ddns_resolve.c103 static void nfs_dns_ent_put(struct kref *ref) nfs_dns_ent_put() argument
107 item = container_of(ref, struct nfs_dns_ent, h.ref); nfs_dns_ent_put()
/linux-4.4.14/include/drm/ttm/
H A Dttm_object.h50 * Describes what type of reference a ref object holds.
106 * only be held by a ref object.
146 * @dma_buf: Non ref-counted pointer to a struct dma_buf created from this
233 * Checks that the base object is shareable and adds a ref object to it.
235 * Adding a ref object to a base object is basically like referencing the
240 * When user-space takes a lock, it can add a ref object to that lock to
241 * make sure the lock is released if the application dies. A ref object
255 * @ref_type: Ref type of the ref object to be dereferenced.
257 * Unreference a ref object with type @ref_type
259 * references, the ref object will be destroyed and the base object
300 * data structures needed for ttm base and ref objects.
/linux-4.4.14/fs/ubifs/
H A Dlog.c180 struct ubifs_ref_node *ref; ubifs_add_bud_to_log() local
185 ref = kzalloc(c->ref_node_alsz, GFP_NOFS); ubifs_add_bud_to_log()
186 if (!ref) { ubifs_add_bud_to_log()
240 ref->ch.node_type = UBIFS_REF_NODE; ubifs_add_bud_to_log()
241 ref->lnum = cpu_to_le32(bud->lnum); ubifs_add_bud_to_log()
242 ref->offs = cpu_to_le32(bud->start); ubifs_add_bud_to_log()
243 ref->jhead = cpu_to_le32(jhead); ubifs_add_bud_to_log()
271 dbg_log("write ref LEB %d:%d", ubifs_add_bud_to_log()
273 err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum, ubifs_add_bud_to_log()
283 kfree(ref); ubifs_add_bud_to_log()
288 kfree(ref); ubifs_add_bud_to_log()
363 struct ubifs_ref_node *ref; ubifs_log_start_commit() local
395 dbg_log("add ref to LEB %d:%d for jhead %s", ubifs_log_start_commit()
397 ref = buf + len; ubifs_log_start_commit()
398 ref->ch.node_type = UBIFS_REF_NODE; ubifs_log_start_commit()
399 ref->lnum = cpu_to_le32(lnum); ubifs_log_start_commit()
400 ref->offs = cpu_to_le32(offs); ubifs_log_start_commit()
401 ref->jhead = cpu_to_le32(i); ubifs_log_start_commit()
403 ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0); ubifs_log_start_commit()
657 struct ubifs_ref_node *ref = snod->node; ubifs_consolidate_log() local
658 int ref_lnum = le32_to_cpu(ref->lnum); ubifs_consolidate_log()
H A Dreplay.c778 * @ref: the reference node to validate
786 static int validate_ref(struct ubifs_info *c, const struct ubifs_ref_node *ref) validate_ref() argument
789 int lnum = le32_to_cpu(ref->lnum); validate_ref()
790 unsigned int offs = le32_to_cpu(ref->offs); validate_ref()
791 unsigned int jhead = le32_to_cpu(ref->jhead); validate_ref()
794 * ref->offs may point to the end of LEB when the journal head points validate_ref()
917 const struct ubifs_ref_node *ref = snod->node; replay_log_leb() local
919 err = validate_ref(c, ref); replay_log_leb()
925 err = add_replay_bud(c, le32_to_cpu(ref->lnum), replay_log_leb()
926 le32_to_cpu(ref->offs), replay_log_leb()
927 le32_to_cpu(ref->jhead), replay_log_leb()
/linux-4.4.14/arch/xtensa/kernel/
H A DMakefile28 # .text .ref.text .*init.text .*exit.text .text.*
34 -e ':c; s/\*(\([^)]*\)\(\.[a-z]*it\|\.ref\)\.text/*(\1\2.literal \2.{text}/; tc; ' \
/linux-4.4.14/include/drm/
H A Ddrm_global.h50 extern int drm_global_item_ref(struct drm_global_reference *ref);
51 extern void drm_global_item_unref(struct drm_global_reference *ref);
/linux-4.4.14/arch/m68k/include/asm/
H A Dmcftimer.h39 #define MCFTIMER_TMR_ENORI 0x0010 /* Enable ref interrupt */
40 #define MCFTIMER_TMR_DISORI 0x0000 /* Disable ref interrupt */
/linux-4.4.14/net/tipc/
H A Dname_table.h57 * @ref: publishing port
75 u32 ref; member in struct:publication
106 int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
110 u32 node, u32 ref, u32 key);
112 u32 lower, u32 node, u32 ref,
H A Dname_distr.c64 i->ref = htonl(p->ref); publ_to_item()
278 publ->node, publ->ref, publ->key); tipc_publ_purge()
285 " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n", tipc_publ_purge()
286 publ->type, publ->lower, publ->node, publ->ref, tipc_publ_purge()
318 ntohl(i->ref), ntohl(i->key)); tipc_update_nametbl()
326 node, ntohl(i->ref), tipc_update_nametbl()
H A Dname_table.c135 publ->ref = port_ref; publ_create()
257 if ((publ->ref == port) && (publ->key == key) && tipc_nameseq_insert_publ()
334 TIPC_PUBLISHED, publ->ref, tipc_nameseq_insert_publ()
354 u32 ref, u32 key) tipc_nameseq_remove_publ()
370 if ((publ->key == key) && (publ->ref == ref) && tipc_nameseq_remove_publ()
404 TIPC_WITHDRAWN, publ->ref, tipc_nameseq_remove_publ()
436 crs->ref, crs->node, tipc_nameseq_subscribe()
489 u32 lower, u32 node, u32 ref, tipc_nametbl_remove_publ()
499 publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key); tipc_nametbl_remove_publ()
532 u32 ref = 0; tipc_nametbl_translate() local
590 ref = publ->ref; tipc_nametbl_translate()
597 return ref; tipc_nametbl_translate()
638 tipc_plist_push(dports, publ->ref); tipc_nametbl_mc_translate()
687 int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref, tipc_nametbl_withdraw() argument
696 ref, key); tipc_nametbl_withdraw()
706 "(type=%u, lower=%u, ref=%u, key=%u)\n", tipc_nametbl_withdraw()
707 type, lower, ref, key); tipc_nametbl_withdraw()
805 publ->ref, publ->key); tipc_purge_publications()
889 if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref)) __tipc_nl_add_nametable_publ()
351 tipc_nameseq_remove_publ(struct net *net, struct name_seq *nseq, u32 inst, u32 node, u32 ref, u32 key) tipc_nameseq_remove_publ() argument
488 tipc_nametbl_remove_publ(struct net *net, u32 type, u32 lower, u32 node, u32 ref, u32 key) tipc_nametbl_remove_publ() argument
H A Dname_distr.h49 * @ref: publishing port reference
66 __be32 ref; member in struct:distr_item
/linux-4.4.14/drivers/usb/mon/
H A Dmon_main.c33 * This must be called with mon_lock taken because of mbus->ref.
56 kref_get(&mbus->ref); mon_reader_add()
62 * This is called with mon_lock taken, so we can decrement mbus->ref.
75 kref_put(&mbus->ref, mon_bus_drop); mon_reader_del()
220 kref_put(&mbus->ref, mon_bus_drop); mon_bus_remove()
272 struct mon_bus *mbus = container_of(r, struct mon_bus, ref); mon_bus_drop()
289 kref_init(&mbus->ref); mon_bus_init()
316 kref_init(&mbus->ref); mon_bus0_init()
413 atomic_set(&mbus->ref.refcount, 2); /* Force leak */ mon_exit()
417 kref_put(&mbus->ref, mon_bus_drop); mon_exit()
H A Dusb_mon.h32 struct kref ref; /* Under mon_lock */ member in struct:mon_bus
/linux-4.4.14/drivers/gpu/drm/radeon/
H A Drv740_dpm.c36 u32 ref = 0; rv740_get_decoded_reference_divider() local
40 ref = 1; rv740_get_decoded_reference_divider()
43 ref = 2; rv740_get_decoded_reference_divider()
46 ref = 3; rv740_get_decoded_reference_divider()
49 ref = 2; rv740_get_decoded_reference_divider()
52 ref = 3; rv740_get_decoded_reference_divider()
55 ref = 4; rv740_get_decoded_reference_divider()
58 ref = 5; rv740_get_decoded_reference_divider()
62 ref = 0; rv740_get_decoded_reference_divider()
66 return ref; rv740_get_decoded_reference_divider()
H A Dradeon_legacy_tv.c46 /* tv pll setting for 27 mhz ref clk */
55 /* tv pll setting for 14 mhz ref clk */
170 { /* NTSC timing for 27 Mhz ref clk */
185 { /* PAL timing for 27 Mhz ref clk */
200 { /* NTSC timing for 14 Mhz ref clk */
215 { /* PAL timing for 14 Mhz ref clk */
/linux-4.4.14/include/xen/interface/io/
H A Dnetif.h71 * ring-ref keys are written as before, simplifying the backend processing
76 * event-channel (or event-channel-{tx,rx}) and {tx,rx}-ring-ref keys,
84 * /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>"
85 * /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "<ring-ref-rx0>"
89 * /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>"
90 * /local/domain/1/device/vif/0/queue-1/rx-ring-ref = "<ring-ref-rx1"
H A Dtpmif.h19 * 3. Frontend populates ring-ref, event-channel, feature-protocol-v2
/linux-4.4.14/drivers/staging/rdma/hfi1/
H A Dmmap.c62 * @ref: a pointer to the kref within struct hfi1_mmap_info
64 void hfi1_release_mmap_info(struct kref *ref) hfi1_release_mmap_info() argument
67 container_of(ref, struct hfi1_mmap_info, ref); hfi1_release_mmap_info()
86 kref_get(&ip->ref); hfi1_vma_open()
93 kref_put(&ip->ref, hfi1_release_mmap_info); hfi1_vma_close()
172 kref_init(&ip->ref); hfi1_create_mmap_info()
/linux-4.4.14/include/trace/events/
H A Dbtrfs.h452 TP_PROTO(struct btrfs_delayed_ref_node *ref,
456 TP_ARGS(ref, full_ref, action),
470 __entry->bytenr = ref->bytenr;
471 __entry->num_bytes = ref->num_bytes;
476 __entry->type = ref->type;
477 __entry->seq = ref->seq;
494 TP_PROTO(struct btrfs_delayed_ref_node *ref,
498 TP_ARGS(ref, full_ref, action)
503 TP_PROTO(struct btrfs_delayed_ref_node *ref,
507 TP_ARGS(ref, full_ref, action)
512 TP_PROTO(struct btrfs_delayed_ref_node *ref,
516 TP_ARGS(ref, full_ref, action),
531 __entry->bytenr = ref->bytenr;
532 __entry->num_bytes = ref->num_bytes;
538 __entry->type = ref->type;
539 __entry->seq = ref->seq;
558 TP_PROTO(struct btrfs_delayed_ref_node *ref,
562 TP_ARGS(ref, full_ref, action)
567 TP_PROTO(struct btrfs_delayed_ref_node *ref,
571 TP_ARGS(ref, full_ref, action)
576 TP_PROTO(struct btrfs_delayed_ref_node *ref,
580 TP_ARGS(ref, head_ref, action),
590 __entry->bytenr = ref->bytenr;
591 __entry->num_bytes = ref->num_bytes;
605 TP_PROTO(struct btrfs_delayed_ref_node *ref,
609 TP_ARGS(ref, head_ref, action)
614 TP_PROTO(struct btrfs_delayed_ref_node *ref,
618 TP_ARGS(ref, head_ref, action)
H A Dkvm.h249 TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
250 TP_ARGS(gfn, level, slot, ref),
264 __entry->referenced = ref;
/linux-4.4.14/drivers/md/persistent-data/
H A Ddm-space-map-common.h28 * Any entry that has a ref count higher than 2 gets entered in the ref
29 * count tree. The leaf values for this tree are the 32-bit ref counts.
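
Editor's note: per the comment above, counts 0-2 are stored inline and anything higher spills into the ref count tree with 32-bit leaves. A toy standalone model of that split follows; the value-3 "look in the tree" sentinel and the flat overflow array are illustrative assumptions, not dm's on-disk format.

#include <stdint.h>
#include <stdio.h>

#define NR_BLOCKS 16

static uint8_t  bitmap[NR_BLOCKS];	/* 2-bit counts, one byte each here */
static uint32_t overflow[NR_BLOCKS];	/* stand-in for the ref count tree  */

static uint32_t get_count(unsigned block)
{
	uint8_t inline_count = bitmap[block];

	if (inline_count < 3)
		return inline_count;	/* 0, 1 or 2 stored inline */
	return overflow[block];		/* >2: consult the ref count tree */
}

static void set_count(unsigned block, uint32_t count)
{
	if (count < 3) {
		bitmap[block] = (uint8_t)count;
	} else {
		bitmap[block] = 3;	/* sentinel: look in the tree */
		overflow[block] = count;
	}
}

int main(void)
{
	set_count(5, 2);
	set_count(9, 117);
	printf("%u %u\n", get_count(5), get_count(9));	/* 2 117 */
	return 0;
}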
/linux-4.4.14/arch/arm/mach-omap2/
H A Dhsmmc.h14 * capabilities OR'd (ref. linux/mmc/host.h) */
/linux-4.4.14/crypto/asymmetric_keys/
H A Dx509_public_key.c83 key_ref_t ref; x509_request_asymmetric_key() local
114 ref = keyring_search(make_key_ref(keyring, 1), x509_request_asymmetric_key()
116 if (IS_ERR(ref)) x509_request_asymmetric_key()
117 pr_debug("Request for key '%s' err %ld\n", req, PTR_ERR(ref)); x509_request_asymmetric_key()
120 if (IS_ERR(ref)) { x509_request_asymmetric_key()
121 switch (PTR_ERR(ref)) { x509_request_asymmetric_key()
128 return ERR_CAST(ref); x509_request_asymmetric_key()
132 key = key_ref_to_ptr(ref); x509_request_asymmetric_key()
/linux-4.4.14/scripts/gdb/linux/
H A Dmodules.py76 gdb.write("{address} {name:<19} {size:>8} {ref}".format(
80 ref=str(module['refcnt']['counter'])))
/linux-4.4.14/arch/arm/common/
H A Dicst.c32 u64 dividend = p->ref * 2 * (u64)(vco.v + 8); icst_hz()
82 fref_div = (2 * p->ref) / rd; icst_hz_to_vco()
/linux-4.4.14/drivers/infiniband/core/
H A Duverbs_main.c151 static void ib_uverbs_release_event_file(struct kref *ref) ib_uverbs_release_event_file() argument
154 container_of(ref, struct ib_uverbs_event_file, ref); ib_uverbs_release_event_file()
173 kref_put(&ev_file->ref, ib_uverbs_release_event_file); ib_uverbs_release_ucq()
318 static void ib_uverbs_release_file(struct kref *ref) ib_uverbs_release_file() argument
321 container_of(ref, struct ib_uverbs_file, ref); ib_uverbs_release_file()
450 kref_put(&file->uverbs_file->ref, ib_uverbs_release_file); ib_uverbs_event_close()
451 kref_put(&file->ref, ib_uverbs_release_event_file); ib_uverbs_event_close()
584 kref_put(&file->async_file->ref, ib_uverbs_release_event_file); ib_uverbs_free_async_event_file()
600 kref_init(&ev_file->ref); ib_uverbs_alloc_event_file()
605 kref_get(&ev_file->uverbs_file->ref); ib_uverbs_alloc_event_file()
622 kref_get(&uverbs_file->async_file->ref); ib_uverbs_alloc_event_file()
638 kref_put(&uverbs_file->async_file->ref, ib_uverbs_release_event_file); ib_uverbs_alloc_event_file()
643 kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file); ib_uverbs_alloc_event_file()
644 kref_put(&ev_file->ref, ib_uverbs_release_event_file); ib_uverbs_alloc_event_file()
650 * takes a ref to the event file struct that it returns; if
670 kref_get(&ev_file->ref); ib_uverbs_lookup_comp_file()
923 kref_init(&file->ref); ib_uverbs_open()
964 kref_put(&file->async_file->ref, ib_uverbs_release_event_file); ib_uverbs_close()
966 kref_put(&file->ref, ib_uverbs_release_file); ib_uverbs_close()
1182 kref_get(&file->ref); ib_uverbs_free_hw_resources()
1196 kref_put(&file->ref, ib_uverbs_release_file); ib_uverbs_free_hw_resources()
1244 * their files, then the uverbs device ref count will be zero ib_uverbs_remove_one()
/linux-4.4.14/drivers/staging/media/lirc/
H A Dlirc_zilog.c70 struct kref ref; member in struct:IR_rx
86 struct kref ref; member in struct:IR_tx
99 struct kref ref; member in struct:IR
166 kref_get(&ir->ref); get_ir_device()
169 kref_get(&ir->ref); get_ir_device()
175 static void release_ir_device(struct kref *ref) release_ir_device() argument
177 struct IR *ir = container_of(ref, struct IR, ref); release_ir_device()
202 return kref_put(&ir->ref, release_ir_device); put_ir_device()
205 released = kref_put(&ir->ref, release_ir_device); put_ir_device()
219 kref_get(&rx->ref); get_ir_rx()
235 static void release_ir_rx(struct kref *ref) release_ir_rx() argument
237 struct IR_rx *rx = container_of(ref, struct IR_rx, ref); release_ir_rx()
258 released = kref_put(&rx->ref, release_ir_rx); put_ir_rx()
281 kref_get(&tx->ref); get_ir_tx()
286 static void release_ir_tx(struct kref *ref) release_ir_tx() argument
288 struct IR_tx *tx = container_of(ref, struct IR_tx, ref); release_ir_tx()
303 released = kref_put(&tx->ref, release_ir_tx); put_ir_tx()
1482 kref_init(&ir->ref); ir_probe()
1522 kref_init(&tx->ref); ir_probe()
1532 /* An ir ref goes to the struct IR_tx instance */ ir_probe()
1535 /* A tx ref goes to the i2c_client */ ir_probe()
1566 kref_init(&rx->ref); ir_probe()
1575 /* An ir ref goes to the struct IR_rx instance */ ir_probe()
1578 /* An rx ref goes to the i2c_client */ ir_probe()
1586 /* An ir ref goes to the new rx polling kthread */ ir_probe()
1594 /* Failed kthread, so put back the ir ref */ ir_probe()
1596 /* Failure exit, so put back rx ref from i2c_client */ ir_probe()
/linux-4.4.14/drivers/media/v4l2-core/
H A Dv4l2-ctrls.c1722 struct v4l2_ctrl_ref *ref, *next_ref; v4l2_ctrl_handler_free() local
1731 list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { v4l2_ctrl_handler_free()
1732 list_del(&ref->node); v4l2_ctrl_handler_free()
1733 kfree(ref); v4l2_ctrl_handler_free()
1759 struct v4l2_ctrl_ref *ref; find_private_ref() local
1762 list_for_each_entry(ref, &hdl->ctrl_refs, node) { find_private_ref()
1765 if (V4L2_CTRL_ID2CLASS(ref->ctrl->id) == V4L2_CTRL_CLASS_USER && find_private_ref()
1766 V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) { find_private_ref()
1767 if (!ref->ctrl->is_int) find_private_ref()
1770 return ref; find_private_ref()
1780 struct v4l2_ctrl_ref *ref; find_ref() local
1795 ref = hdl->buckets ? hdl->buckets[bucket] : NULL; find_ref()
1796 while (ref && ref->ctrl->id != id) find_ref()
1797 ref = ref->next; find_ref()
1799 if (ref) find_ref()
1800 hdl->cached = ref; /* cache it! */ find_ref()
1801 return ref; find_ref()
1808 struct v4l2_ctrl_ref *ref = NULL; find_ref_lock() local
1812 ref = find_ref(hdl, id); find_ref_lock()
1815 return ref; find_ref_lock()
1821 struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); v4l2_ctrl_find() local
1823 return ref ? ref->ctrl : NULL; v4l2_ctrl_find()
1831 struct v4l2_ctrl_ref *ref; handler_new_ref() local
1876 list_for_each_entry(ref, &hdl->ctrl_refs, node) { handler_new_ref()
1877 if (ref->ctrl->id < id) handler_new_ref()
1880 if (ref->ctrl->id == id) { handler_new_ref()
1884 list_add(&new_ref->node, ref->node.prev); handler_new_ref()
2224 struct v4l2_ctrl_ref *ref; v4l2_ctrl_add_handler() local
2233 list_for_each_entry(ref, &add->ctrl_refs, node) { v4l2_ctrl_add_handler()
2234 struct v4l2_ctrl *ctrl = ref->ctrl; v4l2_ctrl_add_handler()
2469 struct v4l2_ctrl_ref *ref; v4l2_query_ext_ctrl() local
2478 ref = find_ref(hdl, id); v4l2_query_ext_ctrl()
2498 ref = NULL; /* Yes, so there is no next control */ v4l2_query_ext_ctrl()
2499 } else if (ref) { v4l2_query_ext_ctrl()
2502 list_for_each_entry_continue(ref, &hdl->ctrl_refs, node) { v4l2_query_ext_ctrl()
2503 is_compound = ref->ctrl->is_array || v4l2_query_ext_ctrl()
2504 ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; v4l2_query_ext_ctrl()
2505 if (id < ref->ctrl->id && v4l2_query_ext_ctrl()
2509 if (&ref->node == &hdl->ctrl_refs) v4l2_query_ext_ctrl()
2510 ref = NULL; v4l2_query_ext_ctrl()
2516 list_for_each_entry(ref, &hdl->ctrl_refs, node) { v4l2_query_ext_ctrl()
2517 is_compound = ref->ctrl->is_array || v4l2_query_ext_ctrl()
2518 ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; v4l2_query_ext_ctrl()
2519 if (id < ref->ctrl->id && v4l2_query_ext_ctrl()
2523 if (&ref->node == &hdl->ctrl_refs) v4l2_query_ext_ctrl()
2524 ref = NULL; v4l2_query_ext_ctrl()
2529 if (!ref) v4l2_query_ext_ctrl()
2532 ctrl = ref->ctrl; v4l2_query_ext_ctrl()
2707 struct v4l2_ctrl_ref *ref; prepare_ext_ctrls() local
2720 ref = find_ref_lock(hdl, id); prepare_ext_ctrls()
2721 if (ref == NULL) prepare_ext_ctrls()
2723 ctrl = ref->ctrl; prepare_ext_ctrls()
2730 ref = find_ref_lock(hdl, ctrl->cluster[0]->id); prepare_ext_ctrls()
2743 /* Store the ref to the master control of the cluster */ prepare_ext_ctrls()
2744 h->mref = ref; prepare_ext_ctrls()
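
Editor's note: find_ref() above is a chained hash walk with a one-entry cache: the handler remembers the last ref it found (hdl->cached), so a repeated query for the same control id skips the bucket walk entirely. A standalone miniature of that shape (bucket count and hash function are my choices, not v4l2's):

#include <stdio.h>
#include <stdlib.h>

#define NR_BUCKETS 8

struct ref {
	unsigned id;
	struct ref *next;	/* chain within one bucket */
};

struct handler {
	struct ref *buckets[NR_BUCKETS];
	struct ref *cached;	/* last hit, checked before hashing */
};

static struct ref *find_ref(struct handler *hdl, unsigned id)
{
	struct ref *ref;

	if (hdl->cached && hdl->cached->id == id)
		return hdl->cached;		/* cache hit, no walk */

	ref = hdl->buckets[id % NR_BUCKETS];
	while (ref && ref->id != id)
		ref = ref->next;
	if (ref)
		hdl->cached = ref;		/* cache it, as v4l2 does */
	return ref;
}

int main(void)
{
	struct handler hdl = { { NULL }, NULL };
	struct ref a = { 42, NULL };

	hdl.buckets[42 % NR_BUCKETS] = &a;
	printf("%p %p\n", (void *)find_ref(&hdl, 42),
	       (void *)find_ref(&hdl, 42));	/* second call hits the cache */
	return 0;
}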
H A Dv4l2-device.c41 kref_init(&v4l2_dev->ref); v4l2_device_register()
61 static void v4l2_device_release(struct kref *ref) v4l2_device_release() argument
64 container_of(ref, struct v4l2_device, ref); v4l2_device_release()
72 return kref_put(&v4l2_dev->ref, v4l2_device_release); v4l2_device_put()
/linux-4.4.14/drivers/gpu/drm/virtio/
H A Dvirtgpu_ttm.c53 static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref) virtio_gpu_ttm_mem_global_init() argument
55 return ttm_mem_global_init(ref->object); virtio_gpu_ttm_mem_global_init()
58 static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref) virtio_gpu_ttm_mem_global_release() argument
60 ttm_mem_global_release(ref->object); virtio_gpu_ttm_mem_global_release()
84 global_ref = &vgdev->mman.bo_global_ref.ref; virtio_gpu_ttm_global_init()
103 drm_global_item_unref(&vgdev->mman.bo_global_ref.ref); virtio_gpu_ttm_global_fini()
440 vgdev->mman.bo_global_ref.ref.object, virtio_gpu_ttm_init()
/linux-4.4.14/drivers/gpu/drm/qxl/
H A Dqxl_ttm.c50 static int qxl_ttm_mem_global_init(struct drm_global_reference *ref) qxl_ttm_mem_global_init() argument
52 return ttm_mem_global_init(ref->object); qxl_ttm_mem_global_init()
55 static void qxl_ttm_mem_global_release(struct drm_global_reference *ref) qxl_ttm_mem_global_release() argument
57 ttm_mem_global_release(ref->object); qxl_ttm_mem_global_release()
81 global_ref = &qdev->mman.bo_global_ref.ref; qxl_ttm_global_init()
100 drm_global_item_unref(&qdev->mman.bo_global_ref.ref); qxl_ttm_global_fini()
399 qdev->mman.bo_global_ref.ref.object, qxl_ttm_init()
/linux-4.4.14/drivers/gpu/drm/ast/
H A Dast_ttm.c39 ast_ttm_mem_global_init(struct drm_global_reference *ref) ast_ttm_mem_global_init() argument
41 return ttm_mem_global_init(ref->object); ast_ttm_mem_global_init()
45 ast_ttm_mem_global_release(struct drm_global_reference *ref) ast_ttm_mem_global_release() argument
47 ttm_mem_global_release(ref->object); ast_ttm_mem_global_release()
69 global_ref = &ast->ttm.bo_global_ref.ref; ast_ttm_global_init()
89 drm_global_item_unref(&ast->ttm.bo_global_ref.ref); ast_ttm_global_release()
261 ast->ttm.bo_global_ref.ref.object, ast_mm_init()
/linux-4.4.14/drivers/gpu/drm/bochs/
H A Dbochs_mm.c19 static int bochs_ttm_mem_global_init(struct drm_global_reference *ref) bochs_ttm_mem_global_init() argument
21 return ttm_mem_global_init(ref->object); bochs_ttm_mem_global_init()
24 static void bochs_ttm_mem_global_release(struct drm_global_reference *ref) bochs_ttm_mem_global_release() argument
26 ttm_mem_global_release(ref->object); bochs_ttm_mem_global_release()
48 global_ref = &bochs->ttm.bo_global_ref.ref; bochs_ttm_global_init()
68 drm_global_item_unref(&bochs->ttm.bo_global_ref.ref); bochs_ttm_global_release()
227 bochs->ttm.bo_global_ref.ref.object, bochs_mm_init()
/linux-4.4.14/drivers/gpu/drm/cirrus/
H A Dcirrus_ttm.c39 cirrus_ttm_mem_global_init(struct drm_global_reference *ref) cirrus_ttm_mem_global_init() argument
41 return ttm_mem_global_init(ref->object); cirrus_ttm_mem_global_init()
45 cirrus_ttm_mem_global_release(struct drm_global_reference *ref) cirrus_ttm_mem_global_release() argument
47 ttm_mem_global_release(ref->object); cirrus_ttm_mem_global_release()
69 global_ref = &cirrus->ttm.bo_global_ref.ref; cirrus_ttm_global_init()
89 drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref); cirrus_ttm_global_release()
261 cirrus->ttm.bo_global_ref.ref.object, cirrus_mm_init()
/linux-4.4.14/drivers/gpu/drm/mgag200/
H A Dmgag200_ttm.c39 mgag200_ttm_mem_global_init(struct drm_global_reference *ref) mgag200_ttm_mem_global_init() argument
41 return ttm_mem_global_init(ref->object); mgag200_ttm_mem_global_init()
45 mgag200_ttm_mem_global_release(struct drm_global_reference *ref) mgag200_ttm_mem_global_release() argument
47 ttm_mem_global_release(ref->object); mgag200_ttm_mem_global_release()
69 global_ref = &ast->ttm.bo_global_ref.ref; mgag200_ttm_global_init()
89 drm_global_item_unref(&ast->ttm.bo_global_ref.ref); mgag200_ttm_global_release()
261 mdev->ttm.bo_global_ref.ref.object, mgag200_mm_init()
/linux-4.4.14/drivers/net/
H A Dxen-netfront.c215 grant_ref_t ref = queue->grant_rx_ref[i]; xennet_get_rx_ref() local
217 return ref; xennet_get_rx_ref()
293 grant_ref_t ref; xennet_alloc_rx_buffers() local
306 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); xennet_alloc_rx_buffers()
307 BUG_ON((signed short)ref < 0); xennet_alloc_rx_buffers()
308 queue->grant_rx_ref[id] = ref; xennet_alloc_rx_buffers()
313 gnttab_page_grant_foreign_access_ref_one(ref, xennet_alloc_rx_buffers()
318 req->gref = ref; xennet_alloc_rx_buffers()
431 grant_ref_t ref; xennet_tx_setup_grant() local
439 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); xennet_tx_setup_grant()
440 BUG_ON((signed short)ref < 0); xennet_tx_setup_grant()
442 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, xennet_tx_setup_grant()
447 queue->grant_tx_ref[id] = ref; xennet_tx_setup_grant()
450 tx->gref = ref; xennet_tx_setup_grant()
707 grant_ref_t ref) xennet_move_rx_slot()
713 queue->grant_rx_ref[new] = ref; xennet_move_rx_slot()
715 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; xennet_move_rx_slot()
731 grant_ref_t ref; xennet_get_extras() local
755 ref = xennet_get_rx_ref(queue, cons); xennet_get_extras()
756 xennet_move_rx_slot(queue, skb, ref); xennet_get_extras()
772 grant_ref_t ref = xennet_get_rx_ref(queue, cons); xennet_get_responses() local
789 xennet_move_rx_slot(queue, skb, ref); xennet_get_responses()
799 if (ref == GRANT_INVALID_REF) { xennet_get_responses()
807 ret = gnttab_end_foreign_access_ref(ref, 0); xennet_get_responses()
810 gnttab_release_grant_reference(&queue->gref_rx_head, ref); xennet_get_responses()
827 ref = xennet_get_rx_ref(queue, cons + slots); xennet_get_responses()
1130 int id, ref; xennet_release_rx_bufs() local
1142 ref = queue->grant_rx_ref[id]; xennet_release_rx_bufs()
1143 if (ref == GRANT_INVALID_REF) xennet_release_rx_bufs()
1148 /* gnttab_end_foreign_access() needs a page ref until xennet_release_rx_bufs()
1152 gnttab_end_foreign_access(ref, 0, xennet_release_rx_bufs()
1377 static void xennet_end_access(int ref, void *page) xennet_end_access() argument
1380 if (ref != GRANT_INVALID_REF) xennet_end_access()
1381 gnttab_end_foreign_access(ref, 0, (unsigned long)page); xennet_end_access()
1684 err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", write_queue_xenstore_keys()
1687 message = "writing tx-ring-ref"; write_queue_xenstore_keys()
1691 err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", write_queue_xenstore_keys()
1694 message = "writing rx-ring-ref"; write_queue_xenstore_keys()
706 xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, grant_ref_t ref) xennet_move_rx_slot() argument
/linux-4.4.14/arch/tile/include/hv/
H A Dnetio_intf.h333 * This structure is opaque and accessed through the @ref ingress.
377 * been processed by @ref netio_populate_buffer() or
378 * @ref netio_populate_prepend_buffer(). This structure is opaque
379 * and accessed through the @ref egress.
534 * through the @ref ingress and the @ref egress.
551 * packet data must obey the memory coherency rules outlined in @ref input.
609 * by @ref netio_get_packet(), on which @ref netio_populate_buffer() or
610 * @ref netio_populate_prepend_buffer have not been called). Use of this
630 * @ref netio_populate_buffer() or @ref netio_populate_prepend_buffer()
631 * have been called, or one retrieved by @ref netio_get_buffer()). Use of
649 * packet (i.e. one on which @ref netio_populate_buffer() or
650 * @ref netio_populate_prepend_buffer() have been called, or one
651 * retrieved by @ref netio_get_buffer()), and zero if the packet
652 * is an 'ingress' packet (i.e. one retrieved by @ref netio_get_packet(),
964 * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED_M()
1147 * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
1150 * @ref NETIO_PKT_BAD_M() instead of explicitly checking status with this
1261 * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
1264 * @ref NETIO_PKT_BAD() instead of explicitly checking status with this
1553 * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
1770 * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
1790 * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
1901 * See @ref netio_populate_prepend_buffer() to get a full description of
1917 * See @ref netio_populate_prepend_buffer() to get a full description of
1932 * See @ref netio_populate_prepend_buffer() to get a full description of
2112 * The @ref netio_input_register() function uses this structure to define
2222 and netio_input_unregister(). See @ref link for more information
2239 being retrieved by @ref netio_get_packet(). If the IPP's distribution
2243 for the same queue_id (see @ref drops). This value must
2263 retrieved via @ref netio_get_buffer(). This value must be greater
2264 than zero if the application will ever use @ref netio_get_buffer()
2266 ::NETIO_MAX_SEND_BUFFERS. See @ref epp for more details on empty
2274 operation; this may speed the execution of @ref netio_get_buffer().
2275 This may be no larger than @ref num_send_buffers_small_total. See @ref
2283 @ref netio_get_buffer(). This value must be greater than zero if the
2284 application will ever use @ref netio_get_buffer() to allocate empty
2286 See @ref epp for more details on empty buffer caching.
2293 operation; this may speed the execution of @ref netio_get_buffer().
2294 This may be no larger than @ref num_send_buffers_large_total. See @ref
2302 @ref netio_get_buffer(). This value must be greater than zero if the
2303 application will ever use @ref netio_get_buffer() to allocate empty
2305 See @ref epp for more details on empty buffer caching.
2312 operation; this may speed the execution of @ref netio_get_buffer().
2313 This may be no larger than @ref num_send_buffers_jumbo_total. See @ref
2334 See @ref buffer_node_weights for an explanation of buffer placement.
2381 For the weights to be effective, @ref total_buffer_size must be
2382 nonzero. If @ref total_buffer_size is zero, causing the default
2393 If @ref total_buffer_size is nonzero, but all weights are zero,
2399 if the ::NETIO_STRICT_HOMING flag is specified in @ref flags, then a
2409 ::NETIO_FIXED_BUFFER_VA is specified in @ref flags; see the
2420 Modifying this value is generally only helpful when using @ref
2445 /** Registration flags; used in the @ref netio_input_config_t structure.
2486 in @ref netio_input_config_t::fixed_buffer_va). This allows multiple
2491 @ref netio_input_config_t::fixed_buffer_va must be a
2492 multiple of 16 MB, and the packet buffers will occupy @ref
2496 shared library code or data, @ref netio_input_register() will return
2499 use 0xb0000000 minus @ref netio_input_config_t::total_buffer_size.
2518 ::NETIO_NO_RECV. See @ref link for more information on link
2528 or the I/O shim is full. See @ref link for more information on link
2549 See @ref link for more information on link management.
2558 See @ref link for more information on link management.
2566 See @ref link for more information on link management.
2573 ::NETIO_NO_RECV. See @ref link for more information on link management.
2754 * Flags and structures for @ref netio_get() and @ref netio_set().
2761 /** Interface MAC address. This address is only valid with @ref netio_get().
2792 /** IPP statistics. This address is only valid with @ref netio_get(). The
2799 * flags. With @ref netio_get(), this will indicate which flags are
2809 * With @ref netio_set(), this will attempt to immediately bring up the
2821 /** Current link state. This address is only valid with @ref netio_get().
2833 /** Packet Coherence protocol. This address is only valid with @ref netio_get().
2839 * flags, which specify the desired state for the link. With @ref
2850 * With @ref netio_get(), this will indicate the desired state for the
2858 * address passed to @ref netio_get(). */
2955 * address passed to @ref netio_get() or @ref netio_set(). */
2968 uint64_t addr; /**< Value to pass to @ref netio_get() or
2969 * @ref netio_set(). */
/linux-4.4.14/drivers/misc/mic/scif/
H A Dscif_mmap.c128 * the window has not yet been unregistered we can drop the ref count
347 /* Initiate window destruction if ref count is zero */ scif_put_pages()
422 * No more failures expected. Bump up the ref count for all
454 * 1) Decrement ref count.
455 * 2) If the ref count drops to zero then send a SCIF_MUNMAP message to peer.
511 struct kref ref; member in struct:vma_pvt
514 static void vma_pvt_release(struct kref *ref) vma_pvt_release() argument
516 struct vma_pvt *vmapvt = container_of(ref, struct vma_pvt, ref); vma_pvt_release()
541 kref_get(&vmapvt->ref); scif_vma_open()
594 kref_put(&vmapvt->ref, vma_pvt_release); scif_munmap()
641 kref_init(&vmapvt->ref); scif_mmap()
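
The scif_mmap.c hits above trace the full struct kref lifecycle: kref_init() when the private data is allocated in scif_mmap(), kref_get() when a VMA is duplicated in scif_vma_open(), and kref_put() in scif_munmap() with a release callback that recovers the enclosing object via container_of(). A minimal self-contained sketch of that pattern follows; the structure and function names are illustrative, not taken from the kernel source:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct vma_private {		/* hypothetical stand-in for struct vma_pvt */
		struct kref ref;
	};

	static void vma_private_release(struct kref *ref)
	{
		/* recover the enclosing object from its embedded kref */
		struct vma_private *p = container_of(ref, struct vma_private, ref);

		kfree(p);
	}

	static struct vma_private *vma_private_alloc(void)
	{
		struct vma_private *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (p)
			kref_init(&p->ref);	/* count starts at 1, owned by the caller */
		return p;
	}

	static void vma_private_get(struct vma_private *p)
	{
		kref_get(&p->ref);	/* new user, e.g. a duplicated VMA */
	}

	static void vma_private_put(struct vma_private *p)
	{
		kref_put(&p->ref, vma_private_release);	/* release runs at zero */
	}

The sch_atm.c hits that follow show the same bookkeeping done by hand with a plain int, which is why atm_tc_destroy() has to police leaked references itself with pr_err().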
/linux-4.4.14/net/sched/
H A Dsch_atm.c51 int ref; /* reference count */ member in struct:atm_flow_data
117 flow->ref++; atm_tc_get()
139 if (--flow->ref) atm_tc_put()
289 flow->ref = 1; atm_tc_change()
320 if (flow->ref < 2) { atm_tc_delete()
321 pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref); atm_tc_delete()
324 if (flow->ref > 2) atm_tc_delete()
553 p->link.ref = 1; atm_tc_init()
579 if (flow->ref > 1) atm_tc_destroy()
580 pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref); atm_tc_destroy()
H A Dematch.c190 u32 ref; tcf_em_validate() local
192 if (data_len < sizeof(ref)) tcf_em_validate()
194 ref = *(u32 *) data; tcf_em_validate()
196 if (ref >= tree_hdr->nmatches) tcf_em_validate()
202 if (ref <= idx) tcf_em_validate()
206 em->data = ref; tcf_em_validate()
H A Dact_api.c447 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_action_dump_old() argument
449 return a->ops->dump(skb, a, bind, ref); tcf_action_dump_old()
453 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_action_dump_1() argument
466 err = tcf_action_dump_old(skb, a, bind, ref); tcf_action_dump_1()
479 tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref) tcf_action_dump() argument
489 err = tcf_action_dump_1(skb, a, bind, ref); list_for_each_entry()
661 u16 flags, int event, int bind, int ref) tca_get_fill()
680 if (tcf_action_dump(skb, actions, bind, ref) < 0) tca_get_fill()
660 tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq, u16 flags, int event, int bind, int ref) tca_get_fill() argument
H A Dact_connmark.c143 int bind, int ref) tcf_connmark_dump()
150 .refcnt = ci->tcf_refcnt - ref, tcf_connmark_dump()
142 tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_connmark_dump() argument
H A Dact_gact.c148 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_gact_dump() argument
154 .refcnt = gact->tcf_refcnt - ref, tcf_gact_dump()
H A Dact_pedit.c174 int bind, int ref) tcf_pedit_dump()
195 opt->refcnt = p->tcf_refcnt - ref; tcf_pedit_dump()
173 tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_pedit_dump() argument
H A Dact_simple.c137 int bind, int ref) tcf_simp_dump()
143 .refcnt = d->tcf_refcnt - ref, tcf_simp_dump()
136 tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_simp_dump() argument
H A Dact_skbedit.c138 int bind, int ref) tcf_skbedit_dump()
144 .refcnt = d->tcf_refcnt - ref, tcf_skbedit_dump()
137 tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_skbedit_dump() argument
H A Dact_vlan.c151 int bind, int ref) tcf_vlan_dump()
157 .refcnt = v->tcf_refcnt - ref, tcf_vlan_dump()
150 tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) tcf_vlan_dump() argument
/linux-4.4.14/fs/ocfs2/
H A Dsysfile.c118 /* get a ref in addition to the array ref */ ocfs2_get_system_file_inode()
126 /* this gets one ref thru iget */ ocfs2_get_system_file_inode()
/linux-4.4.14/include/net/caif/
H A Dcaif_dev.h81 * caif_client_register_refcnt - register ref-count functions provided by client.
84 * @hold: Function provided by client layer increasing ref-count
85 * @put: Function provided by client layer decreasing ref-count
/linux-4.4.14/drivers/net/ethernet/intel/i40e/
H A Di40e_hmc.c100 /* increment the ref count */ i40e_add_sd_table_entry()
125 * 4. Initializes the pd_entry's ref count to 1
206 * 3. Decrement the ref count for the pd _entry
236 /* get the entry and decrease its ref counter */ i40e_remove_pd_bp()
273 /* get the entry and decrease its ref counter */ i40e_prep_remove_sd_bp()
304 /* get the entry and decrease its ref counter */ i40e_remove_sd_bp_new()
/linux-4.4.14/drivers/char/hw_random/
H A Dcore.c99 struct hwrng *rng = container_of(kref, struct hwrng, ref); cleanup_rng()
130 kref_put(&current_rng->ref, cleanup_rng); drop_current_rng()
144 kref_get(&rng->ref); get_current_rng()
158 kref_put(&rng->ref, cleanup_rng); put_rng()
164 if (kref_get_unless_zero(&rng->ref)) hwrng_init()
175 kref_init(&rng->ref); hwrng_init()
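
The hw_random/core.c hits combine plain kref_get()/kref_put() with kref_get_unless_zero(), which takes a reference only if the count has not already reached zero. That is the standard idiom for looking up an object on a shared list while its final put may be racing. A hedged sketch of the lookup side, assuming an illustrative list and lock (none of these names come from the driver):

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/mutex.h>

	struct rng_like {
		struct kref ref;
		struct list_head node;
	};

	static LIST_HEAD(rng_list);
	static DEFINE_MUTEX(rng_lock);

	/* Return the first entry that is still live, with a reference held. */
	static struct rng_like *rng_lookup(void)
	{
		struct rng_like *r, *found = NULL;

		mutex_lock(&rng_lock);
		list_for_each_entry(r, &rng_list, node) {
			/* fails, rather than resurrects, if the count is already zero */
			if (kref_get_unless_zero(&r->ref)) {
				found = r;
				break;
			}
		}
		mutex_unlock(&rng_lock);
		return found;	/* caller must kref_put() when done */
	}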
/linux-4.4.14/drivers/isdn/hardware/eicon/
H A Ddi.h90 void assign_queue(ADAPTER *a, byte e_no, word ref);
91 byte get_assign(ADAPTER *a, word ref);
/linux-4.4.14/net/sunrpc/
H A Dsvcauth.c130 if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) { auth_domain_put()
150 kref_get(&hp->ref); hlist_for_each_entry()
/linux-4.4.14/fs/quota/
H A Dquota_tree.c296 __le32 *ref; do_insert_tree() local
316 ref = (__le32 *)buf; do_insert_tree()
317 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); do_insert_tree()
325 le32_to_cpu(ref[get_index(info, do_insert_tree()
336 ref[get_index(info, dquot->dq_id, depth)] = do_insert_tree()
473 __le32 *ref = (__le32 *)buf; remove_tree() local
483 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); remove_tree()
492 ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); remove_tree()
494 for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++) remove_tree()
569 __le32 *ref = (__le32 *)buf; find_tree_dqentry() local
580 blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); find_tree_dqentry()
/linux-4.4.14/drivers/staging/speakup/
H A Ddevsynth.c78 /* zero it so if register fails, deregister will not ref invalid ptrs */ speakup_register_devsynth()
/linux-4.4.14/drivers/media/pci/cobalt/
H A Dm00479_clk_loss_detector_memmap_package.h30 /* Number of ref clk cycles before checking the clock under test */
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/clk/
H A Dnv40.c47 u32 ref = 27000, khz = 0; read_pll_1() local
50 khz = ref * N / M; read_pll_1()
66 u32 ref = 27000, khz = 0; read_pll_2() local
69 khz = ref * N1 / M1; read_pll_2()
H A Dnv50.c56 u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal); read_pll_src() local
69 nvkm_error(subdev, "ref: bad pll %06x\n", base); read_pll_src()
74 ref *= (coef & 0x01000000) ? 2 : 4; read_pll_src()
97 nvkm_error(subdev, "ref: bad pll %06x\n", base); read_pll_src()
119 return (ref * N / M) >> P; read_pll_src()
164 u32 ref = read_pll_ref(clk, base); read_pll() local
179 freq = ref * N1 / M1; read_pll()
/linux-4.4.14/net/ceph/
H A Dmsgpool.c81 kref_init(&msg->kref); /* retake single ref */ ceph_msgpool_put()
/linux-4.4.14/fs/notify/fanotify/
H A Dfanotify.h16 * We hold ref to this path so it may be dereferenced at any point
/linux-4.4.14/include/net/
H A Dtransp_v6.h62 " uid timeout inode ref pointer drops\n"
/linux-4.4.14/fs/xfs/libxfs/
H A Dxfs_sb.h22 * perag get/put wrappers for ref counting
/linux-4.4.14/arch/arm/include/asm/hardware/
H A Dicst.h18 unsigned long ref; member in struct:icst_params
/linux-4.4.14/drivers/tty/hvc/
H A Dhvc_xen.c375 int ret, evtchn, devid, ref, irq; xencons_connect_backend() local
396 ref = gnttab_claim_grant_reference(&gref_head); xencons_connect_backend()
397 if (ref < 0) xencons_connect_backend()
398 return ref; xencons_connect_backend()
399 gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id, xencons_connect_backend()
408 ret = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", ref); xencons_connect_backend()
/linux-4.4.14/drivers/mtd/
H A Dmtd_blkdevs.c44 container_of(kref, struct mtd_blktrans_dev, ref); blktrans_dev_release()
62 kref_get(&dev->ref); blktrans_dev_get()
71 kref_put(&dev->ref, blktrans_dev_release); blktrans_dev_put()
201 kref_get(&dev->ref); blktrans_open()
230 kref_put(&dev->ref, blktrans_dev_release); blktrans_open()
250 kref_put(&dev->ref, blktrans_dev_release); blktrans_release()
372 kref_init(&new->ref); add_mtd_blktrans_dev()
/linux-4.4.14/drivers/net/can/mscan/
H A Dmpc5xxx_can.c140 * for clock-source values of 'ref' or 'sys' the CANCTL1[CLKSRC] mpc512x_can_get_clock()
147 * 'ref' clock is used mpc512x_can_get_clock()
156 else if (!strcmp(clock_source, "ref")) mpc512x_can_get_clock()
183 /* no spec so far, use the 'ref' clock */ mpc512x_can_get_clock()
185 clk_in = devm_clk_get(&ofdev->dev, "ref"); mpc512x_can_get_clock()
191 "clk fit, ref[%lu] (no div) freq[%lu]\n", mpc512x_can_get_clock()
222 clk_in = devm_clk_get(&ofdev->dev, "ref"); mpc512x_can_get_clock()
/linux-4.4.14/drivers/media/pci/zoran/
H A Dvideocodec.h81 master_data -> data ref. for the master (e.g. the zr36055,57,67)
82 readreg -> ref. to read-fn from register (setup by master, used by slave)
83 writereg -> ref. to write-fn to register (setup by master, used by slave)
87 slave_data -> data ref. for the slave (e.g. the zr36050,60)
88 check -> fn-ref. checks availability of an device, returns -EIO on failure or
96 set_mode -> this fn-ref. resets the entire codec, and sets up the mode
99 set_size -> this fn-ref. sets the norm and image size for
/linux-4.4.14/fs/notify/inotify/
H A Dinotify_user.c380 /* One ref for being in the idr, one ref we just took */ inotify_idr_find_locked()
411 /* removed from the idr, drop that ref */ do_inotify_remove_from_idr()
466 * One ref for being in the idr inotify_remove_from_idr()
467 * one ref held by the caller trying to kill us inotify_remove_from_idr()
468 * one ref grabbed by inotify_idr_find inotify_remove_from_idr()
474 /* we can't really recover with bad ref cnting.. */ inotify_remove_from_idr()
480 /* match the ref taken by inotify_idr_find_locked() */ inotify_remove_from_idr()
611 /* match the ref from fsnotify_init_mark() */ inotify_new_watch()
783 /* match ref taken by inotify_idr_find */ SYSCALL_DEFINE2()
/linux-4.4.14/drivers/infiniband/hw/mthca/
H A Dmthca_provider.h144 * Each struct mthca_cq/qp also has an ref count, protected by the
159 * - increment ref count in struct
162 * - decrement ref count; if zero, wake up waiters
166 * - remove pointer and decrement ref count
168 * - wait_event until ref count is zero
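
The mthca_provider.h comment above describes a complete teardown protocol: every user brackets its access with a get/put, the final put wakes any waiter, and the destroyer first unhooks the object (so no new lookups can find it) and then sleeps until in-flight users drain. A minimal sketch of that protocol with an atomic_t and a wait queue, assuming the pointer has already been removed from the lookup array before obj_destroy() runs (names are illustrative):

	#include <linux/atomic.h>
	#include <linux/wait.h>

	struct obj {
		atomic_t refcount;
		wait_queue_head_t wait;
	};

	static void obj_init(struct obj *o)
	{
		atomic_set(&o->refcount, 1);	/* the lookup table's reference */
		init_waitqueue_head(&o->wait);
	}

	static void obj_get(struct obj *o)
	{
		atomic_inc(&o->refcount);
	}

	static void obj_put(struct obj *o)
	{
		/* the final put wakes anyone sleeping in obj_destroy() */
		if (atomic_dec_and_test(&o->refcount))
			wake_up(&o->wait);
	}

	static void obj_destroy(struct obj *o)
	{
		/* lookup pointer already removed: no new users can appear */
		obj_put(o);	/* drop the table's own reference */
		wait_event(o->wait, atomic_read(&o->refcount) == 0);
		/* now safe to free o */
	}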
/linux-4.4.14/drivers/block/aoe/
H A Daoedev.c144 d->ref--; aoedev_put()
338 || d->ref) flush()
409 "aoe: %s holds ref: %s\n", skbfree()
450 d->ref++; aoedev_by_aoeaddr()
476 d->ref = 1; aoedev_by_aoeaddr()
/linux-4.4.14/include/linux/sunrpc/
H A Dcache.h38 * Entries have a ref count and a 'hashed' flag which counts the existence
56 struct kref ref; member in struct:cache_head
196 kref_get(&h->ref); cache_get()
203 if (atomic_read(&h->ref.refcount) <= 2 && cache_put()
206 kref_put(&h->ref, cd->cache_put); cache_put()
/linux-4.4.14/arch/x86/kernel/cpu/
H A Dperf_event_intel_uncore_nhmex.c556 if (!atomic_read(&er->ref) || er->config == config) { nhmex_mbox_get_shared_reg()
557 atomic_inc(&er->ref); nhmex_mbox_get_shared_reg()
583 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { nhmex_mbox_get_shared_reg()
590 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { nhmex_mbox_get_shared_reg()
591 atomic_add(1 << (idx * 8), &er->ref); nhmex_mbox_get_shared_reg()
613 atomic_dec(&er->ref); nhmex_mbox_put_shared_reg()
619 atomic_sub(1 << (idx * 8), &er->ref); nhmex_mbox_put_shared_reg()
997 if (!atomic_read(&er->ref) || er->config == reg1->config) { nhmex_rbox_get_constraint()
998 atomic_inc(&er->ref); nhmex_rbox_get_constraint()
1008 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) || nhmex_rbox_get_constraint()
1010 atomic_add(1 << ((idx - 2) * 8), &er->ref); nhmex_rbox_get_constraint()
1016 if (!atomic_read(&er->ref) || nhmex_rbox_get_constraint()
1020 atomic_inc(&er->ref); nhmex_rbox_get_constraint()
1072 atomic_sub(1 << ((idx - 2) * 8), &er->ref); nhmex_rbox_put_constraint()
1074 atomic_dec(&er->ref); nhmex_rbox_put_constraint()
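
The uncore hits above pack several small per-index reference counts into a single atomic_t: field idx is bumped with atomic_add(1 << (idx * 8), ...) and read back with a shift and mask (the kernel source uses its __BITS_VALUE() helper for the read). A sketch of just that packing arithmetic, assuming 8-bit fields and illustrative helper names:

	#include <linux/atomic.h>

	/* four 8-bit refcounts packed into one 32-bit atomic_t */
	static inline void packed_ref_get(atomic_t *ref, int idx)
	{
		atomic_add(1 << (idx * 8), ref);
	}

	static inline void packed_ref_put(atomic_t *ref, int idx)
	{
		atomic_sub(1 << (idx * 8), ref);
	}

	static inline int packed_ref_read(atomic_t *ref, int idx)
	{
		/* shift the packed word down and mask off one field */
		return (atomic_read(ref) >> (idx * 8)) & 0xff;
	}

This is only safe while each count is known to stay below 256; an overflow would carry into the neighbouring field, so the pattern suits cases like this one where the per-index user count is inherently small.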
/linux-4.4.14/drivers/s390/scsi/
H A Dzfcp_aux.c350 kref_init(&adapter->ref); zfcp_adapter_enqueue()
443 * @ref: pointer to struct kref
446 void zfcp_adapter_release(struct kref *ref) zfcp_adapter_release() argument
448 struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter, zfcp_adapter_release()
449 ref); zfcp_adapter_release()
488 kref_get(&adapter->ref); zfcp_port_enqueue()
H A Dzfcp_ccw.c28 kref_get(&adapter->ref); zfcp_ccw_adapter_by_cdev()
38 kref_put(&adapter->ref, zfcp_adapter_release); zfcp_ccw_adapter_put()
171 kref_get(&adapter->ref); zfcp_ccw_set_online()
/linux-4.4.14/drivers/media/i2c/cx25840/
H A Dcx25840-audio.c68 * FIXME 28636363 ref_freq doesn't match VID PLL ref cx25840_set_audclk_freq()
105 * FIXME 28636363 ref_freq doesn't match VID PLL ref cx25840_set_audclk_freq()
142 * FIXME 28636363 ref_freq doesn't match VID PLL ref cx25840_set_audclk_freq()
181 * FIXME 28636363 ref_freq doesn't match VID PLL ref cx25840_set_audclk_freq()
222 * FIXME 28636363 ref_freq doesn't match VID PLL ref cx25840_set_audclk_freq()
263 * FIXME 28636363 ref_freq doesn't match VID PLL ref cx25840_set_audclk_freq()
/linux-4.4.14/drivers/scsi/
H A Dxen-scsifront.c374 int err, ref, ref_cnt = 0; map_data_for_request() local
419 ref = gnttab_claim_grant_reference(&gref_head); map_data_for_request()
420 BUG_ON(ref == -ENOSPC); map_data_for_request()
422 gnttab_grant_foreign_access_ref(ref, map_data_for_request()
425 shadow->gref[ref_cnt] = ref; map_data_for_request()
426 ring_req->seg[ref_cnt].gref = ref; map_data_for_request()
453 ref = gnttab_claim_grant_reference(&gref_head); scsi_for_each_sg()
454 BUG_ON(ref == -ENOSPC); scsi_for_each_sg()
456 gnttab_grant_foreign_access_ref(ref, scsi_for_each_sg()
461 shadow->gref[ref_cnt] = ref; scsi_for_each_sg()
462 seg->gref = ref; scsi_for_each_sg()
797 err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", scsifront_init_ring()
800 xenbus_dev_fatal(dev, err, "%s", "writing ring-ref"); scsifront_init_ring()
/linux-4.4.14/drivers/clk/versatile/
H A Dclk-icst.c89 icst->params->ref = parent_rate; icst_recalc_rate()
112 icst->params->ref = parent_rate; icst_set_rate()
H A Dclk-impd1.c46 .ref = 24000000, /* 24 MHz */
64 .ref = 24000000, /* 24 MHz */
H A Dclk-realview.c24 .ref = 24000000,
/linux-4.4.14/drivers/cpufreq/
H A Dintegrator-cpufreq.c33 .ref = 24000000,
45 .ref = 24000000,
/linux-4.4.14/block/
H A Dt10-pi.c46 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
90 pr_err("%s: ref tag error at location %llu " \ t10_pi_verify()
/linux-4.4.14/kernel/
H A Duser.c141 * Locate the user_struct for the passed UID. If found, take a ref on it. The
142 * caller must undo that ref with free_uid().
/linux-4.4.14/fs/ocfs2/dlm/
H A Ddlmthread.c479 /* ref for dirty_list */ __dlm_dirty_lockres()
538 /* get an extra ref on lock */ dlm_flush_asts()
550 /* remove from list (including ref) */ dlm_flush_asts()
573 /* drop the extra ref. dlm_flush_asts()
582 /* get an extra ref on lock */ dlm_flush_asts()
595 /* remove from list (including ref) */ dlm_flush_asts()
625 /* drop the extra ref. dlm_flush_asts()
679 /* Drop dirty_list ref */ dlm_thread()
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/socklnd/
H A Dsocklnd.c118 atomic_set(&peer->ksnp_refcount, 1); /* 1 ref for caller */ ksocknal_create_peer()
210 if (peer != NULL) /* +1 ref for caller? */ ksocknal_find_peer()
243 /* lose peerlist's ref */ ksocknal_unlink_peer_locked()
393 /* peer's routelist takes over my ref on 'route' */ ksocknal_add_route_locked()
437 ksocknal_route_decref(route); /* drop peer's ref */ ksocknal_del_route_locked()
474 /* always called with a ref on ni, so shutdown can't have started */ ksocknal_add_peer()
482 /* peer table takes my ref on peer */ ksocknal_add_peer()
520 /* Extra ref prevents peer disappearing until I'm done with it */ ksocknal_del_peer_locked()
597 ksocknal_peer_addref(peer); /* a ref for me... */ ksocknal_del_peer()
1042 /* 2 ref, 1 for conn, another extra ref prevents socket ksocknal_create_conn()
1047 atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */ ksocknal_create_conn()
1128 /* called with a ref on ni, so shutdown can't have started */ ksocknal_create_conn()
1134 * table (which takes my ref) */ ksocknal_create_conn()
1142 /* +1 ref for me */ ksocknal_create_conn()
1248 conn->ksnc_peer = peer; /* conn takes my ref on peer */ ksocknal_create_conn()
1408 /* ksnd_deathrow_conns takes over peer's ref */ ksocknal_close_conn_locked()
1437 ksocknal_route_decref(route); /* drop conn's ref on route */ ksocknal_close_conn_locked()
1566 /* extra ref for scheduler */ ksocknal_terminate_conn()
2498 CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n", ksocknal_debug_peerhash()
2509 CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n", ksocknal_debug_peerhash()
2517 CWARN("Conn: ref %d, sref %d, t %d, c %d\n", ksocknal_debug_peerhash()
/linux-4.4.14/drivers/usb/gadget/legacy/
H A Dtcm_usb_gadget.h72 struct kref ref; member in struct:usbg_cmd
/linux-4.4.14/drivers/uwb/
H A Dumc-dev.c82 * resources. We take a ref to the device, to make sure it doesn't
/linux-4.4.14/drivers/media/dvb-frontends/
H A Dstv6110.c268 u32 divider, ref, p, presc, i, result_freq, vco_freq; stv6110_set_frequency() local
323 ref = priv->mclk / ((1 << (r_div_opt + 1)) * (1 << (p + 1))); stv6110_set_frequency()
324 divider = (((frequency * 1000) + (ref >> 1)) / ref); stv6110_set_frequency()
H A Dix2505v.c140 u8 gain, cc, ref, psc, local_osc, lpf; ix2505v_set_params() local
158 ref = 8; /* REF =1 */ ix2505v_set_params()
161 div_factor = (frequency * ref) / 40; /* local osc = 4Mhz */ ix2505v_set_params()
/linux-4.4.14/drivers/gpu/drm/gma500/
H A Dgtt.h48 int in_gart; /* Currently in the GART (ref ct) */
/linux-4.4.14/include/linux/amba/
H A Dkmi.h74 * div = (ref / 8MHz) - 1; 0 <= div <= 15
/linux-4.4.14/arch/mips/include/uapi/asm/
H A Dsiginfo.h84 void __user *_addr; /* faulting insn/memory ref. */
/linux-4.4.14/arch/ia64/include/uapi/asm/
H A Dsiginfo.h61 void __user *_addr; /* faulting insn/memory ref. */
/linux-4.4.14/net/mac802154/
H A Dllsec.h35 struct kref ref; member in struct:mac802154_llsec_key
H A Dllsec.c130 kref_init(&key->ref); llsec_key_alloc()
168 static void llsec_key_release(struct kref *ref) llsec_key_release() argument
173 key = container_of(ref, struct mac802154_llsec_key, ref); llsec_key_release()
185 kref_get(&key->ref); llsec_key_get()
191 kref_put(&key->ref, llsec_key_release); llsec_key_put()
/linux-4.4.14/include/media/
H A Dv4l2-device.h62 struct kref ref; member in struct:v4l2_device
63 /* Release function that is called when the ref count goes to 0. */
69 kref_get(&v4l2_dev->ref); v4l2_device_get()
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/
H A Dlib-eq.c157 int *ref; LNetEQFree() local
176 cfs_percpt_for_each(ref, i, eq->eq_refs) { LNetEQFree()
177 LASSERT(*ref >= 0); LNetEQFree()
178 if (*ref == 0) LNetEQFree()
182 i, *ref); LNetEQFree()
H A Dmodule.c99 /* Passing LNET_PID_ANY only gives me a ref if the net is up lnet_ioctl()
/linux-4.4.14/drivers/staging/lustre/lustre/lov/
H A Dlov_pool.c207 * find pool to dec ref */ pool_proc_start()
418 /* ref count init to 1 because when created a pool is always used lov_pool_new()
434 /* get ref for /proc file */ lov_pool_new()
607 /* caller may no have a ref on pool if it got the pool lov_check_index_in_pool()
642 /* pool is ignored, so we remove ref on it */ lov_find_pool()
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
H A Devents.c165 * since we don't have our own ref */ reply_in_callback()
362 /* req takes over the network's ref on rqbd */ request_in_callback()
364 /* req takes a ref on rqbd */ request_in_callback()
393 * net's ref on 'rs' */ reply_out_callback()
402 /* Last network callback. The net's ref on 'rs' stays put reply_out_callback()
/linux-4.4.14/drivers/md/bcache/
H A Dclosure.h315 * This is because after calling continue_at() you no longer have a ref on @cl,
317 * has a ref on its own closure which continue_at() drops.
329 * @cl have been dropped @cl's ref on its parent closure (as passed to
343 * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
358 * free the memory occupied by @cl, and it is called with the ref on the parent
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
H A Dbase.c470 nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd) nvkm_vm_ref() argument
472 if (ref) { nvkm_vm_ref()
473 int ret = nvkm_vm_link(ref, pgd); nvkm_vm_ref()
477 kref_get(&ref->refcount); nvkm_vm_ref()
485 *ptr = ref; nvkm_vm_ref()
/linux-4.4.14/net/bluetooth/hidp/
H A Dcore.c844 /* Both HID and input systems drop a ref-count when unregistering the hidp_session_dev_add()
845 * device but they don't take a ref-count when registering them. Work hidp_session_dev_add()
902 * The new session object has an initial ref-count of 1.
922 kref_init(&session->ref); hidp_session_new()
967 /* increase ref-count of the given session by one */ hidp_session_get()
970 kref_get(&session->ref); hidp_session_get()
974 static void session_free(struct kref *ref) session_free() argument
976 struct hidp_session *session = container_of(ref, struct hidp_session, session_free()
977 ref); session_free()
988 /* decrease ref-count of the given session by one */ hidp_session_put()
991 kref_put(&session->ref, session_free); hidp_session_put()
/linux-4.4.14/drivers/mtd/ubi/
H A Dbuild.c421 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
427 static int ubi_sysfs_init(struct ubi_device *ubi, int *ref) ubi_sysfs_init() argument
440 *ref = 1; ubi_sysfs_init()
469 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
474 * resources it allocated, returns an error, and @ref is set to %0. However,
478 * object. The @ref argument is set to %1 in this case. The caller has to put
484 static int uif_init(struct ubi_device *ubi, int *ref) uif_init() argument
489 *ref = 0; uif_init()
517 err = ubi_sysfs_init(ubi, ref); uif_init()
535 if (*ref) uif_init()
855 int i, err, ref = 0; ubi_attach_mtd_dev() local
990 err = uif_init(ubi, &ref); ubi_attach_mtd_dev()
1042 ubi_assert(ref); ubi_attach_mtd_dev()
1051 if (ref) ubi_attach_mtd_dev()
/linux-4.4.14/drivers/block/
H A Dataflop.c222 int ref; member in struct:atari_floppy_struct
1565 if (floppy->ref != 1 && floppy->ref != -1) fd_locked_ioctl()
1672 if (floppy->ref != 1 && floppy->ref != -1) fd_locked_ioctl()
1847 if (p->ref && p->type != type) floppy_open()
1850 if (p->ref == -1 || (p->ref && mode & FMODE_EXCL)) floppy_open()
1854 p->ref = -1; floppy_open()
1856 p->ref++; floppy_open()
1867 if (p->ref < 0) floppy_open()
1868 p->ref = 0; floppy_open()
1870 p->ref--; floppy_open()
1893 if (p->ref < 0) floppy_release()
1894 p->ref = 0; floppy_release()
1895 else if (!p->ref--) { floppy_release()
1897 p->ref = 0; floppy_release()
/linux-4.4.14/drivers/block/xen-blkback/
H A Dxenbus.c854 err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", connect_ring()
858 xenbus_dev_fatal(dev, err, "reading %s/ring-ref", connect_ring()
863 pr_info("%s:using single page: ring-ref %d\n", dev->otherend, connect_ring()
880 snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i); connect_ring()
889 pr_info("ring-ref%u: %u\n", i, ring_ref[i]); connect_ring()
943 xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn); connect_ring()
/linux-4.4.14/arch/arm/boot/dts/
H A DMakefile704 uniphier-ph1-ld4-ref.dtb \
705 uniphier-ph1-ld6b-ref.dtb \
706 uniphier-ph1-pro4-ref.dtb \
707 uniphier-ph1-sld3-ref.dtb \
708 uniphier-ph1-sld8-ref.dtb \
723 wm8505-ref.dtb \
/linux-4.4.14/include/xen/interface/
H A Dgrant_table.h245 * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
266 grant_ref_t ref; member in struct:gnttab_map_grant_ref
333 * <domid, ref>.
343 grant_ref_t ref; member in struct:gnttab_transfer
377 grant_ref_t ref; member in union:gnttab_copy::__anon14723::__anon14724
489 grant_ref_t ref; member in union:gnttab_cache_flush::__anon14725
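
These grant-table structures are the hypervisor side of the frontend sequence visible in the xen-scsifront and hvc_xen hits earlier in these results: claim a grant_ref_t, grant the backend domain access to the shared ring page, and publish the reference in xenstore under "ring-ref". A condensed sketch of that sequence for a single ring page, with error unwinding trimmed (the helper calls mirror the snippets above; treat the exact details as an approximation, not the canonical frontend code):

	#include <xen/grant_table.h>
	#include <xen/page.h>
	#include <xen/xenbus.h>

	static int publish_ring(struct xenbus_device *dev, void *ring_page)
	{
		grant_ref_t gref_head;
		int ref, err;

		/* reserve one reference from the grant table */
		err = gnttab_alloc_grant_references(1, &gref_head);
		if (err < 0)
			return err;

		ref = gnttab_claim_grant_reference(&gref_head);
		if (ref < 0)
			return ref;

		/* let the backend domain map the ring page read-write */
		gnttab_grant_foreign_access_ref(ref, dev->otherend_id,
						virt_to_gfn(ring_page), 0);

		/* advertise the reference where the backend's connect_ring()
		 * counterpart (see the xen-blkback hits just above) reads it */
		return xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%d", ref);
	}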
/linux-4.4.14/drivers/staging/android/ion/
H A Dion.c99 * @ref: reference count
110 struct kref ref; member in struct:ion_handle
193 kref_init(&buffer->ref); ion_buffer_create()
284 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); _ion_buffer_destroy()
300 kref_get(&buffer->ref); ion_buffer_get()
305 return kref_put(&buffer->ref, _ion_buffer_destroy); ion_buffer_put()
347 kref_init(&handle->ref); ion_handle_create()
361 struct ion_handle *handle = container_of(kref, struct ion_handle, ref); ion_handle_destroy()
387 kref_get(&handle->ref); ion_handle_get()
396 ret = kref_put(&handle->ref, ion_handle_destroy); ion_handle_put()
838 ion_handle_destroy(&handle->ref); ion_client_destroy()
1439 atomic_read(&buffer->ref.refcount)); ion_debug_heap_show()
/linux-4.4.14/drivers/net/wireless/iwlwifi/mvm/
H A Dphy-ctxt.c224 ctxt->ref); iwl_mvm_phy_ctxt_add()
241 ctxt->ref++; iwl_mvm_phy_ctxt_ref()
268 ctxt->ref--; iwl_mvm_phy_ctxt_unref()
/linux-4.4.14/scripts/
H A Dget_maintainer.pl1541 foreach my $ref (@{$commit_author}) {
1542 print STDERR " Author: @{$ref}[1]\n";
1547 foreach my $ref (@{$commit_signer}) {
1548 print STDERR " @{$ref}[2]: @{$ref}[1]\n";
1829 foreach my $ref(@{$commit_author_hash{$authors[$i]}}) {
1830 if (@{$ref}[0] eq $commits[$i] &&
1831 @{$ref}[1] eq $subjects[$i]) {
1864 foreach my $ref(@{$commit_signer_hash{$signer}}) {
1865 if (@{$ref}[0] eq $commit &&
1866 @{$ref}[1] eq $subject &&
1867 @{$ref}[2] eq $type) {
/linux-4.4.14/include/sound/
H A Dak4113.h162 /* 11.2896 MHz ref. Xtal freq. */
164 /* 12.288 MHz ref. Xtal freq. */
166 /* 24.576 MHz ref. Xtal freq. */
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
H A Damdgpu_pll.c69 * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
139 /* determine allowed ref divider range */ amdgpu_pll_compute()
247 DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n", amdgpu_pll_compute()
/linux-4.4.14/drivers/usb/core/
H A Dport.c252 * Also, drop the HiSpeed ref taken above. link_peers()
285 * usb_port_runtime_resume() event which takes a SuperSpeed ref unlink_peers()
303 /* Drop the SuperSpeed ref held on behalf of the active HiSpeed port */ unlink_peers()
306 /* Drop the ref taken above */ unlink_peers()
/linux-4.4.14/drivers/sh/clk/
H A Dcore.c368 kref_init(&mapping->ref); clk_establish_mapping()
377 kref_get(&mapping->ref); clk_establish_mapping()
391 mapping = container_of(kref, struct clk_mapping, ref); clk_destroy_mapping()
404 kref_put(&mapping->ref, clk_destroy_mapping); clk_teardown_mapping()
/linux-4.4.14/drivers/pinctrl/mvebu/
H A Dpinctrl-armada-38x.c232 MPP_VAR_FUNCTION(1, "ref", "clk_out1", V_88F6810_PLUS),
248 MPP_VAR_FUNCTION(3, "ref", "clk_out0", V_88F6810_PLUS),
294 MPP_VAR_FUNCTION(1, "ref", "clk_out0", V_88F6810_PLUS),
299 MPP_VAR_FUNCTION(1, "ref", "clk_out1", V_88F6810_PLUS),
H A Dpinctrl-armada-39x.c215 MPP_VAR_FUNCTION(1, "ref", "clk_out1", V_88F6920_PLUS),
227 MPP_VAR_FUNCTION(3, "ref", "clk_out0", V_88F6920_PLUS),
275 MPP_VAR_FUNCTION(1, "ref", "clk_out0", V_88F6920_PLUS),
280 MPP_VAR_FUNCTION(1, "ref", "clk_out1", V_88F6920_PLUS),

