Searched refs:path (Results 1 - 200 of 2428) sorted by relevance


/linux-4.4.14/drivers/thunderbolt/
path.c
2 * Thunderbolt Cactus Ridge driver - path/tunnel functionality
31 * tb_path_alloc() - allocate a thunderbolt path
37 struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL); tb_path_alloc() local
38 if (!path) tb_path_alloc()
40 path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL); tb_path_alloc()
41 if (!path->hops) { tb_path_alloc()
42 kfree(path); tb_path_alloc()
45 path->tb = tb; tb_path_alloc()
46 path->path_length = num_hops; tb_path_alloc()
47 return path; tb_path_alloc()
51 * tb_path_free() - free a deactivated path
53 void tb_path_free(struct tb_path *path) tb_path_free() argument
55 if (path->activated) { tb_path_free()
56 tb_WARN(path->tb, "trying to free an activated path\n") tb_path_free()
59 kfree(path->hops); tb_path_free()
60 kfree(path); tb_path_free()
63 static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop) __tb_path_deallocate_nfc() argument
66 for (i = first_hop; i < path->path_length; i++) { __tb_path_deallocate_nfc()
67 res = tb_port_add_nfc_credits(path->hops[i].in_port, __tb_path_deallocate_nfc()
68 -path->nfc_credits); __tb_path_deallocate_nfc()
70 tb_port_warn(path->hops[i].in_port, __tb_path_deallocate_nfc()
76 static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop) __tb_path_deactivate_hops() argument
80 for (i = first_hop; i < path->path_length; i++) { __tb_path_deactivate_hops()
81 res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, __tb_path_deactivate_hops()
82 2 * path->hops[i].in_hop_index, 2); __tb_path_deactivate_hops()
84 tb_port_warn(path->hops[i].in_port, __tb_path_deactivate_hops()
86 i, path->hops[i].in_hop_index); __tb_path_deactivate_hops()
90 void tb_path_deactivate(struct tb_path *path) tb_path_deactivate() argument
92 if (!path->activated) { tb_path_deactivate()
93 tb_WARN(path->tb, "trying to deactivate an inactive path\n"); tb_path_deactivate()
96 tb_info(path->tb, tb_path_deactivate()
97 "deactivating path from %llx:%x to %llx:%x\n", tb_path_deactivate()
98 tb_route(path->hops[0].in_port->sw), tb_path_deactivate()
99 path->hops[0].in_port->port, tb_path_deactivate()
100 tb_route(path->hops[path->path_length - 1].out_port->sw), tb_path_deactivate()
101 path->hops[path->path_length - 1].out_port->port); tb_path_deactivate()
102 __tb_path_deactivate_hops(path, 0); tb_path_deactivate()
103 __tb_path_deallocate_nfc(path, 0); tb_path_deactivate()
104 path->activated = false; tb_path_deactivate()
108 * tb_path_activate() - activate a path
110 * Activate a path starting with the last hop and iterating backwards. The
111 * caller must fill path->hops before calling tb_path_activate().
115 int tb_path_activate(struct tb_path *path) tb_path_activate() argument
119 if (path->activated) { tb_path_activate()
120 tb_WARN(path->tb, "trying to activate already activated path\n"); tb_path_activate()
124 tb_info(path->tb, tb_path_activate()
125 "activating path from %llx:%x to %llx:%x\n", tb_path_activate()
126 tb_route(path->hops[0].in_port->sw), tb_path_activate()
127 path->hops[0].in_port->port, tb_path_activate()
128 tb_route(path->hops[path->path_length - 1].out_port->sw), tb_path_activate()
129 path->hops[path->path_length - 1].out_port->port); tb_path_activate()
132 for (i = path->path_length - 1; i >= 0; i--) { tb_path_activate()
133 if (path->hops[i].in_counter_index == -1) tb_path_activate()
135 res = tb_port_clear_counter(path->hops[i].in_port, tb_path_activate()
136 path->hops[i].in_counter_index); tb_path_activate()
142 for (i = path->path_length - 1; i >= 0; i--) { tb_path_activate()
143 res = tb_port_add_nfc_credits(path->hops[i].in_port, tb_path_activate()
144 path->nfc_credits); tb_path_activate()
146 __tb_path_deallocate_nfc(path, i); tb_path_activate()
152 for (i = path->path_length - 1; i >= 0; i--) { tb_path_activate()
159 * defunct) firmware path. This causes the hotplug operation to tb_path_activate()
166 res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, tb_path_activate()
167 2 * path->hops[i].in_hop_index, 2); tb_path_activate()
169 __tb_path_deactivate_hops(path, i); tb_path_activate()
170 __tb_path_deallocate_nfc(path, 0); tb_path_activate()
175 hop.next_hop = path->hops[i].next_hop_index; tb_path_activate()
176 hop.out_port = path->hops[i].out_port->port; tb_path_activate()
178 hop.initial_credits = (i == path->path_length - 1) ? 16 : 7; tb_path_activate()
183 out_mask = (i == path->path_length - 1) ? tb_path_activate()
186 hop.weight = path->weight; tb_path_activate()
188 hop.priority = path->priority; tb_path_activate()
189 hop.drop_packages = path->drop_packages; tb_path_activate()
190 hop.counter = path->hops[i].in_counter_index; tb_path_activate()
191 hop.counter_enable = path->hops[i].in_counter_index != -1; tb_path_activate()
192 hop.ingress_fc = path->ingress_fc_enable & in_mask; tb_path_activate()
193 hop.egress_fc = path->egress_fc_enable & out_mask; tb_path_activate()
194 hop.ingress_shared_buffer = path->ingress_shared_buffer tb_path_activate()
196 hop.egress_shared_buffer = path->egress_shared_buffer tb_path_activate()
200 tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d", tb_path_activate()
201 i, path->hops[i].in_hop_index); tb_path_activate()
202 tb_dump_hop(path->hops[i].in_port, &hop); tb_path_activate()
203 res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, tb_path_activate()
204 2 * path->hops[i].in_hop_index, 2); tb_path_activate()
206 __tb_path_deactivate_hops(path, i); tb_path_activate()
207 __tb_path_deallocate_nfc(path, 0); tb_path_activate()
211 path->activated = true; tb_path_activate()
212 tb_info(path->tb, "path activation complete\n"); tb_path_activate()
215 tb_WARN(path->tb, "path activation failed\n"); tb_path_activate()
220 * tb_path_is_invalid() - check whether any ports on the path are invalid
222 * Return: Returns true if the path is invalid, false otherwise.
224 bool tb_path_is_invalid(struct tb_path *path) tb_path_is_invalid() argument
227 for (i = 0; i < path->path_length; i++) { tb_path_is_invalid()
228 if (path->hops[i].in_port->sw->is_unplugged) tb_path_is_invalid()
230 if (path->hops[i].out_port->sw->is_unplugged) tb_path_is_invalid()
Makefile
2 thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
tunnel_pci.c
31 static void tb_pci_init_path(struct tb_path *path) tb_pci_init_path() argument
33 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; tb_pci_init_path()
34 path->egress_shared_buffer = TB_PATH_NONE; tb_pci_init_path()
35 path->ingress_fc_enable = TB_PATH_ALL; tb_pci_init_path()
36 path->ingress_shared_buffer = TB_PATH_NONE; tb_pci_init_path()
37 path->priority = 3; tb_pci_init_path()
38 path->weight = 1; tb_pci_init_path()
39 path->drop_packages = 0; tb_pci_init_path()
40 path->nfc_credits = 0; tb_pci_init_path()
53 * my thunderbolt devices). Therefore at most ONE path per device may be
131 * tb_pci_is_invalid - check whether an activated path is still valid
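Taken together, these hits trace the whole lifecycle of a software-configured path: tb_path_alloc() builds the hop array, the caller fills path->hops and the flow-control fields, tb_path_activate() programs the hop entries back to front, and tb_path_deactivate()/tb_path_free() tear it down. A condensed in-driver sketch of that sequence, with the hop setup elided and assuming the driver-internal declarations from tb.h (the function name is hypothetical):

/* hypothetical sketch; tb_path_* are internal to drivers/thunderbolt */
static int example_setup_tunnel_path(struct tb *tb)
{
	struct tb_path *path;
	int res;

	path = tb_path_alloc(tb, 2);	/* two hops, as in the PCIe tunnel */
	if (!path)
		return -ENOMEM;

	/* flow-control setup matching tb_pci_init_path() above */
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;

	/* caller must fill path->hops[] (in/out ports, hop and counter
	 * indices) before activation; elided here */

	res = tb_path_activate(path);
	if (res) {
		tb_path_free(path);	/* only deactivated paths may be freed */
		return res;
	}
	return 0;
}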
/linux-4.4.14/drivers/video/fbdev/mmp/
core.c
28 static struct mmp_overlay *path_get_overlay(struct mmp_path *path, path_get_overlay() argument
31 if (path && overlay_id < path->overlay_num) path_get_overlay()
32 return &path->overlays[overlay_id]; path_get_overlay()
36 static int path_check_status(struct mmp_path *path) path_check_status() argument
39 for (i = 0; i < path->overlay_num; i++) path_check_status()
40 if (path->overlays[i].status) path_check_status()
53 static int path_get_modelist(struct mmp_path *path, path_get_modelist() argument
56 BUG_ON(!path || !modelist); path_get_modelist()
58 if (path->panel && path->panel->get_modelist) path_get_modelist()
59 return path->panel->get_modelist(path->panel, modelist); path_get_modelist()
65 * panel list is used to pair a panel with a path when either registers
66 * path list is used by both the buffer driver and the platform driver
67 * the platform driver registers/unregisters paths
69 * the buffer driver gets registered paths
76 * mmp_register_panel - register panel to panel_list and connect to path
80 * to panel_list and connect to the path that matches panel->plat_path_name.
81 * no error is returned when no matching path is found, as the path may register after
86 struct mmp_path *path; mmp_register_panel() local
93 /* try to register to path */ mmp_register_panel()
94 list_for_each_entry(path, &path_list, node) { mmp_register_panel()
95 if (!strcmp(panel->plat_path_name, path->name)) { mmp_register_panel()
96 dev_info(panel->dev, "connect to path %s\n", mmp_register_panel()
97 path->name); mmp_register_panel()
98 path->panel = panel; mmp_register_panel()
112 * from panel_list and disconnect from path.
116 struct mmp_path *path; mmp_unregister_panel() local
121 list_for_each_entry(path, &path_list, node) { mmp_unregister_panel()
122 if (path->panel && path->panel == panel) { mmp_unregister_panel()
123 dev_info(panel->dev, "disconnect from path %s\n", mmp_unregister_panel()
124 path->name); mmp_unregister_panel()
125 path->panel = NULL; mmp_unregister_panel()
134 * mmp_get_path - get path by name
135 * @p: path name
137 * this function looks up the path name in path_list and returns the matching path,
138 * or NULL if no path matches
142 struct mmp_path *path; mmp_get_path() local
146 list_for_each_entry(path, &path_list, node) { mmp_get_path()
147 if (!strcmp(name, path->name)) { mmp_get_path()
154 return found ? path : NULL; mmp_get_path()
159 * mmp_register_path - init and register path by path_info
160 * @p: path info provided by display controller
162 * this function initializes a path from path info and registers it to path_list
163 * it also tries to connect the path to a panel by name
169 struct mmp_path *path = NULL; mmp_register_path() local
174 path = kzalloc(size, GFP_KERNEL); mmp_register_path()
175 if (!path) mmp_register_path()
178 /* path set */ mmp_register_path()
179 mutex_init(&path->access_ok); mmp_register_path()
180 path->dev = info->dev; mmp_register_path()
181 path->id = info->id; mmp_register_path()
182 path->name = info->name; mmp_register_path()
183 path->output_type = info->output_type; mmp_register_path()
184 path->overlay_num = info->overlay_num; mmp_register_path()
185 path->plat_data = info->plat_data; mmp_register_path()
186 path->ops.set_mode = info->set_mode; mmp_register_path()
192 dev_info(path->dev, "get panel %s\n", panel->name); mmp_register_path()
193 path->panel = panel; mmp_register_path()
198 dev_info(path->dev, "register %s, overlay_num %d\n", mmp_register_path()
199 path->name, path->overlay_num); mmp_register_path()
202 if (!path->ops.check_status) mmp_register_path()
203 path->ops.check_status = path_check_status; mmp_register_path()
204 if (!path->ops.get_overlay) mmp_register_path()
205 path->ops.get_overlay = path_get_overlay; mmp_register_path()
206 if (!path->ops.get_modelist) mmp_register_path()
207 path->ops.get_modelist = path_get_modelist; mmp_register_path()
210 for (i = 0; i < path->overlay_num; i++) { mmp_register_path()
211 path->overlays[i].path = path; mmp_register_path()
212 path->overlays[i].id = i; mmp_register_path()
213 mutex_init(&path->overlays[i].access_ok); mmp_register_path()
214 path->overlays[i].ops = info->overlay_ops; mmp_register_path()
218 list_add_tail(&path->node, &path_list); mmp_register_path()
221 return path; mmp_register_path()
226 * mmp_unregister_path - unregister and destroy path
227 * @p: path to be destroyed.
229 * this function unregisters the path and destroys it.
231 void mmp_unregister_path(struct mmp_path *path) mmp_unregister_path() argument
235 if (!path) mmp_unregister_path()
240 list_del(&path->node); mmp_unregister_path()
243 for (i = 0; i < path->overlay_num; i++) mmp_unregister_path()
244 mutex_destroy(&path->overlays[i].access_ok); mmp_unregister_path()
246 mutex_destroy(&path->access_ok); mmp_unregister_path()
248 kfree(path); mmp_unregister_path()
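The registration code pairs panels with paths by name in either order: whichever object registers second finds its peer on the corresponding list. A minimal sketch of both sides, using only the mmp_path_info and mmp_panel fields visible in the snippets above (all names are hypothetical):

/* platform driver side: describe and register a path */
static struct mmp_path_info demo_path_info = {
	.name		= "demo-path",	/* hypothetical path name */
	.overlay_num	= 2,
	/* .dev, .id, .output_type, .set_mode, .overlay_ops, ... */
};

/* panel driver side: a panel that wants to attach to that path */
static struct mmp_panel demo_panel = {
	.name		= "demo-panel",
	.plat_path_name	= "demo-path",	/* must match path->name */
	/* .dev, .get_modelist, ... */
};

static int demo_init(void)
{
	struct mmp_path *path = mmp_register_path(&demo_path_info);

	if (!path)
		return -ENOMEM;
	/* order does not matter: mmp_register_panel() scans path_list,
	 * and mmp_register_path() scans panel_list */
	mmp_register_panel(&demo_panel);
	return 0;
}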
/linux-4.4.14/include/linux/
path.h
7 struct path { struct
12 extern void path_get(const struct path *);
13 extern void path_put(const struct path *);
15 static inline int path_equal(const struct path *path1, const struct path *path2) path_equal()
namei.h
5 #include <linux/path.h>
23 * - internal "there are more path components" flag
47 extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
50 struct path *path) user_path_at()
52 return user_path_at_empty(dfd, name, flags, path, NULL); user_path_at()
55 static inline int user_path(const char __user *name, struct path *path) user_path() argument
57 return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL); user_path()
60 static inline int user_lpath(const char __user *name, struct path *path) user_lpath() argument
62 return user_path_at_empty(AT_FDCWD, name, 0, path, NULL); user_lpath()
65 static inline int user_path_dir(const char __user *name, struct path *path) user_path_dir() argument
68 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL); user_path_dir()
71 extern int kern_path(const char *, unsigned, struct path *);
73 extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int);
74 extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
75 extern void done_path_create(struct path *, struct dentry *);
76 extern struct dentry *kern_path_locked(const char *, struct path *);
77 extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
81 extern int follow_down_one(struct path *);
82 extern int follow_down(struct path *);
83 extern int follow_up(struct path *);
88 extern void nd_jump_link(struct path *path);
49 user_path_at(int dfd, const char __user *name, unsigned flags, struct path *path) user_path_at() argument
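path.h and namei.h together define the reference contract for struct path: a successful lookup pins both the vfsmount and the dentry, and the caller must drop them with path_put(). A minimal in-kernel sketch using kern_path() (demo_stat_path is a hypothetical helper):

/* in-kernel sketch; kern_path()/path_put() are the exported APIs above */
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/path.h>

static int demo_stat_path(const char *name)
{
	struct path path;
	int err;

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	pr_info("%s: inode %lu\n", name, d_inode(path.dentry)->i_ino);

	path_put(&path);	/* drop the mnt and dentry references */
	return 0;
}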
dcookies.h
4 * Persistent cookie-path mappings
19 struct path;
47 int get_dcookie(struct path *path, unsigned long *cookie);
61 static inline int get_dcookie(struct path *path, unsigned long *cookie) get_dcookie() argument
fs_struct.h
4 #include <linux/path.h>
14 struct path root, pwd;
20 extern void set_fs_root(struct fs_struct *, const struct path *);
21 extern void set_fs_pwd(struct fs_struct *, const struct path *);
26 static inline void get_fs_root(struct fs_struct *fs, struct path *root) get_fs_root()
34 static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) get_fs_pwd()
earlycpio.h
14 struct cpio_data find_cpio_data(const char *path, void *data, size_t len,
fsnotify.h
29 static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) fsnotify_parent() argument
32 dentry = path->dentry; fsnotify_parent()
34 return __fsnotify_parent(path, dentry, mask); fsnotify_parent()
40 struct path *path = &file->f_path; fsnotify_perm() local
56 ret = fsnotify_parent(path, NULL, fsnotify_mask); fsnotify_perm()
60 return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); fsnotify_perm()
196 struct path *path = &file->f_path; fsnotify_access() local
204 fsnotify_parent(path, NULL, mask); fsnotify_access()
205 fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); fsnotify_access()
214 struct path *path = &file->f_path; fsnotify_modify() local
222 fsnotify_parent(path, NULL, mask); fsnotify_modify()
223 fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); fsnotify_modify()
232 struct path *path = &file->f_path; fsnotify_open() local
239 fsnotify_parent(path, NULL, mask); fsnotify_open()
240 fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); fsnotify_open()
248 struct path *path = &file->f_path; fsnotify_close() local
257 fsnotify_parent(path, NULL, mask); fsnotify_close()
258 fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); fsnotify_close()
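All of the fsnotify wrappers above share one shape: take the struct path embedded in file->f_path, notify any watch on the parent directory first, then deliver the event against the inode itself with FSNOTIFY_EVENT_PATH. The common body, distilled from the 4.4-era helpers shown above (demo_notify is hypothetical; the real helpers also set FS_ISDIR and gate on FMODE_NONOTIFY):

/* in-kernel sketch mirroring fsnotify_access/modify/open/close */
static inline void demo_notify(struct file *file, __u32 mask)
{
	struct path *path = &file->f_path;
	struct inode *inode = file_inode(file);

	if (!(file->f_mode & FMODE_NONOTIFY)) {
		fsnotify_parent(path, NULL, mask);	/* directory watches */
		fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
	}
}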
lsm_audit.h
21 #include <linux/path.h>
44 struct path path; member in struct:lsm_ioctlop_audit
63 struct path path; member in union:common_audit_data::__anon12524
mount.h
73 struct path;
82 extern struct vfsmount *mnt_clone_internal(struct path *path);
85 struct path;
86 extern struct vfsmount *clone_private_mount(struct path *path);
reciprocal_div.h
17 * slow-path with reciprocal_value(). The fast-path can then just use
proc_ns.h
11 struct path;
70 extern void *ns_get_path(struct path *path, struct task_struct *task,
/linux-4.4.14/tools/perf/util/
abspath.c
22 const char *make_nonrelative_path(const char *path) make_nonrelative_path() argument
26 if (is_absolute_path(path)) { make_nonrelative_path()
27 if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) make_nonrelative_path()
28 die("Too long path: %.*s", 60, path); make_nonrelative_path()
33 if (snprintf(buf, PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX) make_nonrelative_path()
34 die("Too long path: %.*s", 60, path); make_nonrelative_path()
path.c
15 static char bad_path[] = "/bad-path/";
51 static char *cleanup_path(char *path) cleanup_path() argument
54 if (!memcmp(path, "./", 2)) { cleanup_path()
55 path += 2; cleanup_path()
56 while (*path == '/') cleanup_path()
57 path++; cleanup_path()
59 return path; cleanup_path()
84 char path[PATH_MAX]; perf_pathdup() local
87 (void)perf_vsnpath(path, sizeof(path), fmt, args); perf_pathdup()
89 return xstrdup(path); perf_pathdup()
127 /* strip arbitrary amount of directory separators at end of path */ chomp_trailing_dir_sep()
128 static inline int chomp_trailing_dir_sep(const char *path, int len) chomp_trailing_dir_sep() argument
130 while (len && is_dir_sep(path[len - 1])) chomp_trailing_dir_sep()
136 * If path ends with suffix (complete path components), returns the
140 char *strip_path_suffix(const char *path, const char *suffix) strip_path_suffix() argument
142 int path_len = strlen(path), suffix_len = strlen(suffix); strip_path_suffix()
148 if (is_dir_sep(path[path_len - 1])) { strip_path_suffix()
151 path_len = chomp_trailing_dir_sep(path, path_len); strip_path_suffix()
154 else if (path[--path_len] != suffix[--suffix_len]) strip_path_suffix()
158 if (path_len && !is_dir_sep(path[path_len - 1])) strip_path_suffix()
160 return strndup(path, chomp_trailing_dir_sep(path, path_len)); strip_path_suffix()
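strip_path_suffix() matches complete path components from the right and tolerates redundant trailing separators on either side. The fragments above reassemble into the following standalone program (the main() demo is added purely for illustration):

#define _GNU_SOURCE		/* for strndup() on older toolchains */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int is_dir_sep(char c) { return c == '/'; }

static int chomp_trailing_dir_sep(const char *path, int len)
{
	while (len && is_dir_sep(path[len - 1]))
		len--;
	return len;
}

/* same algorithm as perf's strip_path_suffix() */
static char *strip_path_suffix(const char *path, const char *suffix)
{
	int path_len = strlen(path), suffix_len = strlen(suffix);

	while (suffix_len) {
		if (!path_len)
			return NULL;
		if (is_dir_sep(path[path_len - 1])) {
			if (!is_dir_sep(suffix[suffix_len - 1]))
				return NULL;
			path_len = chomp_trailing_dir_sep(path, path_len);
			suffix_len = chomp_trailing_dir_sep(suffix, suffix_len);
		} else if (path[--path_len] != suffix[--suffix_len])
			return NULL;
	}
	if (path_len && !is_dir_sep(path[path_len - 1]))
		return NULL;	/* suffix began mid-component */
	return strndup(path, chomp_trailing_dir_sep(path, path_len));
}

int main(void)
{
	/* prints "/usr/local": the suffix matched component-wise */
	printf("%s\n", strip_path_suffix("/usr/local/libexec/perf-core",
					 "libexec/perf-core"));
	return 0;
}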
data.c
19 if (!file->path) { check_pipe()
23 if (!strcmp(file->path, "-")) check_pipe()
37 if (!stat(file->path, &st) && st.st_size) { check_backup()
41 file->path); check_backup()
43 rename(file->path, oldname); check_backup()
55 fd = open(file->path, O_RDONLY); open_file_read()
59 pr_err("failed to open %s: %s", file->path, open_file_read()
61 if (err == ENOENT && !strcmp(file->path, "perf.data")) open_file_read()
72 file->path); open_file_read()
78 file->path); open_file_read()
98 fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR); open_file_write()
101 pr_err("failed to open %s : %s\n", file->path, open_file_write()
123 if (!file->path) perf_data_file__open()
124 file->path = "perf.data"; perf_data_file__open()
exec_cmd.c
12 const char *system_path(const char *path) system_path() argument
17 if (is_absolute_path(path)) system_path()
18 return path; system_path()
20 strbuf_addf(&d, "%s/%s", prefix, path); system_path()
21 path = strbuf_detach(&d, NULL); system_path()
22 return path; system_path()
70 static void add_path(struct strbuf *out, const char *path) add_path() argument
72 if (path && *path) { add_path()
73 if (is_absolute_path(path)) add_path()
74 strbuf_addstr(out, path); add_path()
76 strbuf_addstr(out, make_nonrelative_path(path)); add_path()
exec_cmd.h
5 extern const char *perf_extract_argv0_path(const char *path);
10 extern const char *system_path(const char *path);
trace-event-info.c
107 char *path; record_header_files() local
111 path = get_tracing_file("events/header_page"); record_header_files()
112 if (!path) { record_header_files()
117 if (stat(path, &st) < 0) { record_header_files()
118 pr_debug("can't read '%s'", path); record_header_files()
127 if (record_file(path, 8) < 0) { record_header_files()
132 put_tracing_file(path); record_header_files()
134 path = get_tracing_file("events/header_event"); record_header_files()
135 if (!path) { record_header_files()
141 if (stat(path, &st) < 0) { record_header_files()
142 pr_debug("can't read '%s'", path); record_header_files()
151 if (record_file(path, 8) < 0) { record_header_files()
158 put_tracing_file(path); record_header_files()
242 char *path; record_ftrace_files() local
245 path = get_tracing_file("events/ftrace"); record_ftrace_files()
246 if (!path) { record_ftrace_files()
251 ret = copy_event_system(path, tps); record_ftrace_files()
253 put_tracing_file(path); record_ftrace_files()
273 char *path; record_event_files() local
280 path = get_tracing_file("events"); record_event_files()
281 if (!path) { record_event_files()
286 dir = opendir(path); record_event_files()
289 pr_debug("can't read directory '%s'", path); record_event_files()
317 if (asprintf(&sys, "%s/%s", path, dent->d_name) < 0) { record_event_files()
337 put_tracing_file(path); record_event_files()
349 * different path) couldn't be read. record_proc_kallsyms()
357 char *path; record_ftrace_printk() local
361 path = get_tracing_file("printk_formats"); record_ftrace_printk()
362 if (!path) { record_ftrace_printk()
367 ret = stat(path, &st); record_ftrace_printk()
375 err = record_file(path, 4); record_ftrace_printk()
378 put_tracing_file(path); record_ftrace_printk()
398 struct tracepoint_path path, *ppath = &path; get_tracepoints_path() local
423 put_tracepoints_path(&path); list_for_each_entry()
430 return nr_tracepoints > 0 ? path.next : NULL;
pmu.c
45 char path[PATH_MAX]; perf_pmu__format_parse() local
52 snprintf(path, PATH_MAX, "%s/%s", dir, name); perf_pmu__format_parse()
55 file = fopen(path, "r"); perf_pmu__format_parse()
76 char path[PATH_MAX]; pmu_format() local
82 snprintf(path, PATH_MAX, pmu_format()
85 if (stat(path, &st) < 0) pmu_format()
88 if (perf_pmu__format_parse(path, format)) pmu_format()
100 char path[PATH_MAX]; perf_pmu__parse_scale() local
103 snprintf(path, PATH_MAX, "%s/%s.scale", dir, name); perf_pmu__parse_scale()
105 fd = open(path, O_RDONLY); perf_pmu__parse_scale()
146 char path[PATH_MAX]; perf_pmu__parse_unit() local
150 snprintf(path, PATH_MAX, "%s/%s.unit", dir, name); perf_pmu__parse_unit()
152 fd = open(path, O_RDONLY); perf_pmu__parse_unit()
177 char path[PATH_MAX]; perf_pmu__parse_per_pkg() local
180 snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name); perf_pmu__parse_per_pkg()
182 fd = open(path, O_RDONLY); perf_pmu__parse_per_pkg()
195 char path[PATH_MAX]; perf_pmu__parse_snapshot() local
198 snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name); perf_pmu__parse_snapshot()
200 fd = open(path, O_RDONLY); perf_pmu__parse_snapshot()
292 char path[PATH_MAX]; pmu_aliases_parse() local
305 snprintf(path, PATH_MAX, "%s/%s", dir, name); pmu_aliases_parse()
307 file = fopen(path, "r"); pmu_aliases_parse()
309 pr_debug("Cannot open %s\n", path); pmu_aliases_parse()
329 char path[PATH_MAX]; pmu_aliases() local
335 snprintf(path, PATH_MAX, pmu_aliases()
338 if (stat(path, &st) < 0) pmu_aliases()
341 if (pmu_aliases_parse(path, head)) pmu_aliases()
374 char path[PATH_MAX]; pmu_type() local
382 snprintf(path, PATH_MAX, pmu_type()
385 if (stat(path, &st) < 0) pmu_type()
388 file = fopen(path, "r"); pmu_type()
402 char path[PATH_MAX]; pmu_read_sysfs() local
410 snprintf(path, PATH_MAX, pmu_read_sysfs()
413 dir = opendir(path); pmu_read_sysfs()
430 char path[PATH_MAX]; pmu_cpumask() local
438 snprintf(path, PATH_MAX, pmu_cpumask()
441 if (stat(path, &st) < 0) pmu_cpumask()
444 file = fopen(path, "r"); pmu_cpumask()
1064 char path[PATH_MAX]; perf_pmu__open_file() local
1071 snprintf(path, PATH_MAX, perf_pmu__open_file()
1074 if (stat(path, &st) < 0) perf_pmu__open_file()
1077 return fopen(path, "r"); perf_pmu__open_file()
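Nearly every helper in pmu.c repeats one three-step pattern: snprintf() the sysfs file name into a PATH_MAX buffer, bail out if it was truncated, then open the file and parse a single value. The same pattern as a self-contained user-space reader (the sysfs file in main() is only an example):

#include <limits.h>
#include <stdio.h>

/* read one numeric value from a sysfs-style file */
static int read_sysfs_int(const char *dir, const char *name, int *value)
{
	char path[PATH_MAX];
	FILE *file;
	int ret;

	if (snprintf(path, PATH_MAX, "%s/%s", dir, name) >= PATH_MAX)
		return -1;		/* path crossed PATH_MAX */

	file = fopen(path, "r");
	if (!file)
		return -1;

	ret = fscanf(file, "%d", value) == 1 ? 0 : -1;
	fclose(file);
	return ret;
}

int main(void)
{
	int type;

	if (!read_sysfs_int("/sys/bus/event_source/devices/cpu", "type", &type))
		printf("type=%d\n", type);
	return 0;
}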
cache.h
10 #define CMD_EXEC_PATH "--exec-path"
60 static inline int is_absolute_path(const char *path) is_absolute_path() argument
62 return path[0] == '/'; is_absolute_path()
65 const char *make_nonrelative_path(const char *path);
66 char *strip_path_suffix(const char *path, const char *suffix);
cpumap.c
247 char path[PATH_MAX]; cpu__get_topology_int() local
249 snprintf(path, PATH_MAX, cpu__get_topology_int()
252 return sysfs__read_int(path, value); cpu__get_topology_int()
351 static int get_max_num(char *path, int *max) get_max_num() argument
357 if (filename__read_str(path, &buf, &num)) get_max_num()
386 char path[PATH_MAX]; set_max_cpu_num() local
397 ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt); set_max_cpu_num()
399 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); set_max_cpu_num()
403 ret = get_max_num(path, &max_cpu_num); set_max_cpu_num()
414 char path[PATH_MAX]; set_max_node_num() local
425 ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt); set_max_node_num()
427 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); set_max_node_num()
431 ret = get_max_num(path, &max_node_num); set_max_node_num()
463 char path[PATH_MAX]; cpu__setup_cpunode_map() local
475 n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt); cpu__setup_cpunode_map()
477 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); cpu__setup_cpunode_map()
481 dir1 = opendir(path); cpu__setup_cpunode_map()
490 n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); cpu__setup_cpunode_map()
492 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); cpu__setup_cpunode_map()
llvm-utils.c
35 if (!strcmp(var, "clang-path")) perf_llvm_config()
55 char *env, *path, *tmp = NULL; search_program() local
78 path = strtok_r(env, ":", &tmp); search_program()
79 while (path) { search_program()
80 scnprintf(buf, sizeof(buf), "%s/%s", path, name); search_program()
86 path = strtok_r(NULL, ":", &tmp); search_program()
206 " \t -emit-llvm -o - | /path/to/llc -march=bpf -filetype=obj -o -\"\n" version_notice()
207 " \t(Replace /path/to/llc with path to your llc)\n\n" version_notice()
328 int llvm__compile_bpf(const char *path, void **p_obj_buf, llvm__compile_bpf() argument
350 " \tand 'clang-path' option in [llvm] section of ~/.perfconfig.\n"); llvm__compile_bpf()
385 * Since we may reset clang's working dir, path of source file llvm__compile_bpf()
386 * should be transferred into absolute path, except we want llvm__compile_bpf()
390 (path[0] == '-') ? path : llvm__compile_bpf()
391 make_nonrelative_path(path)); llvm__compile_bpf()
396 pr_err("ERROR:\tunable to compile %s\n", path); llvm__compile_bpf()
399 pr_err(" \t\tclang -target bpf -O2 -c %s\n", path); llvm__compile_bpf()
trace-event.c
78 char path[PATH_MAX]; tp_format() local
83 scnprintf(path, PATH_MAX, "%s/%s/%s/format", tp_format()
86 err = filename__read_str(path, &data, &size); tp_format()
thread_map.c
95 char path[256]; thread_map__new_by_uid() local
118 snprintf(path, sizeof(path), "/proc/%s", dirent.d_name); thread_map__new_by_uid()
120 if (stat(path, &st) != 0) thread_map__new_by_uid()
126 snprintf(path, sizeof(path), "/proc/%d/task", pid); thread_map__new_by_uid()
127 items = scandir(path, &namelist, filter, NULL); thread_map__new_by_uid()
361 char *path; get_comm() local
365 if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1) get_comm()
368 err = filename__read_str(path, comm, &size); get_comm()
379 free(path); get_comm()
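get_comm() is the dynamic-allocation variant of the same path-building idiom: asprintf() the /proc file name, read the file, and free the buffer on every exit. A standalone rendering, using a literal "/proc" in place of procfs__mountpoint():

#define _GNU_SOURCE		/* for asprintf() */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* read /proc/<pid>/comm into a caller-provided buffer */
static int get_comm(pid_t pid, char *comm, size_t size)
{
	char *path;
	ssize_t n;
	int fd, err = -1;

	if (asprintf(&path, "/proc/%d/comm", pid) == -1)
		return -1;

	fd = open(path, O_RDONLY);
	if (fd >= 0) {
		n = read(fd, comm, size - 1);
		if (n > 0) {
			comm[n] = '\0';
			comm[strcspn(comm, "\n")] = '\0'; /* strip newline */
			err = 0;
		}
		close(fd);
	}
	free(path);		/* always free the asprintf() buffer */
	return err;
}

int main(void)
{
	char comm[64];

	if (!get_comm(getpid(), comm, sizeof(comm)))
		printf("%s\n", comm);
	return 0;
}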
/linux-4.4.14/arch/powerpc/platforms/pseries/
of_helpers.h
6 struct device_node *pseries_of_derive_parent(const char *path);
of_helpers.c
10 * @path: the full_name of a node to be added to the tree
13 * described by path. E.g., for path = "/foo/bar", returns
16 struct device_node *pseries_of_derive_parent(const char *path) pseries_of_derive_parent() argument
23 tail = kbasename(path) - 1; pseries_of_derive_parent()
25 /* reject if path is "/" */ pseries_of_derive_parent()
26 if (!strcmp(path, "/")) pseries_of_derive_parent()
29 if (tail > path) { pseries_of_derive_parent()
30 parent_path = kstrndup(path, tail - path, GFP_KERNEL); pseries_of_derive_parent()
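pseries_of_derive_parent() trims the last component of a node's full_name: kbasename() locates the final component, kstrndup() copies everything before it, and "/" itself is rejected. The same derivation as user-space C, with strrchr() standing in for kbasename():

#define _GNU_SOURCE		/* for strndup() on older toolchains */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* "/foo/bar" -> "/foo"; "/foo" -> "/"; "/" -> NULL (rejected) */
static char *derive_parent(const char *path)
{
	const char *tail = strrchr(path, '/');	/* kbasename(path) - 1 */

	if (!tail || !strcmp(path, "/"))
		return NULL;			/* reject the root */
	if (tail > path)
		return strndup(path, tail - path);
	return strdup("/");			/* parent of a top-level node */
}

int main(void)
{
	char *p = derive_parent("/foo/bar");

	printf("%s\n", p ? p : "(none)");	/* prints "/foo" */
	free(p);
	return 0;
}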
/linux-4.4.14/fs/btrfs/
inode-item.c
25 static int find_name_in_backref(struct btrfs_path *path, const char *name, find_name_in_backref() argument
36 leaf = path->nodes[0]; find_name_in_backref()
37 item_size = btrfs_item_size_nr(leaf, path->slots[0]); find_name_in_backref()
38 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); find_name_in_backref()
54 int btrfs_find_name_in_ext_backref(struct btrfs_path *path, u64 ref_objectid, btrfs_find_name_in_ext_backref() argument
66 leaf = path->nodes[0]; btrfs_find_name_in_ext_backref()
67 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_find_name_in_ext_backref()
68 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_find_name_in_ext_backref()
98 struct btrfs_path *path, btrfs_lookup_inode_extref()
111 ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); btrfs_lookup_inode_extref()
116 if (!btrfs_find_name_in_ext_backref(path, ref_objectid, name, name_len, &extref)) btrfs_lookup_inode_extref()
127 struct btrfs_path *path; btrfs_del_inode_extref() local
141 path = btrfs_alloc_path(); btrfs_del_inode_extref()
142 if (!path) btrfs_del_inode_extref()
145 path->leave_spinning = 1; btrfs_del_inode_extref()
147 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_del_inode_extref()
158 if (!btrfs_find_name_in_ext_backref(path, ref_objectid, btrfs_del_inode_extref()
165 leaf = path->nodes[0]; btrfs_del_inode_extref()
166 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_del_inode_extref()
175 ret = btrfs_del_item(trans, root, path); btrfs_del_inode_extref()
180 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_del_inode_extref()
185 btrfs_truncate_item(root, path, item_size - del_len, 1); btrfs_del_inode_extref()
188 btrfs_free_path(path); btrfs_del_inode_extref()
198 struct btrfs_path *path; btrfs_del_inode_ref() local
214 path = btrfs_alloc_path(); btrfs_del_inode_ref()
215 if (!path) btrfs_del_inode_ref()
218 path->leave_spinning = 1; btrfs_del_inode_ref()
220 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_del_inode_ref()
228 if (!find_name_in_backref(path, name, name_len, &ref)) { btrfs_del_inode_ref()
233 leaf = path->nodes[0]; btrfs_del_inode_ref()
234 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_del_inode_ref()
240 ret = btrfs_del_item(trans, root, path); btrfs_del_inode_ref()
245 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_del_inode_ref()
248 btrfs_truncate_item(root, path, item_size - sub_item_len, 1); btrfs_del_inode_ref()
250 btrfs_free_path(path); btrfs_del_inode_ref()
279 struct btrfs_path *path; btrfs_insert_inode_extref() local
288 path = btrfs_alloc_path(); btrfs_insert_inode_extref()
289 if (!path) btrfs_insert_inode_extref()
292 path->leave_spinning = 1; btrfs_insert_inode_extref()
293 ret = btrfs_insert_empty_item(trans, root, path, &key, btrfs_insert_inode_extref()
296 if (btrfs_find_name_in_ext_backref(path, ref_objectid, btrfs_insert_inode_extref()
300 btrfs_extend_item(root, path, ins_len); btrfs_insert_inode_extref()
306 leaf = path->nodes[0]; btrfs_insert_inode_extref()
307 item = btrfs_item_nr(path->slots[0]); btrfs_insert_inode_extref()
308 ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char); btrfs_insert_inode_extref()
312 btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len); btrfs_insert_inode_extref()
313 btrfs_set_inode_extref_index(path->nodes[0], extref, index); btrfs_insert_inode_extref()
314 btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid); btrfs_insert_inode_extref()
317 write_extent_buffer(path->nodes[0], name, ptr, name_len); btrfs_insert_inode_extref()
318 btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_insert_inode_extref()
321 btrfs_free_path(path); btrfs_insert_inode_extref()
325 /* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */ btrfs_insert_inode_ref()
331 struct btrfs_path *path; btrfs_insert_inode_ref() local
342 path = btrfs_alloc_path(); btrfs_insert_inode_ref()
343 if (!path) btrfs_insert_inode_ref()
346 path->leave_spinning = 1; btrfs_insert_inode_ref()
347 path->skip_release_on_error = 1; btrfs_insert_inode_ref()
348 ret = btrfs_insert_empty_item(trans, root, path, &key, btrfs_insert_inode_ref()
353 if (find_name_in_backref(path, name, name_len, &ref)) btrfs_insert_inode_ref()
356 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); btrfs_insert_inode_ref()
357 btrfs_extend_item(root, path, ins_len); btrfs_insert_inode_ref()
358 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_insert_inode_ref()
361 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); btrfs_insert_inode_ref()
362 btrfs_set_inode_ref_index(path->nodes[0], ref, index); btrfs_insert_inode_ref()
367 if (find_name_in_backref(path, name, name_len, &ref)) btrfs_insert_inode_ref()
374 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_insert_inode_ref()
376 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); btrfs_insert_inode_ref()
377 btrfs_set_inode_ref_index(path->nodes[0], ref, index); btrfs_insert_inode_ref()
380 write_extent_buffer(path->nodes[0], name, ptr, name_len); btrfs_insert_inode_ref()
381 btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_insert_inode_ref()
384 btrfs_free_path(path); btrfs_insert_inode_ref()
403 struct btrfs_path *path, u64 objectid) btrfs_insert_empty_inode()
411 ret = btrfs_insert_empty_item(trans, root, path, &key, btrfs_insert_empty_inode()
417 *root, struct btrfs_path *path, btrfs_lookup_inode()
427 ret = btrfs_search_slot(trans, root, location, path, ins_len, cow); btrfs_lookup_inode()
429 location->offset == (u64)-1 && path->slots[0] != 0) { btrfs_lookup_inode()
430 slot = path->slots[0] - 1; btrfs_lookup_inode()
431 leaf = path->nodes[0]; btrfs_lookup_inode()
435 path->slots[0]--; btrfs_lookup_inode()
96 btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, const char *name, int name_len, u64 inode_objectid, u64 ref_objectid, int ins_len, int cow) btrfs_lookup_inode_extref() argument
401 btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid) btrfs_insert_empty_inode() argument
416 btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *location, int mod) btrfs_lookup_inode() argument
orphan.c
25 struct btrfs_path *path; btrfs_insert_orphan_item() local
33 path = btrfs_alloc_path(); btrfs_insert_orphan_item()
34 if (!path) btrfs_insert_orphan_item()
37 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); btrfs_insert_orphan_item()
39 btrfs_free_path(path); btrfs_insert_orphan_item()
46 struct btrfs_path *path; btrfs_del_orphan_item() local
54 path = btrfs_alloc_path(); btrfs_del_orphan_item()
55 if (!path) btrfs_del_orphan_item()
58 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_del_orphan_item()
66 ret = btrfs_del_item(trans, root, path); btrfs_del_orphan_item()
69 btrfs_free_path(path); btrfs_del_orphan_item()
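orphan.c is the smallest complete instance of the btrfs_path discipline that every function in this directory follows: allocate a path, run one search/insert/delete through it, and free it on every exit, since the path caches the extent buffers, slots, and locks of the traversal. A condensed sketch modeled on btrfs_del_orphan_item() above (kernel-internal API, not buildable out of tree; demo_del_item is hypothetical):

static int demo_del_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len = -1: we intend to delete; cow = 1: COW on the way down */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret) {		/* key not found */
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);	/* also drops the locks/refs the path holds */
	return ret;
}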
uuid-tree.c
38 struct btrfs_path *path = NULL; btrfs_uuid_tree_lookup() local
50 path = btrfs_alloc_path(); btrfs_uuid_tree_lookup()
51 if (!path) { btrfs_uuid_tree_lookup()
57 ret = btrfs_search_slot(NULL, uuid_root, &key, path, 0, 0); btrfs_uuid_tree_lookup()
65 eb = path->nodes[0]; btrfs_uuid_tree_lookup()
66 slot = path->slots[0]; btrfs_uuid_tree_lookup()
89 btrfs_free_path(path); btrfs_uuid_tree_lookup()
98 struct btrfs_path *path = NULL; btrfs_uuid_tree_add() local
116 path = btrfs_alloc_path(); btrfs_uuid_tree_add()
117 if (!path) { btrfs_uuid_tree_add()
122 ret = btrfs_insert_empty_item(trans, uuid_root, path, &key, btrfs_uuid_tree_add()
126 eb = path->nodes[0]; btrfs_uuid_tree_add()
127 slot = path->slots[0]; btrfs_uuid_tree_add()
134 btrfs_extend_item(uuid_root, path, sizeof(subid_le)); btrfs_uuid_tree_add()
135 eb = path->nodes[0]; btrfs_uuid_tree_add()
136 slot = path->slots[0]; btrfs_uuid_tree_add()
153 btrfs_free_path(path); btrfs_uuid_tree_add()
162 struct btrfs_path *path = NULL; btrfs_uuid_tree_rem() local
179 path = btrfs_alloc_path(); btrfs_uuid_tree_rem()
180 if (!path) { btrfs_uuid_tree_rem()
185 ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1); btrfs_uuid_tree_rem()
196 eb = path->nodes[0]; btrfs_uuid_tree_rem()
197 slot = path->slots[0]; btrfs_uuid_tree_rem()
223 ret = btrfs_del_item(trans, uuid_root, path); btrfs_uuid_tree_rem()
231 btrfs_truncate_item(uuid_root, path, item_size - sizeof(subid), 1); btrfs_uuid_tree_rem()
234 btrfs_free_path(path); btrfs_uuid_tree_rem()
264 struct btrfs_path *path; btrfs_uuid_tree_iterate() local
271 path = btrfs_alloc_path(); btrfs_uuid_tree_iterate()
272 if (!path) { btrfs_uuid_tree_iterate()
282 ret = btrfs_search_forward(root, &key, path, 0); btrfs_uuid_tree_iterate()
291 leaf = path->nodes[0]; btrfs_uuid_tree_iterate()
292 slot = path->slots[0]; btrfs_uuid_tree_iterate()
320 btrfs_release_path(path); btrfs_uuid_tree_iterate()
341 ret = btrfs_next_item(root, path); btrfs_uuid_tree_iterate()
350 btrfs_free_path(path); btrfs_uuid_tree_iterate()
root-tree.c
72 * path: the path we search
83 struct btrfs_path *path, struct btrfs_root_item *root_item, btrfs_find_root()
91 ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0); btrfs_find_root()
100 if (path->slots[0] == 0) btrfs_find_root()
102 path->slots[0]--; btrfs_find_root()
106 l = path->nodes[0]; btrfs_find_root()
107 slot = path->slots[0]; btrfs_find_root()
121 btrfs_release_path(path); btrfs_find_root()
140 struct btrfs_path *path; btrfs_update_root() local
147 path = btrfs_alloc_path(); btrfs_update_root()
148 if (!path) btrfs_update_root()
151 ret = btrfs_search_slot(trans, root, key, path, 0, 1); btrfs_update_root()
158 btrfs_print_leaf(root, path->nodes[0]); btrfs_update_root()
164 l = path->nodes[0]; btrfs_update_root()
165 slot = path->slots[0]; btrfs_update_root()
175 btrfs_release_path(path); btrfs_update_root()
176 ret = btrfs_search_slot(trans, root, key, path, btrfs_update_root()
183 ret = btrfs_del_item(trans, root, path); btrfs_update_root()
188 btrfs_release_path(path); btrfs_update_root()
189 ret = btrfs_insert_empty_item(trans, root, path, btrfs_update_root()
195 l = path->nodes[0]; btrfs_update_root()
196 slot = path->slots[0]; btrfs_update_root()
207 btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_update_root()
209 btrfs_free_path(path); btrfs_update_root()
226 struct btrfs_path *path; btrfs_find_orphan_roots() local
237 path = btrfs_alloc_path(); btrfs_find_orphan_roots()
238 if (!path) btrfs_find_orphan_roots()
249 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0); btrfs_find_orphan_roots()
255 leaf = path->nodes[0]; btrfs_find_orphan_roots()
256 if (path->slots[0] >= btrfs_header_nritems(leaf)) { btrfs_find_orphan_roots()
257 ret = btrfs_next_leaf(tree_root, path); btrfs_find_orphan_roots()
262 leaf = path->nodes[0]; btrfs_find_orphan_roots()
265 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_find_orphan_roots()
266 btrfs_release_path(path); btrfs_find_orphan_roots()
282 btrfs_release_path(path); btrfs_find_orphan_roots()
331 btrfs_free_path(path); btrfs_find_orphan_roots()
339 struct btrfs_path *path; btrfs_del_root() local
342 path = btrfs_alloc_path(); btrfs_del_root()
343 if (!path) btrfs_del_root()
345 ret = btrfs_search_slot(trans, root, key, path, -1, 1); btrfs_del_root()
351 ret = btrfs_del_item(trans, root, path); btrfs_del_root()
353 btrfs_free_path(path); btrfs_del_root()
363 struct btrfs_path *path; btrfs_del_root_ref() local
371 path = btrfs_alloc_path(); btrfs_del_root_ref()
372 if (!path) btrfs_del_root_ref()
379 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); btrfs_del_root_ref()
382 leaf = path->nodes[0]; btrfs_del_root_ref()
383 ref = btrfs_item_ptr(leaf, path->slots[0], btrfs_del_root_ref()
392 ret = btrfs_del_item(trans, tree_root, path); btrfs_del_root_ref()
401 btrfs_release_path(path); btrfs_del_root_ref()
409 btrfs_free_path(path); btrfs_del_root_ref()
426 * Will return 0, -ENOMEM, or anything from the CoW path
435 struct btrfs_path *path; btrfs_add_root_ref() local
440 path = btrfs_alloc_path(); btrfs_add_root_ref()
441 if (!path) btrfs_add_root_ref()
448 ret = btrfs_insert_empty_item(trans, tree_root, path, &key, btrfs_add_root_ref()
452 btrfs_free_path(path); btrfs_add_root_ref()
456 leaf = path->nodes[0]; btrfs_add_root_ref()
457 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); btrfs_add_root_ref()
466 btrfs_release_path(path); btrfs_add_root_ref()
473 btrfs_free_path(path); btrfs_add_root_ref()
82 btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key, struct btrfs_path *path, struct btrfs_root_item *root_item, struct btrfs_key *root_key) btrfs_find_root() argument
dir-item.c
35 struct btrfs_path *path, insert_with_overflow()
46 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); insert_with_overflow()
49 di = btrfs_match_dir_item_name(root, path, name, name_len); insert_with_overflow()
52 btrfs_extend_item(root, path, data_size); insert_with_overflow()
56 leaf = path->nodes[0]; insert_with_overflow()
57 item = btrfs_item_nr(path->slots[0]); insert_with_overflow()
58 ptr = btrfs_item_ptr(leaf, path->slots[0], char); insert_with_overflow()
70 struct btrfs_path *path, u64 objectid, btrfs_insert_xattr_item()
89 dir_item = insert_with_overflow(trans, root, path, &key, data_size, btrfs_insert_xattr_item()
95 leaf = path->nodes[0]; btrfs_insert_xattr_item()
107 btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_insert_xattr_item()
127 struct btrfs_path *path; btrfs_insert_dir_item() local
139 path = btrfs_alloc_path(); btrfs_insert_dir_item()
140 if (!path) btrfs_insert_dir_item()
142 path->leave_spinning = 1; btrfs_insert_dir_item()
147 dir_item = insert_with_overflow(trans, root, path, &key, data_size, btrfs_insert_dir_item()
156 leaf = path->nodes[0]; btrfs_insert_dir_item()
173 btrfs_release_path(path); btrfs_insert_dir_item()
178 btrfs_free_path(path); btrfs_insert_dir_item()
193 struct btrfs_path *path, u64 dir, btrfs_lookup_dir_item()
207 ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); btrfs_lookup_dir_item()
213 return btrfs_match_dir_item_name(root, path, name, name_len); btrfs_lookup_dir_item()
225 struct btrfs_path *path; btrfs_check_dir_item_collision() local
228 path = btrfs_alloc_path(); btrfs_check_dir_item_collision()
229 if (!path) btrfs_check_dir_item_collision()
236 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_check_dir_item_collision()
249 di = btrfs_match_dir_item_name(root, path, name, name_len); btrfs_check_dir_item_collision()
261 leaf = path->nodes[0]; btrfs_check_dir_item_collision()
262 slot = path->slots[0]; btrfs_check_dir_item_collision()
271 btrfs_free_path(path); btrfs_check_dir_item_collision()
286 struct btrfs_path *path, u64 dir, btrfs_lookup_dir_index_item()
299 ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); btrfs_lookup_dir_index_item()
304 return btrfs_match_dir_item_name(root, path, name, name_len); btrfs_lookup_dir_index_item()
309 struct btrfs_path *path, u64 dirid, btrfs_search_dir_index_item()
322 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_search_dir_index_item()
326 leaf = path->nodes[0]; btrfs_search_dir_index_item()
330 if (path->slots[0] >= nritems) { btrfs_search_dir_index_item()
331 ret = btrfs_next_leaf(root, path); btrfs_search_dir_index_item()
336 leaf = path->nodes[0]; btrfs_search_dir_index_item()
341 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_search_dir_index_item()
345 di = btrfs_match_dir_item_name(root, path, name, name_len); btrfs_search_dir_index_item()
349 path->slots[0]++; btrfs_search_dir_index_item()
356 struct btrfs_path *path, u64 dir, btrfs_lookup_xattr()
368 ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); btrfs_lookup_xattr()
374 return btrfs_match_dir_item_name(root, path, name, name_len); btrfs_lookup_xattr()
378 * helper function to look at the directory item pointed to by 'path'
383 struct btrfs_path *path, btrfs_match_dir_item_name()
393 leaf = path->nodes[0]; btrfs_match_dir_item_name()
394 dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); btrfs_match_dir_item_name()
398 total_len = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_match_dir_item_name()
422 struct btrfs_path *path, btrfs_delete_one_dir_name()
431 leaf = path->nodes[0]; btrfs_delete_one_dir_name()
434 item_len = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_delete_one_dir_name()
436 ret = btrfs_del_item(trans, root, path); btrfs_delete_one_dir_name()
442 start = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_delete_one_dir_name()
445 btrfs_truncate_item(root, path, item_len - sub_item_len, 1); btrfs_delete_one_dir_name()
32 insert_with_overflow(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 data_size, const char *name, int name_len) insert_with_overflow() argument
68 btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, const char *name, u16 name_len, const void *data, u16 data_len) btrfs_insert_xattr_item() argument
191 btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, const char *name, int name_len, int mod) btrfs_lookup_dir_item() argument
284 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, u64 objectid, const char *name, int name_len, int mod) btrfs_lookup_dir_index_item() argument
308 btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path, u64 dirid, const char *name, int name_len) btrfs_search_dir_index_item() argument
354 btrfs_lookup_xattr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, const char *name, u16 name_len, int mod) btrfs_lookup_xattr() argument
382 btrfs_match_dir_item_name(struct btrfs_root *root, struct btrfs_path *path, const char *name, int name_len) btrfs_match_dir_item_name() argument
420 btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_dir_item *di) btrfs_delete_one_dir_name() argument
file-item.c
50 struct btrfs_path *path; btrfs_insert_file_extent() local
53 path = btrfs_alloc_path(); btrfs_insert_file_extent()
54 if (!path) btrfs_insert_file_extent()
60 path->leave_spinning = 1; btrfs_insert_file_extent()
61 ret = btrfs_insert_empty_item(trans, root, path, &file_key, btrfs_insert_file_extent()
66 leaf = path->nodes[0]; btrfs_insert_file_extent()
67 item = btrfs_item_ptr(leaf, path->slots[0], btrfs_insert_file_extent()
82 btrfs_free_path(path); btrfs_insert_file_extent()
89 struct btrfs_path *path, btrfs_lookup_csum()
104 ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow); btrfs_lookup_csum()
107 leaf = path->nodes[0]; btrfs_lookup_csum()
110 if (path->slots[0] == 0) btrfs_lookup_csum()
112 path->slots[0]--; btrfs_lookup_csum()
113 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_lookup_csum()
119 csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_lookup_csum()
129 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); btrfs_lookup_csum()
141 struct btrfs_path *path, u64 objectid, btrfs_lookup_file_extent()
152 ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow); btrfs_lookup_file_extent()
169 struct btrfs_path *path; __btrfs_lookup_bio_sums() local
181 path = btrfs_alloc_path(); __btrfs_lookup_bio_sums()
182 if (!path) __btrfs_lookup_bio_sums()
191 btrfs_free_path(path); __btrfs_lookup_bio_sums()
205 path->reada = 2; __btrfs_lookup_bio_sums()
216 path->search_commit_root = 1; __btrfs_lookup_bio_sums()
217 path->skip_locking = 1; __btrfs_lookup_bio_sums()
237 btrfs_release_path(path); __btrfs_lookup_bio_sums()
239 path, disk_bytenr, 0); __btrfs_lookup_bio_sums()
254 btrfs_release_path(path); __btrfs_lookup_bio_sums()
257 btrfs_item_key_to_cpu(path->nodes[0], &found_key, __btrfs_lookup_bio_sums()
258 path->slots[0]); __btrfs_lookup_bio_sums()
261 item_size = btrfs_item_size_nr(path->nodes[0], __btrfs_lookup_bio_sums()
262 path->slots[0]); __btrfs_lookup_bio_sums()
266 item = btrfs_item_ptr(path->nodes[0], path->slots[0], __btrfs_lookup_bio_sums()
278 read_extent_buffer(path->nodes[0], csum, __btrfs_lookup_bio_sums()
291 btrfs_free_path(path); __btrfs_lookup_bio_sums()
311 struct btrfs_path *path; btrfs_lookup_csums_range() local
325 path = btrfs_alloc_path(); btrfs_lookup_csums_range()
326 if (!path) btrfs_lookup_csums_range()
330 path->skip_locking = 1; btrfs_lookup_csums_range()
331 path->reada = 2; btrfs_lookup_csums_range()
332 path->search_commit_root = 1; btrfs_lookup_csums_range()
339 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_lookup_csums_range()
342 if (ret > 0 && path->slots[0] > 0) { btrfs_lookup_csums_range()
343 leaf = path->nodes[0]; btrfs_lookup_csums_range()
344 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); btrfs_lookup_csums_range()
350 btrfs_item_size_nr(leaf, path->slots[0] - 1)) btrfs_lookup_csums_range()
351 path->slots[0]--; btrfs_lookup_csums_range()
356 leaf = path->nodes[0]; btrfs_lookup_csums_range()
357 if (path->slots[0] >= btrfs_header_nritems(leaf)) { btrfs_lookup_csums_range()
358 ret = btrfs_next_leaf(root, path); btrfs_lookup_csums_range()
363 leaf = path->nodes[0]; btrfs_lookup_csums_range()
366 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_lookup_csums_range()
375 size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_lookup_csums_range()
378 path->slots[0]++; btrfs_lookup_csums_range()
383 item = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_lookup_csums_range()
403 read_extent_buffer(path->nodes[0], btrfs_lookup_csums_range()
411 path->slots[0]++; btrfs_lookup_csums_range()
422 btrfs_free_path(path); btrfs_lookup_csums_range()
507 * key to describe the csum pointed to by the path, and it expects
517 struct btrfs_path *path, truncate_one_csum()
527 leaf = path->nodes[0]; truncate_one_csum()
528 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; truncate_one_csum()
541 btrfs_truncate_item(root, path, new_size, 1); truncate_one_csum()
553 btrfs_truncate_item(root, path, new_size, 0); truncate_one_csum()
556 btrfs_set_item_key_safe(root->fs_info, path, key); truncate_one_csum()
569 struct btrfs_path *path; btrfs_del_csums() local
580 path = btrfs_alloc_path(); btrfs_del_csums()
581 if (!path) btrfs_del_csums()
589 path->leave_spinning = 1; btrfs_del_csums()
590 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_del_csums()
592 if (path->slots[0] == 0) btrfs_del_csums()
594 path->slots[0]--; btrfs_del_csums()
599 leaf = path->nodes[0]; btrfs_del_csums()
600 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_del_csums()
610 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; btrfs_del_csums()
620 ret = btrfs_del_item(trans, root, path); btrfs_del_csums()
636 * But we can't drop the path because the btrfs_del_csums()
653 path->slots[0]); btrfs_del_csums()
663 ret = btrfs_split_item(trans, root, path, &key, offset); btrfs_del_csums()
671 truncate_one_csum(root, path, &key, bytenr, len); btrfs_del_csums()
675 btrfs_release_path(path); btrfs_del_csums()
679 btrfs_free_path(path); btrfs_del_csums()
689 struct btrfs_path *path; btrfs_csum_file_blocks() local
704 path = btrfs_alloc_path(); btrfs_csum_file_blocks()
705 if (!path) btrfs_csum_file_blocks()
715 item = btrfs_lookup_csum(trans, root, path, bytenr, 1); btrfs_csum_file_blocks()
718 leaf = path->nodes[0]; btrfs_csum_file_blocks()
719 item_end = btrfs_item_ptr(leaf, path->slots[0], btrfs_csum_file_blocks()
722 btrfs_item_size_nr(leaf, path->slots[0])); btrfs_csum_file_blocks()
732 leaf = path->nodes[0]; btrfs_csum_file_blocks()
733 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_csum_file_blocks()
740 int slot = path->slots[0] + 1; btrfs_csum_file_blocks()
742 nritems = btrfs_header_nritems(path->nodes[0]); btrfs_csum_file_blocks()
743 if (!nritems || (path->slots[0] >= nritems - 1)) { btrfs_csum_file_blocks()
744 ret = btrfs_next_leaf(root, path); btrfs_csum_file_blocks()
749 slot = path->slots[0]; btrfs_csum_file_blocks()
751 btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); btrfs_csum_file_blocks()
766 btrfs_release_path(path); btrfs_csum_file_blocks()
767 ret = btrfs_search_slot(trans, root, &file_key, path, btrfs_csum_file_blocks()
773 if (path->slots[0] == 0) btrfs_csum_file_blocks()
775 path->slots[0]--; btrfs_csum_file_blocks()
778 leaf = path->nodes[0]; btrfs_csum_file_blocks()
779 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_csum_file_blocks()
789 if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) / btrfs_csum_file_blocks()
810 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); btrfs_csum_file_blocks()
815 btrfs_extend_item(root, path, diff); btrfs_csum_file_blocks()
821 btrfs_release_path(path); btrfs_csum_file_blocks()
837 path->leave_spinning = 1; btrfs_csum_file_blocks()
838 ret = btrfs_insert_empty_item(trans, root, path, &file_key, btrfs_csum_file_blocks()
840 path->leave_spinning = 0; btrfs_csum_file_blocks()
845 leaf = path->nodes[0]; btrfs_csum_file_blocks()
847 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); btrfs_csum_file_blocks()
849 btrfs_item_size_nr(leaf, path->slots[0])); btrfs_csum_file_blocks()
865 btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_csum_file_blocks()
867 btrfs_release_path(path); btrfs_csum_file_blocks()
872 btrfs_free_path(path); btrfs_csum_file_blocks()
880 const struct btrfs_path *path, btrfs_extent_item_to_extent_map()
886 struct extent_buffer *leaf = path->nodes[0]; btrfs_extent_item_to_extent_map()
887 const int slot = path->slots[0]; btrfs_extent_item_to_extent_map()
87 btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, int cow) btrfs_lookup_csum() argument
139 btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 offset, int mod) btrfs_lookup_file_extent() argument
516 truncate_one_csum(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, u64 bytenr, u64 len) truncate_one_csum() argument
879 btrfs_extent_item_to_extent_map(struct inode *inode, const struct btrfs_path *path, struct btrfs_file_extent_item *fi, const bool new_inline, struct extent_map *em) btrfs_extent_item_to_extent_map() argument
ctree.c
29 *root, struct btrfs_path *path, int level);
32 struct btrfs_path *path, int data_size, int extend);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
47 struct btrfs_path *path; btrfs_alloc_path() local
48 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); btrfs_alloc_path()
49 return path; btrfs_alloc_path()
53 * set all locked nodes in the path to blocking locks. This should
75 * retake all the spinlocks in the path. You can safely use NULL
106 /* this also releases the path */ btrfs_free_path()
116 * path release drops references on the extent buffers in the path
117 * and it drops any locks held by this path
1354 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, tree_mod_log_rewind() argument
1370 btrfs_set_path_blocking(path); tree_mod_log_rewind()
1395 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK); tree_mod_log_rewind()
1888 struct btrfs_path *path, int level) balance_level()
1897 int orig_slot = path->slots[level]; balance_level()
1903 mid = path->nodes[level]; balance_level()
1905 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK && balance_level()
1906 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING); balance_level()
1912 parent = path->nodes[level + 1]; balance_level()
1913 pslot = path->slots[level + 1]; balance_level()
1949 path->locks[level] = 0; balance_level()
1950 path->nodes[level] = NULL; balance_level()
1953 /* once for the path */ balance_level()
2007 del_ptr(root, path, level + 1, pslot + 1); balance_level()
2051 del_ptr(root, path, level + 1, pslot); balance_level()
2066 /* update the path */ balance_level()
2071 path->nodes[level] = left; balance_level()
2072 path->slots[level + 1] -= 1; balance_level()
2073 path->slots[level] = orig_slot; balance_level()
2080 path->slots[level] = orig_slot; balance_level()
2085 btrfs_node_blockptr(path->nodes[level], path->slots[level])) balance_level()
2093 if (path->nodes[level] != left) balance_level()
2106 struct btrfs_path *path, int level) push_nodes_for_insert()
2115 int orig_slot = path->slots[level]; push_nodes_for_insert()
2120 mid = path->nodes[level]; push_nodes_for_insert()
2124 parent = path->nodes[level + 1]; push_nodes_for_insert()
2125 pslot = path->slots[level + 1]; push_nodes_for_insert()
2164 path->nodes[level] = left; push_nodes_for_insert()
2165 path->slots[level + 1] -= 1; push_nodes_for_insert()
2166 path->slots[level] = orig_slot; push_nodes_for_insert()
2172 path->slots[level] = orig_slot; push_nodes_for_insert()
2218 path->nodes[level] = right; push_nodes_for_insert()
2219 path->slots[level + 1] += 1; push_nodes_for_insert()
2220 path->slots[level] = orig_slot - push_nodes_for_insert()
2241 struct btrfs_path *path, reada_for_search()
2251 int direction = path->reada; reada_for_search()
2260 if (!path->nodes[level]) reada_for_search()
2263 node = path->nodes[level]; reada_for_search()
2288 if (path->reada < 0 && objectid) { reada_for_search()
2307 struct btrfs_path *path, int level) reada_for_balance()
2317 parent = path->nodes[level + 1]; reada_for_balance()
2322 slot = path->slots[level + 1]; reada_for_balance()
2355 * in the tree. The exceptions are when our path goes through slot 0, because
2359 * callers might also have set path->keep_locks, which tells this code to keep
2360 * the lock if the path points to the last slot in the block. This is part of
2366 static noinline void unlock_up(struct btrfs_path *path, int level, unlock_up() argument
2376 if (!path->nodes[i]) unlock_up()
2378 if (!path->locks[i]) unlock_up()
2380 if (!no_skips && path->slots[i] == 0) { unlock_up()
2384 if (!no_skips && path->keep_locks) { unlock_up()
2386 t = path->nodes[i]; unlock_up()
2388 if (nritems < 1 || path->slots[i] >= nritems - 1) { unlock_up()
2396 t = path->nodes[i]; unlock_up()
2397 if (i >= lowest_unlock && i > skip_level && path->locks[i]) { unlock_up()
2398 btrfs_tree_unlock_rw(t, path->locks[i]); unlock_up()
2399 path->locks[i] = 0; unlock_up()
2410 * This releases any locks held in the path starting at level and
2418 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) btrfs_unlock_up_safe() argument
2422 if (path->keep_locks) btrfs_unlock_up_safe()
2426 if (!path->nodes[i]) btrfs_unlock_up_safe()
2428 if (!path->locks[i]) btrfs_unlock_up_safe()
2430 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); btrfs_unlock_up_safe()
2431 path->locks[i] = 0; btrfs_unlock_up_safe()
2437 * in cache without setting the path to blocking. If we find the block
2438 * we return zero and the path is unchanged.
2440 * If we can't find the block, we set the path blocking and do some
2523 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2622 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, btrfs_find_item() argument
2630 ASSERT(path); btrfs_find_item()
2637 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); btrfs_find_item()
2641 eb = path->nodes[0]; btrfs_find_item()
2642 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { btrfs_find_item()
2643 ret = btrfs_next_leaf(fs_root, path); btrfs_find_item()
2646 eb = path->nodes[0]; btrfs_find_item()
2649 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); btrfs_find_item()
2658 * look for key in the tree. path is filled in with nodes along the way
2660 * level of the path (level 0)
2662 * If the key isn't found, the path points to the slot where it should
2767 * set up the path here so we can release it under lock btrfs_search_slot()
2773 * then we don't want to set the path blocking, btrfs_search_slot()
2929 * we don't really know what they plan on doing with the path btrfs_search_slot()
2947 * The resulting path and return value will be set up as if we called
3074 * a return value of 1 means the path is at the position where the btrfs_search_slot_for_read()
3076 * but in case the previous item is the last in a leaf, path points btrfs_search_slot_for_read()
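The comments above give btrfs_search_slot()'s contract: 0 means the key was found and path->slots[0] points at it; 1 means it was not found and the path points at the slot where it would be inserted; negative is an I/O or allocation error. A minimal sketch of a caller honoring all three cases (names hypothetical):

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;               /* hard error */
        if (ret > 0) {
                /* not found: the path sits at the insert position, so the
                 * closest smaller key, if any, is one slot back */
                ret = -ENOENT;
                goto out;
        }
        /* found: the item lives at path->nodes[0], path->slots[0] */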
3135 struct btrfs_path *path, fixup_low_keys()
3142 int tslot = path->slots[i]; fixup_low_keys()
3143 if (!path->nodes[i]) fixup_low_keys()
3145 t = path->nodes[i]; fixup_low_keys()
3148 btrfs_mark_buffer_dirty(path->nodes[i]); fixup_low_keys()
3161 struct btrfs_path *path, btrfs_set_item_key_safe()
3168 eb = path->nodes[0]; btrfs_set_item_key_safe()
3169 slot = path->slots[0]; btrfs_set_item_key_safe()
3183 fixup_low_keys(fs_info, path, &disk_key, 1); btrfs_set_item_key_safe()
3333 struct btrfs_path *path, int level) insert_new_root()
3341 BUG_ON(path->nodes[level]); insert_new_root()
3342 BUG_ON(path->nodes[level-1] != root->node); insert_new_root()
3344 lower = path->nodes[level-1]; insert_new_root()
3389 path->nodes[level] = c; insert_new_root()
3390 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; insert_new_root()
3391 path->slots[level] = 0; insert_new_root()
3403 struct btrfs_root *root, struct btrfs_path *path, insert_ptr()
3411 BUG_ON(!path->nodes[level]); insert_ptr()
3412 btrfs_assert_tree_locked(path->nodes[level]); insert_ptr()
3413 lower = path->nodes[level]; insert_ptr()
3440 * split the node at the specified level in path in two.
3441 * The path is corrected to point to the appropriate node after the split
3450 struct btrfs_path *path, int level) split_node()
3459 c = path->nodes[level]; split_node()
3472 ret = insert_new_root(trans, root, path, level + 1); split_node()
3476 ret = push_nodes_for_insert(trans, root, path, level); split_node()
3477 c = path->nodes[level]; split_node()
3525 insert_ptr(trans, root, path, &disk_key, split->start, split_node()
3526 path->slots[level + 1] + 1, level + 1); split_node()
3528 if (path->slots[level] >= mid) { split_node()
3529 path->slots[level] -= mid; split_node()
3532 path->nodes[level] = split; split_node()
3533 path->slots[level + 1] += 1; split_node()
3594 struct btrfs_path *path, __push_leaf_right()
3600 struct extent_buffer *left = path->nodes[0]; __push_leaf_right()
3601 struct extent_buffer *upper = path->nodes[1]; __push_leaf_right()
3621 if (path->slots[0] >= left_nritems) __push_leaf_right()
3624 slot = path->slots[1]; __push_leaf_right()
3630 if (path->slots[0] > i) __push_leaf_right()
3632 if (path->slots[0] == i) { __push_leaf_right()
3639 if (path->slots[0] == i) __push_leaf_right()
3710 /* then fixup the leaf pointer in the path */ __push_leaf_right()
3711 if (path->slots[0] >= left_nritems) { __push_leaf_right()
3712 path->slots[0] -= left_nritems; __push_leaf_right()
3713 if (btrfs_header_nritems(path->nodes[0]) == 0) __push_leaf_right()
3714 clean_tree_block(trans, root->fs_info, path->nodes[0]); __push_leaf_right()
3715 btrfs_tree_unlock(path->nodes[0]); __push_leaf_right()
3716 free_extent_buffer(path->nodes[0]); __push_leaf_right()
3717 path->nodes[0] = right; __push_leaf_right()
3718 path->slots[1] += 1; __push_leaf_right()
3732 * push some data in the path leaf to the right, trying to free up at
3742 *root, struct btrfs_path *path, push_leaf_right()
3746 struct extent_buffer *left = path->nodes[0]; push_leaf_right()
3754 if (!path->nodes[1]) push_leaf_right()
3757 slot = path->slots[1]; push_leaf_right()
3758 upper = path->nodes[1]; push_leaf_right()
3762 btrfs_assert_tree_locked(path->nodes[1]); push_leaf_right()
3789 if (path->slots[0] == left_nritems && !empty) { push_leaf_right()
3796 path->nodes[0] = right; push_leaf_right()
3797 path->slots[0] = 0; push_leaf_right()
3798 path->slots[1]++; push_leaf_right()
3802 return __push_leaf_right(trans, root, path, min_data_size, empty, push_leaf_right()
3811 * push some data in the path leaf to the left, trying to free up at
3820 struct btrfs_path *path, int data_size, __push_leaf_left()
3826 struct extent_buffer *right = path->nodes[0]; __push_leaf_left()
3849 if (path->slots[0] < i) __push_leaf_left()
3851 if (path->slots[0] == i) { __push_leaf_left()
3858 if (path->slots[0] == i) __push_leaf_left()
3941 fixup_low_keys(root->fs_info, path, &disk_key, 1); __push_leaf_left()
3943 /* then fixup the leaf pointer in the path */ __push_leaf_left()
3944 if (path->slots[0] < push_items) { __push_leaf_left()
3945 path->slots[0] += old_left_nritems; __push_leaf_left()
3946 btrfs_tree_unlock(path->nodes[0]); __push_leaf_left()
3947 free_extent_buffer(path->nodes[0]); __push_leaf_left()
3948 path->nodes[0] = left; __push_leaf_left()
3949 path->slots[1] -= 1; __push_leaf_left()
3953 path->slots[0] -= push_items; __push_leaf_left()
3955 BUG_ON(path->slots[0] < 0); __push_leaf_left()
3964 * push some data in the path leaf to the left, trying to free up at
3972 *root, struct btrfs_path *path, int min_data_size, push_leaf_left()
3975 struct extent_buffer *right = path->nodes[0]; push_leaf_left()
3982 slot = path->slots[1]; push_leaf_left()
3985 if (!path->nodes[1]) push_leaf_left()
3992 btrfs_assert_tree_locked(path->nodes[1]); push_leaf_left()
3994 left = read_node_slot(root, path->nodes[1], slot - 1); push_leaf_left()
4009 path->nodes[1], slot - 1, &left); push_leaf_left()
4023 return __push_leaf_left(trans, root, path, min_data_size, push_leaf_left()
4033 * split the path's leaf in two, making sure there is at least data_size
4034 * available for the resulting leaf level of the path.
4038 struct btrfs_path *path, copy_for_split()
4078 insert_ptr(trans, root, path, &disk_key, right->start, copy_for_split()
4079 path->slots[1] + 1, 1); copy_for_split()
4083 BUG_ON(path->slots[0] != slot); copy_for_split()
4086 btrfs_tree_unlock(path->nodes[0]); copy_for_split()
4087 free_extent_buffer(path->nodes[0]); copy_for_split()
4088 path->nodes[0] = right; copy_for_split()
4089 path->slots[0] -= mid; copy_for_split()
4090 path->slots[1] += 1; copy_for_split()
4096 BUG_ON(path->slots[0] < 0); copy_for_split()
4111 struct btrfs_path *path, push_for_double_split()
4120 slot = path->slots[0]; push_for_double_split()
4121 if (slot < btrfs_header_nritems(path->nodes[0])) push_for_double_split()
4122 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]); push_for_double_split()
4128 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); push_for_double_split()
4135 nritems = btrfs_header_nritems(path->nodes[0]); push_for_double_split()
4140 if (path->slots[0] == 0 || path->slots[0] == nritems) push_for_double_split()
4143 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) push_for_double_split()
4147 slot = path->slots[0]; push_for_double_split()
4148 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); push_for_double_split()
4161 * split the path's leaf in two, making sure there is at least data_size
4162 * available for the resulting leaf level of the path.
4169 struct btrfs_path *path, int data_size, split_leaf()
4185 l = path->nodes[0]; split_leaf()
4186 slot = path->slots[0]; split_leaf()
4192 if (data_size && path->nodes[1]) { split_leaf()
4198 wret = push_leaf_right(trans, root, path, space_needed, split_leaf()
4203 wret = push_leaf_left(trans, root, path, space_needed, split_leaf()
4208 l = path->nodes[0]; split_leaf()
4215 if (!path->nodes[1]) { split_leaf()
4216 ret = insert_new_root(trans, root, path, 1); split_leaf()
4222 l = path->nodes[0]; split_leaf()
4223 slot = path->slots[0]; split_leaf()
4292 insert_ptr(trans, root, path, &disk_key, right->start, split_leaf()
4293 path->slots[1] + 1, 1); split_leaf()
4294 btrfs_tree_unlock(path->nodes[0]); split_leaf()
4295 free_extent_buffer(path->nodes[0]); split_leaf()
4296 path->nodes[0] = right; split_leaf()
4297 path->slots[0] = 0; split_leaf()
4298 path->slots[1] += 1; split_leaf()
4301 insert_ptr(trans, root, path, &disk_key, right->start, split_leaf()
4302 path->slots[1], 1); split_leaf()
4303 btrfs_tree_unlock(path->nodes[0]); split_leaf()
4304 free_extent_buffer(path->nodes[0]); split_leaf()
4305 path->nodes[0] = right; split_leaf()
4306 path->slots[0] = 0; split_leaf()
4307 if (path->slots[1] == 0) split_leaf()
4308 fixup_low_keys(fs_info, path, &disk_key, 1); split_leaf()
4314 copy_for_split(trans, root, path, l, right, slot, mid, nritems); split_leaf()
4325 push_for_double_split(trans, root, path, data_size); split_leaf()
4327 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) split_leaf()
4334 struct btrfs_path *path, int ins_len) setup_leaf_for_split()
4343 leaf = path->nodes[0]; setup_leaf_for_split()
4344 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); setup_leaf_for_split()
4352 item_size = btrfs_item_size_nr(leaf, path->slots[0]); setup_leaf_for_split()
4354 fi = btrfs_item_ptr(leaf, path->slots[0], setup_leaf_for_split()
4358 btrfs_release_path(path); setup_leaf_for_split()
4360 path->keep_locks = 1; setup_leaf_for_split()
4361 path->search_for_split = 1; setup_leaf_for_split()
4362 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); setup_leaf_for_split()
4363 path->search_for_split = 0; setup_leaf_for_split()
4370 leaf = path->nodes[0]; setup_leaf_for_split()
4372 if (item_size != btrfs_item_size_nr(leaf, path->slots[0])) setup_leaf_for_split()
4376 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len) setup_leaf_for_split()
4380 fi = btrfs_item_ptr(leaf, path->slots[0], setup_leaf_for_split()
4386 btrfs_set_path_blocking(path); setup_leaf_for_split()
4387 ret = split_leaf(trans, root, &key, path, ins_len, 1); setup_leaf_for_split()
4391 path->keep_locks = 0; setup_leaf_for_split()
4392 btrfs_unlock_up_safe(path, 1); setup_leaf_for_split()
4395 path->keep_locks = 0; setup_leaf_for_split()
4401 struct btrfs_path *path, split_item()
4415 leaf = path->nodes[0]; split_item()
4418 btrfs_set_path_blocking(path); split_item()
4420 item = btrfs_item_nr(path->slots[0]); split_item()
4429 path->slots[0]), item_size); split_item()
4431 slot = path->slots[0] + 1; split_item()
4456 btrfs_item_ptr_offset(leaf, path->slots[0]), split_item()
4475 * The path may be released by this operation. After
4476 * the split, the path is pointing to the old item. The
4487 struct btrfs_path *path, btrfs_split_item()
4492 ret = setup_leaf_for_split(trans, root, path, btrfs_split_item()
4497 ret = split_item(trans, root, path, new_key, split_offset); btrfs_split_item()
4511 struct btrfs_path *path, btrfs_duplicate_item()
4518 leaf = path->nodes[0]; btrfs_duplicate_item()
4519 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_duplicate_item()
4520 ret = setup_leaf_for_split(trans, root, path, btrfs_duplicate_item()
4525 path->slots[0]++; btrfs_duplicate_item()
4526 setup_items_for_insert(root, path, new_key, &item_size, btrfs_duplicate_item()
4529 leaf = path->nodes[0]; btrfs_duplicate_item()
4531 btrfs_item_ptr_offset(leaf, path->slots[0]), btrfs_duplicate_item()
4532 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), btrfs_duplicate_item()
4538 * make the item pointed to by the path smaller. new_size indicates
4543 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path, btrfs_truncate_item() argument
4559 leaf = path->nodes[0]; btrfs_truncate_item()
4560 slot = path->slots[0]; btrfs_truncate_item()
4626 fixup_low_keys(root->fs_info, path, &disk_key, 1); btrfs_truncate_item()
4640 * make the item pointed to by the path bigger, data_size is the added size.
4642 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path, btrfs_extend_item() argument
4657 leaf = path->nodes[0]; btrfs_extend_item()
4666 slot = path->slots[0]; btrfs_extend_item()
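btrfs_truncate_item() and btrfs_extend_item() resize the item the path currently points at; the do_setxattr hits in the xattr.c section below use exactly this pair. A hedged sketch of resizing that item to new_size, assuming the search already found it and, for growth, that the leaf has enough free space:

        struct extent_buffer *leaf = path->nodes[0];
        u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);

        if (new_size > old_size)
                btrfs_extend_item(root, path, new_size - old_size);
        else if (new_size < old_size)
                btrfs_truncate_item(root, path, new_size, 1);
        /* rewrite the payload, then push the leaf out */
        btrfs_mark_buffer_dirty(leaf);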
4712 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, setup_items_for_insert() argument
4725 if (path->slots[0] == 0) { setup_items_for_insert()
4727 fixup_low_keys(root->fs_info, path, &disk_key, 1); setup_items_for_insert()
4729 btrfs_unlock_up_safe(path, 1); setup_items_for_insert()
4733 leaf = path->nodes[0]; setup_items_for_insert()
4734 slot = path->slots[0]; setup_items_for_insert()
4801 * This does all the path init required, making room in the tree if needed.
4805 struct btrfs_path *path, btrfs_insert_empty_items()
4819 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); btrfs_insert_empty_items()
4825 slot = path->slots[0]; btrfs_insert_empty_items()
4828 setup_items_for_insert(root, path, cpu_key, data_size, btrfs_insert_empty_items()
4835 * This does all the path init required, making room in the tree if needed.
4842 struct btrfs_path *path; btrfs_insert_item() local
4846 path = btrfs_alloc_path(); btrfs_insert_item()
4847 if (!path) btrfs_insert_item()
4849 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); btrfs_insert_item()
4851 leaf = path->nodes[0]; btrfs_insert_item()
4852 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_insert_item()
4856 btrfs_free_path(path); btrfs_insert_item()
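The btrfs_insert_item hits skip the lines between fetching ptr and freeing the path; in essence the elided middle copies the caller's buffer into the reserved space and dirties the leaf, something close to:

        write_extent_buffer(leaf, data, ptr, data_size);
        btrfs_mark_buffer_dirty(leaf);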
4866 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, del_ptr() argument
4869 struct extent_buffer *parent = path->nodes[level]; del_ptr()
4899 fixup_low_keys(root->fs_info, path, &disk_key, level + 1); del_ptr()
4905 * a helper function to delete the leaf pointed to by path->slots[1] and
4906 * path->nodes[1].
4908 * This deletes the pointer in path->nodes[1] and frees the leaf
4911 * The path must have already been setup for deleting the leaf, including
4912 * all the proper balancing. path->nodes[1] must be locked.
4916 struct btrfs_path *path, btrfs_del_leaf()
4920 del_ptr(root, path, 1, path->slots[1]); btrfs_del_leaf()
4926 btrfs_unlock_up_safe(path, 0); btrfs_del_leaf()
4935 * delete the item at the leaf level in path. If that empties
4939 struct btrfs_path *path, int slot, int nr) btrfs_del_items()
4953 leaf = path->nodes[0]; btrfs_del_items()
4991 btrfs_set_path_blocking(path); btrfs_del_items()
4993 btrfs_del_leaf(trans, root, path, leaf); btrfs_del_items()
5001 fixup_low_keys(root->fs_info, path, &disk_key, 1); btrfs_del_items()
5006 /* push_leaf_left fixes the path. btrfs_del_items()
5007 * make sure the path still points to our leaf btrfs_del_items()
5010 slot = path->slots[1]; btrfs_del_items()
5013 btrfs_set_path_blocking(path); btrfs_del_items()
5014 wret = push_leaf_left(trans, root, path, 1, 1, btrfs_del_items()
5019 if (path->nodes[0] == leaf && btrfs_del_items()
5021 wret = push_leaf_right(trans, root, path, 1, btrfs_del_items()
5028 path->slots[1] = slot; btrfs_del_items()
5029 btrfs_del_leaf(trans, root, path, leaf); btrfs_del_items()
5033 /* if we're still in the path, make sure btrfs_del_items()
5038 if (path->nodes[0] == leaf) btrfs_del_items()
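A caller-side sketch of deleting one item with these helpers: search with ins_len = -1 and cow = 1 so the tree is COWed and pre-balanced for a removal, then call btrfs_del_item() (the nr = 1 wrapper around btrfs_del_items()). fixup_inode_link_counts in the tree-log.c section below follows this shape:

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0) {                  /* key not present */
                ret = 0;
                goto out;
        }
        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_release_path(path);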
5054 * This may release the path, and so you may lose any locks held at the
5057 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) btrfs_prev_leaf() argument
5063 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); btrfs_prev_leaf()
5078 btrfs_release_path(path); btrfs_prev_leaf()
5079 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_prev_leaf()
5082 btrfs_item_key(path->nodes[0], &found_key, 0); btrfs_prev_leaf()
5086 * before we released our path. And after we released our path, that btrfs_prev_leaf()
5106 * key and get a writable path.
5108 * This does lock as it descends, and path->keep_locks should be set
5111 * This honors path->lowest_level to prevent descent past a given level
5122 struct btrfs_path *path, btrfs_search_forward()
5132 int keep_locks = path->keep_locks; btrfs_search_forward()
5134 path->keep_locks = 1; btrfs_search_forward()
5138 WARN_ON(path->nodes[level]); btrfs_search_forward()
5139 path->nodes[level] = cur; btrfs_search_forward()
5140 path->locks[level] = BTRFS_READ_LOCK; btrfs_search_forward()
5151 /* at the lowest level, we're done, set up the path and exit */ btrfs_search_forward()
5152 if (level == path->lowest_level) { btrfs_search_forward()
5156 path->slots[level] = slot; btrfs_search_forward()
5182 path->slots[level] = slot; btrfs_search_forward()
5183 btrfs_set_path_blocking(path); btrfs_search_forward()
5184 sret = btrfs_find_next_key(root, path, min_key, level, btrfs_search_forward()
5187 btrfs_release_path(path); btrfs_search_forward()
5195 path->slots[level] = slot; btrfs_search_forward()
5196 if (level == path->lowest_level) { btrfs_search_forward()
5200 btrfs_set_path_blocking(path); btrfs_search_forward()
5206 path->locks[level - 1] = BTRFS_READ_LOCK; btrfs_search_forward()
5207 path->nodes[level - 1] = cur; btrfs_search_forward()
5208 unlock_up(path, level, 1, 0, NULL); btrfs_search_forward()
5209 btrfs_clear_path_blocking(path, NULL, 0); btrfs_search_forward()
5212 path->keep_locks = keep_locks; btrfs_search_forward()
5214 btrfs_unlock_up_safe(path, path->lowest_level + 1); btrfs_search_forward()
5215 btrfs_set_path_blocking(path); btrfs_search_forward()
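btrfs_defrag_leaves in the tree-defrag.c section below is the canonical caller: it sets path->keep_locks before handing the path to btrfs_search_forward(). A reduced sketch of that usage, scanning for the first key in a node newer than min_trans (0 means a hit, 1 means nothing newer exists):

        path->keep_locks = 1;
        ret = btrfs_search_forward(root, &min_key, path, min_trans);
        if (ret < 0)
                goto out;
        if (ret > 0) {                  /* nothing newer than min_trans */
                ret = 0;
                goto out;
        }
        /* path->nodes[0] / path->slots[0] now point at the first hit */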
5222 struct btrfs_path *path, tree_move_down()
5226 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level], tree_move_down()
5227 path->slots[*level]); tree_move_down()
5228 path->slots[*level - 1] = 0; tree_move_down()
5233 struct btrfs_path *path, tree_move_next_or_upnext()
5238 nritems = btrfs_header_nritems(path->nodes[*level]); tree_move_next_or_upnext()
5240 path->slots[*level]++; tree_move_next_or_upnext()
5242 while (path->slots[*level] >= nritems) { tree_move_next_or_upnext()
5247 path->slots[*level] = 0; tree_move_next_or_upnext()
5248 free_extent_buffer(path->nodes[*level]); tree_move_next_or_upnext()
5249 path->nodes[*level] = NULL; tree_move_next_or_upnext()
5251 path->slots[*level]++; tree_move_next_or_upnext()
5253 nritems = btrfs_header_nritems(path->nodes[*level]); tree_move_next_or_upnext()
5264 struct btrfs_path *path, tree_advance()
5272 ret = tree_move_next_or_upnext(root, path, level, root_level); tree_advance()
5274 tree_move_down(root, path, level, root_level); tree_advance()
5279 btrfs_item_key_to_cpu(path->nodes[*level], key, tree_advance()
5280 path->slots[*level]); tree_advance()
5282 btrfs_node_key_to_cpu(path->nodes[*level], key, tree_advance()
5283 path->slots[*level]); tree_advance()
5575 * and fix up the path. It looks for and returns the next key in the
5576 * tree based on the current path and the min_trans parameters.
5581 * path->keep_locks should be set to 1 on the search made before
5584 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, btrfs_find_next_key() argument
5590 WARN_ON(!path->keep_locks); btrfs_find_next_key()
5592 if (!path->nodes[level]) btrfs_find_next_key()
5595 slot = path->slots[level] + 1; btrfs_find_next_key()
5596 c = path->nodes[level]; btrfs_find_next_key()
5603 !path->nodes[level + 1]) btrfs_find_next_key()
5606 if (path->locks[level + 1]) { btrfs_find_next_key()
5617 orig_lowest = path->lowest_level; btrfs_find_next_key()
5618 btrfs_release_path(path); btrfs_find_next_key()
5619 path->lowest_level = level; btrfs_find_next_key()
5620 ret = btrfs_search_slot(NULL, root, &cur_key, path, btrfs_find_next_key()
5622 path->lowest_level = orig_lowest; btrfs_find_next_key()
5626 c = path->nodes[level]; btrfs_find_next_key()
5627 slot = path->slots[level]; btrfs_find_next_key()
5654 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) btrfs_next_leaf() argument
5656 return btrfs_next_old_leaf(root, path, 0); btrfs_next_leaf()
5659 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, btrfs_next_old_leaf() argument
5669 int old_spinning = path->leave_spinning; btrfs_next_old_leaf()
5672 nritems = btrfs_header_nritems(path->nodes[0]); btrfs_next_old_leaf()
5676 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); btrfs_next_old_leaf()
5681 btrfs_release_path(path); btrfs_next_old_leaf()
5683 path->keep_locks = 1; btrfs_next_old_leaf()
5684 path->leave_spinning = 1; btrfs_next_old_leaf()
5687 ret = btrfs_search_old_slot(root, &key, path, time_seq); btrfs_next_old_leaf()
5689 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_next_old_leaf()
5690 path->keep_locks = 0; btrfs_next_old_leaf()
5695 nritems = btrfs_header_nritems(path->nodes[0]); btrfs_next_old_leaf()
5697 * by releasing the path above, we dropped all our locks. A balance btrfs_next_old_leaf()
5700 * advance the path if there are now more items available. btrfs_next_old_leaf()
5702 if (nritems > 0 && path->slots[0] < nritems - 1) { btrfs_next_old_leaf()
5704 path->slots[0]++; btrfs_next_old_leaf()
5710 * - after releasing the path above, someone has removed the item that btrfs_next_old_leaf()
5718 * with ret > 0, the key isn't found, the path points to the slot btrfs_next_old_leaf()
5719 * where it should be inserted, so the path->slots[0] item must be the btrfs_next_old_leaf()
5722 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { btrfs_next_old_leaf()
5728 if (!path->nodes[level]) { btrfs_next_old_leaf()
5733 slot = path->slots[level] + 1; btrfs_next_old_leaf()
5734 c = path->nodes[level]; btrfs_next_old_leaf()
5750 next_rw_lock = path->locks[level]; btrfs_next_old_leaf()
5751 ret = read_block_for_search(NULL, root, path, &next, level, btrfs_next_old_leaf()
5757 btrfs_release_path(path); btrfs_next_old_leaf()
5761 if (!path->skip_locking) { btrfs_next_old_leaf()
5772 btrfs_release_path(path); btrfs_next_old_leaf()
5777 btrfs_set_path_blocking(path); btrfs_next_old_leaf()
5779 btrfs_clear_path_blocking(path, next, btrfs_next_old_leaf()
5786 path->slots[level] = slot; btrfs_next_old_leaf()
5789 c = path->nodes[level]; btrfs_next_old_leaf()
5790 if (path->locks[level]) btrfs_next_old_leaf()
5791 btrfs_tree_unlock_rw(c, path->locks[level]); btrfs_next_old_leaf()
5794 path->nodes[level] = next; btrfs_next_old_leaf()
5795 path->slots[level] = 0; btrfs_next_old_leaf()
5796 if (!path->skip_locking) btrfs_next_old_leaf()
5797 path->locks[level] = next_rw_lock; btrfs_next_old_leaf()
5801 ret = read_block_for_search(NULL, root, path, &next, level, btrfs_next_old_leaf()
5807 btrfs_release_path(path); btrfs_next_old_leaf()
5811 if (!path->skip_locking) { btrfs_next_old_leaf()
5814 btrfs_set_path_blocking(path); btrfs_next_old_leaf()
5816 btrfs_clear_path_blocking(path, next, btrfs_next_old_leaf()
5824 unlock_up(path, 0, 1, 0, NULL); btrfs_next_old_leaf()
5825 path->leave_spinning = old_spinning; btrfs_next_old_leaf()
5827 btrfs_set_path_blocking(path); btrfs_next_old_leaf()
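The standard consumer of btrfs_next_leaf() is a slot walk like the btrfs_listxattr hits in the xattr.c section below: advance path->slots[0] across the leaf and let btrfs_next_leaf() step to the following one. A minimal sketch with the per-item work elided:

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        while (1) {
                struct extent_buffer *leaf = path->nodes[0];
                int slot = path->slots[0];

                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto out;
                        if (ret > 0)
                                break;  /* walked off the last leaf */
                        continue;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
                /* stop once found_key leaves the range of interest,
                 * otherwise process the item here */
                path->slots[0]++;
        }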
5839 struct btrfs_path *path, u64 min_objectid, btrfs_previous_item()
5848 if (path->slots[0] == 0) { btrfs_previous_item()
5849 btrfs_set_path_blocking(path); btrfs_previous_item()
5850 ret = btrfs_prev_leaf(root, path); btrfs_previous_item()
5854 path->slots[0]--; btrfs_previous_item()
5856 leaf = path->nodes[0]; btrfs_previous_item()
5860 if (path->slots[0] == nritems) btrfs_previous_item()
5861 path->slots[0]--; btrfs_previous_item()
5863 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_previous_item()
5882 struct btrfs_path *path, u64 min_objectid) btrfs_previous_extent_item()
5890 if (path->slots[0] == 0) { btrfs_previous_extent_item()
5891 btrfs_set_path_blocking(path); btrfs_previous_extent_item()
5892 ret = btrfs_prev_leaf(root, path); btrfs_previous_extent_item()
5896 path->slots[0]--; btrfs_previous_extent_item()
5898 leaf = path->nodes[0]; btrfs_previous_extent_item()
5902 if (path->slots[0] == nritems) btrfs_previous_extent_item()
5903 path->slots[0]--; btrfs_previous_extent_item()
5905 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_previous_extent_item()
1886 balance_level(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) balance_level() argument
2104 push_nodes_for_insert(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) push_nodes_for_insert() argument
2240 reada_for_search(struct btrfs_root *root, struct btrfs_path *path, int level, int slot, u64 objectid) reada_for_search() argument
2306 reada_for_balance(struct btrfs_root *root, struct btrfs_path *path, int level) reada_for_balance() argument
3134 fixup_low_keys(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_disk_key *key, int level) fixup_low_keys() argument
3160 btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_key *new_key) btrfs_set_item_key_safe() argument
3331 insert_new_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) insert_new_root() argument
3402 insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_disk_key *key, u64 bytenr, int slot, int level) insert_ptr() argument
3448 split_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) split_node() argument
3592 __push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int data_size, int empty, struct extent_buffer *right, int free_space, u32 left_nritems, u32 min_slot) __push_leaf_right() argument
3741 push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int min_data_size, int data_size, int empty, u32 min_slot) push_leaf_right() argument
3818 __push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int data_size, int empty, struct extent_buffer *left, int free_space, u32 right_nritems, u32 max_slot) __push_leaf_left() argument
3971 push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int min_data_size, int data_size, int empty, u32 max_slot) push_leaf_left() argument
4036 copy_for_split(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *l, struct extent_buffer *right, int slot, int mid, int nritems) copy_for_split() argument
4109 push_for_double_split(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int data_size) push_for_double_split() argument
4166 split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *ins_key, struct btrfs_path *path, int data_size, int extend) split_leaf() argument
4332 setup_leaf_for_split(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int ins_len) setup_leaf_for_split() argument
4399 split_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *new_key, unsigned long split_offset) split_item() argument
4485 btrfs_split_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *new_key, unsigned long split_offset) btrfs_split_item() argument
4509 btrfs_duplicate_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *new_key) btrfs_duplicate_item() argument
4803 btrfs_insert_empty_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 *data_size, int nr) btrfs_insert_empty_items() argument
4914 btrfs_del_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *leaf) btrfs_del_leaf() argument
4938 btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int slot, int nr) btrfs_del_items() argument
5121 btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, struct btrfs_path *path, u64 min_trans) btrfs_search_forward() argument
5221 tree_move_down(struct btrfs_root *root, struct btrfs_path *path, int *level, int root_level) tree_move_down() argument
5232 tree_move_next_or_upnext(struct btrfs_root *root, struct btrfs_path *path, int *level, int root_level) tree_move_next_or_upnext() argument
5263 tree_advance(struct btrfs_root *root, struct btrfs_path *path, int *level, int root_level, int allow_down, struct btrfs_key *key) tree_advance() argument
5838 btrfs_previous_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid, int type) btrfs_previous_item() argument
5881 btrfs_previous_extent_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid) btrfs_previous_extent_item() argument
H A Dtree-defrag.c35 struct btrfs_path *path = NULL; btrfs_defrag_leaves() local
55 path = btrfs_alloc_path(); btrfs_defrag_leaves()
56 if (!path) btrfs_defrag_leaves()
82 path->keep_locks = 1; btrfs_defrag_leaves()
84 ret = btrfs_search_forward(root, &key, path, min_trans); btrfs_defrag_leaves()
91 btrfs_release_path(path); btrfs_defrag_leaves()
92 wret = btrfs_search_slot(trans, root, &key, path, 0, 1); btrfs_defrag_leaves()
98 if (!path->nodes[1]) { btrfs_defrag_leaves()
102 path->slots[1] = btrfs_header_nritems(path->nodes[1]); btrfs_defrag_leaves()
103 next_key_ret = btrfs_find_next_key(root, path, &key, 1, btrfs_defrag_leaves()
106 path->nodes[1], 0, btrfs_defrag_leaves()
118 btrfs_free_path(path); btrfs_defrag_leaves()
H A Dexport.c157 struct btrfs_path *path; btrfs_get_parent() local
164 path = btrfs_alloc_path(); btrfs_get_parent()
165 if (!path) btrfs_get_parent()
179 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_get_parent()
184 if (path->slots[0] == 0) { btrfs_get_parent()
189 path->slots[0]--; btrfs_get_parent()
190 leaf = path->nodes[0]; btrfs_get_parent()
192 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_get_parent()
199 ref = btrfs_item_ptr(leaf, path->slots[0], btrfs_get_parent()
205 btrfs_free_path(path); btrfs_get_parent()
216 btrfs_free_path(path); btrfs_get_parent()
225 struct btrfs_path *path; btrfs_get_name() local
244 path = btrfs_alloc_path(); btrfs_get_name()
245 if (!path) btrfs_get_name()
247 path->leave_spinning = 1; btrfs_get_name()
260 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_get_name()
262 btrfs_free_path(path); btrfs_get_name()
266 path->slots[0]--; btrfs_get_name()
268 btrfs_free_path(path); btrfs_get_name()
272 leaf = path->nodes[0]; btrfs_get_name()
275 rref = btrfs_item_ptr(leaf, path->slots[0], btrfs_get_name()
280 iref = btrfs_item_ptr(leaf, path->slots[0], btrfs_get_name()
287 btrfs_free_path(path); btrfs_get_name()
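btrfs_get_parent and btrfs_get_name above both lean on the "ret > 0 means step back one slot" idiom: a not-found search leaves the path at the would-be insert position, so the previous slot, when there is one, holds the closest smaller key. Condensed into a sketch:

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto fail;
        if (ret > 0) {
                if (path->slots[0] == 0)
                        goto fail;      /* no smaller key in this leaf */
                path->slots[0]--;
        }
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);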
H A Dxattr.c40 struct btrfs_path *path; __btrfs_getxattr() local
45 path = btrfs_alloc_path(); __btrfs_getxattr()
46 if (!path) __btrfs_getxattr()
50 di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, __btrfs_getxattr()
60 leaf = path->nodes[0]; __btrfs_getxattr()
87 btrfs_free_path(path); __btrfs_getxattr()
97 struct btrfs_path *path; do_setxattr() local
104 path = btrfs_alloc_path(); do_setxattr()
105 if (!path) do_setxattr()
107 path->skip_release_on_error = 1; do_setxattr()
110 di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), do_setxattr()
117 ret = btrfs_delete_one_dir_name(trans, root, path, di); do_setxattr()
125 * path - we can't race with a concurrent xattr delete, because the VFS do_setxattr()
130 di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), do_setxattr()
138 btrfs_release_path(path); do_setxattr()
142 ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), do_setxattr()
151 btrfs_assert_tree_locked(path->nodes[0]); do_setxattr()
152 di = btrfs_match_dir_item_name(root, path, name, name_len); do_setxattr()
159 di = btrfs_match_dir_item_name(root, path, name, name_len); do_setxattr()
178 const int slot = path->slots[0]; do_setxattr()
179 struct extent_buffer *leaf = path->nodes[0]; do_setxattr()
198 btrfs_extend_item(root, path, do_setxattr()
201 btrfs_truncate_item(root, path, data_size, 1); do_setxattr()
204 ret = btrfs_delete_one_dir_name(trans, root, path, di); do_setxattr()
207 btrfs_extend_item(root, path, data_size); do_setxattr()
220 * Insert, and we had space for the xattr, so path->slots[0] is do_setxattr()
226 btrfs_free_path(path); do_setxattr()
266 struct btrfs_path *path; btrfs_listxattr() local
283 path = btrfs_alloc_path(); btrfs_listxattr()
284 if (!path) btrfs_listxattr()
286 path->reada = 2; btrfs_listxattr()
289 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_listxattr()
294 leaf = path->nodes[0]; btrfs_listxattr()
295 slot = path->slots[0]; btrfs_listxattr()
297 /* this is where we start walking through the path */ btrfs_listxattr()
303 ret = btrfs_next_leaf(root, path); btrfs_listxattr()
344 path->slots[0]++; btrfs_listxattr()
349 btrfs_free_path(path); btrfs_listxattr()
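__btrfs_getxattr's shape, reconstructed from the hits above as a hedged sketch: look the xattr's dir item up through a scratch path, then copy the value, which sits directly after the name inside the item payload, out of the leaf. The btrfs_lookup_xattr() call follows the shape visible at line 50 above; buffer is the caller's destination:

        struct btrfs_dir_item *di;
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
                                name, strlen(name), 0);
        if (IS_ERR(di)) {
                ret = PTR_ERR(di);
        } else if (!di) {
                ret = -ENODATA;
        } else {
                struct extent_buffer *leaf = path->nodes[0];
                unsigned long data_ptr =
                        (unsigned long)((char *)(di + 1) +
                                        btrfs_dir_name_len(leaf, di));

                ret = btrfs_dir_data_len(leaf, di);
                read_extent_buffer(leaf, buffer, data_ptr, ret);
        }
        btrfs_free_path(path);
        return ret;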
H A Dtree-log.c104 struct btrfs_path *path, u64 objectid);
108 struct btrfs_path *path,
324 * root is the tree we are copying into, and path is a scratch
325 * path for use in this function (it should be released on entry and
336 struct btrfs_path *path, overwrite_item()
356 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); overwrite_item()
363 u32 dst_size = btrfs_item_size_nr(path->nodes[0], overwrite_item()
364 path->slots[0]); overwrite_item()
369 btrfs_release_path(path); overwrite_item()
375 btrfs_release_path(path); overwrite_item()
383 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); overwrite_item()
384 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, overwrite_item()
397 btrfs_release_path(path); overwrite_item()
410 item = btrfs_item_ptr(path->nodes[0], path->slots[0], overwrite_item()
412 nbytes = btrfs_inode_nbytes(path->nodes[0], item); overwrite_item()
447 btrfs_release_path(path); overwrite_item()
449 path->skip_release_on_error = 1; overwrite_item()
450 ret = btrfs_insert_empty_item(trans, root, path, overwrite_item()
452 path->skip_release_on_error = 0; overwrite_item()
457 found_size = btrfs_item_size_nr(path->nodes[0], overwrite_item()
458 path->slots[0]); overwrite_item()
460 btrfs_truncate_item(root, path, item_size, 1); overwrite_item()
462 btrfs_extend_item(root, path, overwrite_item()
467 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], overwrite_item()
468 path->slots[0]); overwrite_item()
487 struct extent_buffer *dst_eb = path->nodes[0]; overwrite_item()
511 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { overwrite_item()
513 saved_i_size = btrfs_inode_size(path->nodes[0], overwrite_item()
518 copy_extent_buffer(path->nodes[0], eb, dst_ptr, overwrite_item()
524 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); overwrite_item()
531 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { overwrite_item()
532 btrfs_set_inode_generation(path->nodes[0], dst_item, overwrite_item()
537 btrfs_mark_buffer_dirty(path->nodes[0]); overwrite_item()
538 btrfs_release_path(path); overwrite_item()
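The core move in overwrite_item, visible across the 447-537 hits: try to insert the item, and when that fails because a copy already exists, resize the existing copy to the source size before copying the raw bytes across with copy_extent_buffer(). A condensed sketch (the full function also special-cases inode items; eb and src_ptr are the log-tree source):

        path->skip_release_on_error = 1;
        ret = btrfs_insert_empty_item(trans, root, path, key, item_size);
        path->skip_release_on_error = 0;
        if (ret == -EEXIST) {
                u32 found_size = btrfs_item_size_nr(path->nodes[0],
                                                    path->slots[0]);

                if (found_size > item_size)
                        btrfs_truncate_item(root, path, item_size, 1);
                else if (found_size < item_size)
                        btrfs_extend_item(root, path,
                                          item_size - found_size);
        } else if (ret) {
                return ret;
        }
        copy_extent_buffer(path->nodes[0], eb,
                           btrfs_item_ptr_offset(path->nodes[0],
                                                 path->slots[0]),
                           src_ptr, item_size);
        btrfs_mark_buffer_dirty(path->nodes[0]);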
566 * subvolume 'root'. path is released on entry and should be released
579 struct btrfs_path *path, replay_one_extent()
626 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), replay_one_extent()
637 leaf = path->nodes[0]; replay_one_extent()
638 existing = btrfs_item_ptr(leaf, path->slots[0], replay_one_extent()
651 btrfs_release_path(path); replay_one_extent()
655 btrfs_release_path(path); replay_one_extent()
668 ret = btrfs_insert_empty_item(trans, root, path, key, replay_one_extent()
672 dest_offset = btrfs_item_ptr_offset(path->nodes[0], replay_one_extent()
673 path->slots[0]); replay_one_extent()
674 copy_extent_buffer(path->nodes[0], eb, dest_offset, replay_one_extent()
710 btrfs_release_path(path); replay_one_extent()
796 btrfs_release_path(path); replay_one_extent()
800 ret = overwrite_item(trans, root, path, eb, slot, key); replay_one_extent()
823 struct btrfs_path *path, drop_one_dir_item()
834 leaf = path->nodes[0]; drop_one_dir_item()
843 btrfs_release_path(path); drop_one_dir_item()
851 ret = link_to_fixup_dir(trans, root, path, location.objectid); drop_one_dir_item()
872 struct btrfs_path *path, inode_in_dir()
880 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, inode_in_dir()
883 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); inode_in_dir()
888 btrfs_release_path(path); inode_in_dir()
890 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); inode_in_dir()
892 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); inode_in_dir()
899 btrfs_release_path(path); inode_in_dir()
918 struct btrfs_path *path; backref_in_log() local
928 path = btrfs_alloc_path(); backref_in_log()
929 if (!path) backref_in_log()
932 ret = btrfs_search_slot(NULL, log, key, path, 0, 0); backref_in_log()
936 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); backref_in_log()
939 if (btrfs_find_name_in_ext_backref(path, ref_objectid, backref_in_log()
946 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); backref_in_log()
950 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); backref_in_log()
953 ret = memcmp_extent_buffer(path->nodes[0], name, backref_in_log()
963 btrfs_free_path(path); backref_in_log()
969 struct btrfs_path *path, __add_inode_ref()
990 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); __add_inode_ref()
996 leaf = path->nodes[0]; __add_inode_ref()
1008 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); __add_inode_ref()
1009 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); __add_inode_ref()
1027 btrfs_release_path(path); __add_inode_ref()
1052 btrfs_release_path(path); __add_inode_ref()
1055 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen, __add_inode_ref()
1064 leaf = path->nodes[0]; __add_inode_ref()
1066 item_size = btrfs_item_size_nr(leaf, path->slots[0]); __add_inode_ref()
1067 base = btrfs_item_ptr_offset(leaf, path->slots[0]); __add_inode_ref()
1097 btrfs_release_path(path); __add_inode_ref()
1123 btrfs_release_path(path); __add_inode_ref()
1126 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), __add_inode_ref()
1129 ret = drop_one_dir_item(trans, root, path, dir, di); __add_inode_ref()
1133 btrfs_release_path(path); __add_inode_ref()
1136 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), __add_inode_ref()
1139 ret = drop_one_dir_item(trans, root, path, dir, di); __add_inode_ref()
1143 btrfs_release_path(path); __add_inode_ref()
1193 * root is the destination we are replaying into, and path is for temp
1199 struct btrfs_path *path, add_inode_ref()
1273 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), add_inode_ref()
1284 ret = __add_inode_ref(trans, root, path, log, add_inode_ref()
1316 ret = overwrite_item(trans, root, path, eb, slot, key); add_inode_ref()
1318 btrfs_release_path(path); add_inode_ref()
1338 struct inode *inode, struct btrfs_path *path) count_inode_extrefs()
1352 ret = btrfs_find_one_extref(root, inode_objectid, offset, path, count_inode_extrefs()
1357 leaf = path->nodes[0]; count_inode_extrefs()
1358 item_size = btrfs_item_size_nr(leaf, path->slots[0]); count_inode_extrefs()
1359 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); count_inode_extrefs()
1372 btrfs_release_path(path); count_inode_extrefs()
1374 btrfs_release_path(path); count_inode_extrefs()
1382 struct inode *inode, struct btrfs_path *path) count_inode_refs()
1397 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); count_inode_refs()
1401 if (path->slots[0] == 0) count_inode_refs()
1403 path->slots[0]--; count_inode_refs()
1406 btrfs_item_key_to_cpu(path->nodes[0], &key, count_inode_refs()
1407 path->slots[0]); count_inode_refs()
1411 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); count_inode_refs()
1412 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], count_inode_refs()
1413 path->slots[0]); count_inode_refs()
1418 name_len = btrfs_inode_ref_name_len(path->nodes[0], count_inode_refs()
1426 if (path->slots[0] > 0) { count_inode_refs()
1427 path->slots[0]--; count_inode_refs()
1431 btrfs_release_path(path); count_inode_refs()
1433 btrfs_release_path(path); count_inode_refs()
1452 struct btrfs_path *path; fixup_inode_link_count() local
1457 path = btrfs_alloc_path(); fixup_inode_link_count()
1458 if (!path) fixup_inode_link_count()
1461 ret = count_inode_refs(root, inode, path); fixup_inode_link_count()
1467 ret = count_inode_extrefs(root, inode, path); fixup_inode_link_count()
1483 ret = replay_dir_deletes(trans, root, NULL, path, fixup_inode_link_count()
1492 btrfs_free_path(path); fixup_inode_link_count()
1498 struct btrfs_path *path) fixup_inode_link_counts()
1508 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); fixup_inode_link_counts()
1513 if (path->slots[0] == 0) fixup_inode_link_counts()
1515 path->slots[0]--; fixup_inode_link_counts()
1518 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); fixup_inode_link_counts()
1523 ret = btrfs_del_item(trans, root, path); fixup_inode_link_counts()
1527 btrfs_release_path(path); fixup_inode_link_counts()
1546 btrfs_release_path(path); fixup_inode_link_counts()
1558 struct btrfs_path *path, link_to_fixup_dir()
1573 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); link_to_fixup_dir()
1575 btrfs_release_path(path); link_to_fixup_dir()
1668 struct btrfs_path *path, replay_one_name()
1701 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0); replay_one_name()
1706 btrfs_release_path(path); replay_one_name()
1709 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, replay_one_name()
1712 dst_di = btrfs_lookup_dir_index_item(trans, root, path, replay_one_name()
1730 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); replay_one_name()
1735 btrfs_dir_type(path->nodes[0], dst_di) == log_type) { replay_one_name()
1747 ret = drop_one_dir_item(trans, root, path, dir, dst_di); replay_one_name()
1754 btrfs_release_path(path); replay_one_name()
1773 btrfs_release_path(path); replay_one_name()
1793 struct btrfs_path *path, replay_one_dir_item()
1812 ret = replay_one_name(trans, root, path, eb, di, key); replay_one_dir_item()
1880 struct btrfs_path *path, find_dir_range()
1897 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); find_dir_range()
1901 if (path->slots[0] == 0) find_dir_range()
1903 path->slots[0]--; find_dir_range()
1906 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); find_dir_range()
1912 item = btrfs_item_ptr(path->nodes[0], path->slots[0], find_dir_range()
1914 found_end = btrfs_dir_log_end(path->nodes[0], item); find_dir_range()
1925 nritems = btrfs_header_nritems(path->nodes[0]); find_dir_range()
1926 if (path->slots[0] >= nritems) { find_dir_range()
1927 ret = btrfs_next_leaf(root, path); find_dir_range()
1931 path->slots[0]++; find_dir_range()
1934 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); find_dir_range()
1940 item = btrfs_item_ptr(path->nodes[0], path->slots[0], find_dir_range()
1942 found_end = btrfs_dir_log_end(path->nodes[0], item); find_dir_range()
1947 btrfs_release_path(path); find_dir_range()
1959 struct btrfs_path *path, check_item_in_log()
1978 eb = path->nodes[0]; check_item_in_log()
1979 slot = path->slots[0]; check_item_in_log()
2012 btrfs_release_path(path); check_item_in_log()
2021 path, location.objectid); check_item_in_log()
2041 ret = btrfs_search_slot(NULL, root, dir_key, path, check_item_in_log()
2059 btrfs_release_path(path); check_item_in_log()
2067 struct btrfs_path *path, replay_xattr_deletes()
2084 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); replay_xattr_deletes()
2088 nritems = btrfs_header_nritems(path->nodes[0]); replay_xattr_deletes()
2089 for (i = path->slots[0]; i < nritems; i++) { replay_xattr_deletes()
2096 btrfs_item_key_to_cpu(path->nodes[0], &key, i); replay_xattr_deletes()
2102 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); replay_xattr_deletes()
2103 total_size = btrfs_item_size_nr(path->nodes[0], i); replay_xattr_deletes()
2106 u16 name_len = btrfs_dir_name_len(path->nodes[0], di); replay_xattr_deletes()
2107 u16 data_len = btrfs_dir_data_len(path->nodes[0], di); replay_xattr_deletes()
2116 read_extent_buffer(path->nodes[0], name, replay_xattr_deletes()
2124 btrfs_release_path(path); replay_xattr_deletes()
2125 di = btrfs_lookup_xattr(trans, root, path, ino, replay_xattr_deletes()
2134 path, di); replay_xattr_deletes()
2137 btrfs_release_path(path); replay_xattr_deletes()
2150 ret = btrfs_next_leaf(root, path); replay_xattr_deletes()
2157 btrfs_release_path(path); replay_xattr_deletes()
2175 struct btrfs_path *path, replay_dir_deletes()
2209 ret = find_dir_range(log, path, dirid, key_type, replay_dir_deletes()
2218 ret = btrfs_search_slot(NULL, root, &dir_key, path, replay_dir_deletes()
2223 nritems = btrfs_header_nritems(path->nodes[0]); replay_dir_deletes()
2224 if (path->slots[0] >= nritems) { replay_dir_deletes()
2225 ret = btrfs_next_leaf(root, path); replay_dir_deletes()
2229 btrfs_item_key_to_cpu(path->nodes[0], &found_key, replay_dir_deletes()
2230 path->slots[0]); replay_dir_deletes()
2238 ret = check_item_in_log(trans, root, log, path, replay_dir_deletes()
2247 btrfs_release_path(path); replay_dir_deletes()
2258 btrfs_release_path(path); replay_dir_deletes()
2262 btrfs_release_path(path); replay_dir_deletes()
2283 struct btrfs_path *path; replay_one_buffer() local
2299 path = btrfs_alloc_path(); replay_one_buffer()
2300 if (!path) replay_one_buffer()
2316 path, key.objectid); replay_one_buffer()
2322 root, log, path, key.objectid, 0); replay_one_buffer()
2326 ret = overwrite_item(wc->trans, root, path, replay_one_buffer()
2343 path, key.objectid); replay_one_buffer()
2350 ret = replay_one_dir_item(wc->trans, root, path, replay_one_buffer()
2361 ret = overwrite_item(wc->trans, root, path, replay_one_buffer()
2367 ret = add_inode_ref(wc->trans, root, log, path, replay_one_buffer()
2373 ret = replay_one_extent(wc->trans, root, path, replay_one_buffer()
2378 ret = replay_one_dir_item(wc->trans, root, path, replay_one_buffer()
2384 btrfs_free_path(path); replay_one_buffer()
2390 struct btrfs_path *path, int *level, walk_down_log_tree()
2408 cur = path->nodes[*level]; walk_down_log_tree()
2412 if (path->slots[*level] >= walk_down_log_tree()
2416 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); walk_down_log_tree()
2417 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); walk_down_log_tree()
2420 parent = path->nodes[*level]; walk_down_log_tree()
2434 path->slots[*level]++; walk_down_log_tree()
2470 if (path->nodes[*level-1]) walk_down_log_tree()
2471 free_extent_buffer(path->nodes[*level-1]); walk_down_log_tree()
2472 path->nodes[*level-1] = next; walk_down_log_tree()
2474 path->slots[*level] = 0; walk_down_log_tree()
2480 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); walk_down_log_tree()
2488 struct btrfs_path *path, int *level, walk_up_log_tree()
2496 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { walk_up_log_tree()
2497 slot = path->slots[i]; walk_up_log_tree()
2498 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { walk_up_log_tree()
2499 path->slots[i]++; walk_up_log_tree()
2505 if (path->nodes[*level] == root->node) walk_up_log_tree()
2506 parent = path->nodes[*level]; walk_up_log_tree()
2508 parent = path->nodes[*level + 1]; walk_up_log_tree()
2511 ret = wc->process_func(root, path->nodes[*level], wc, walk_up_log_tree()
2512 btrfs_header_generation(path->nodes[*level])); walk_up_log_tree()
2519 next = path->nodes[*level]; walk_up_log_tree()
2532 path->nodes[*level]->start, walk_up_log_tree()
2533 path->nodes[*level]->len); walk_up_log_tree()
2537 free_extent_buffer(path->nodes[*level]); walk_up_log_tree()
2538 path->nodes[*level] = NULL; walk_up_log_tree()
2556 struct btrfs_path *path; walk_log_tree() local
2559 path = btrfs_alloc_path(); walk_log_tree()
2560 if (!path) walk_log_tree()
2565 path->nodes[level] = log->node; walk_log_tree()
2567 path->slots[level] = 0; walk_log_tree()
2570 wret = walk_down_log_tree(trans, log, path, &level, wc); walk_log_tree()
2578 wret = walk_up_log_tree(trans, log, path, &level, wc); walk_log_tree()
2588 if (path->nodes[orig_level]) { walk_log_tree()
2589 ret = wc->process_func(log, path->nodes[orig_level], wc, walk_log_tree()
2590 btrfs_header_generation(path->nodes[orig_level])); walk_log_tree()
2596 next = path->nodes[orig_level]; walk_log_tree()
2616 btrfs_free_path(path); walk_log_tree()
3069 struct btrfs_path *path; btrfs_del_dir_entries_in_log() local
3085 path = btrfs_alloc_path(); btrfs_del_dir_entries_in_log()
3086 if (!path) { btrfs_del_dir_entries_in_log()
3091 di = btrfs_lookup_dir_item(trans, log, path, dir_ino, btrfs_del_dir_entries_in_log()
3098 ret = btrfs_delete_one_dir_name(trans, log, path, di); btrfs_del_dir_entries_in_log()
3105 btrfs_release_path(path); btrfs_del_dir_entries_in_log()
3106 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, btrfs_del_dir_entries_in_log()
3113 ret = btrfs_delete_one_dir_name(trans, log, path, di); btrfs_del_dir_entries_in_log()
3130 btrfs_release_path(path); btrfs_del_dir_entries_in_log()
3132 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); btrfs_del_dir_entries_in_log()
3141 item = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_del_dir_entries_in_log()
3143 i_size = btrfs_inode_size(path->nodes[0], item); btrfs_del_dir_entries_in_log()
3148 btrfs_set_inode_size(path->nodes[0], item, i_size); btrfs_del_dir_entries_in_log()
3149 btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_del_dir_entries_in_log()
3152 btrfs_release_path(path); btrfs_del_dir_entries_in_log()
3155 btrfs_free_path(path); btrfs_del_dir_entries_in_log()
3208 struct btrfs_path *path, insert_dir_log_key()
3222 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); insert_dir_log_key()
3226 item = btrfs_item_ptr(path->nodes[0], path->slots[0], insert_dir_log_key()
3228 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); insert_dir_log_key()
3229 btrfs_mark_buffer_dirty(path->nodes[0]); insert_dir_log_key()
3230 btrfs_release_path(path); insert_dir_log_key()
3241 struct btrfs_path *path, log_dir_items()
3263 ret = btrfs_search_forward(root, &min_key, path, trans->transid); log_dir_items()
3273 btrfs_release_path(path); log_dir_items()
3274 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); log_dir_items()
3276 btrfs_release_path(path); log_dir_items()
3279 ret = btrfs_previous_item(root, path, ino, key_type); log_dir_items()
3288 btrfs_item_key_to_cpu(path->nodes[0], &tmp, log_dir_items()
3289 path->slots[0]); log_dir_items()
3297 ret = btrfs_previous_item(root, path, ino, key_type); log_dir_items()
3300 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); log_dir_items()
3304 path->nodes[0], path->slots[0], log_dir_items()
3312 btrfs_release_path(path); log_dir_items()
3315 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); log_dir_items()
3325 src = path->nodes[0]; log_dir_items()
3327 for (i = path->slots[0]; i < nritems; i++) { log_dir_items()
3372 path->slots[0] = nritems; log_dir_items()
3378 ret = btrfs_next_leaf(root, path); log_dir_items()
3383 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); log_dir_items()
3388 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { log_dir_items()
3390 path->nodes[0], path->slots[0], log_dir_items()
3400 btrfs_release_path(path); log_dir_items()
3409 ret = insert_dir_log_key(trans, log, path, key_type, log_dir_items()
3431 struct btrfs_path *path, log_directory_changes()
3444 ret = log_dir_items(trans, root, inode, path, log_directory_changes()
3469 struct btrfs_path *path, drop_objectid_items()
3482 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); drop_objectid_items()
3487 if (path->slots[0] == 0) drop_objectid_items()
3490 path->slots[0]--; drop_objectid_items()
3491 btrfs_item_key_to_cpu(path->nodes[0], &found_key, drop_objectid_items()
3492 path->slots[0]); drop_objectid_items()
3499 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, drop_objectid_items()
3502 ret = btrfs_del_items(trans, log, path, start_slot, drop_objectid_items()
3503 path->slots[0] - start_slot + 1); drop_objectid_items()
3510 btrfs_release_path(path); drop_objectid_items()
3512 btrfs_release_path(path); drop_objectid_items()
3574 struct btrfs_root *log, struct btrfs_path *path, log_inode_item()
3580 ret = btrfs_insert_empty_item(trans, log, path, log_inode_item()
3585 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], log_inode_item()
3587 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0); log_inode_item()
3588 btrfs_release_path(path); log_inode_item()
3798 * path and re-search for the first extent key we found, and then walk copy_items()
3802 /* btrfs_prev_leaf could return 1 without releasing the path */ copy_items()
3866 * Need to let the callers know we dropped the path so they should copy_items()
4034 struct btrfs_path *path, log_one_extent()
4061 ret = __btrfs_drop_extents(trans, log, inode, path, em->start, log_one_extent()
4072 ret = btrfs_insert_empty_item(trans, log, path, &key, log_one_extent()
4077 leaf = path->nodes[0]; log_one_extent()
4078 fi = btrfs_item_ptr(leaf, path->slots[0], log_one_extent()
4120 btrfs_release_path(path); log_one_extent()
4128 struct btrfs_path *path, btrfs_log_changed_extents()
4188 ret = log_one_extent(trans, inode, root, em, path, logged_list, btrfs_log_changed_extents()
4197 btrfs_release_path(path); btrfs_log_changed_extents()
4202 struct btrfs_path *path, u64 *size_ret) logged_inode_size()
4211 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); logged_inode_size()
4219 item = btrfs_item_ptr(path->nodes[0], path->slots[0], logged_inode_size()
4221 *size_ret = btrfs_inode_size(path->nodes[0], item); logged_inode_size()
4224 btrfs_release_path(path); logged_inode_size()
4240 struct btrfs_path *path, btrfs_log_all_xattrs()
4253 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_log_all_xattrs()
4258 int slot = path->slots[0]; btrfs_log_all_xattrs()
4259 struct extent_buffer *leaf = path->nodes[0]; btrfs_log_all_xattrs()
4266 ret = copy_items(trans, inode, dst_path, path, btrfs_log_all_xattrs()
4275 ret = btrfs_next_leaf(root, path); btrfs_log_all_xattrs()
4290 path->slots[0]++; btrfs_log_all_xattrs()
4296 ret = copy_items(trans, inode, dst_path, path, btrfs_log_all_xattrs()
4327 * by copy_items(). We also only need to do this in the full sync path, where we
4328 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4335 struct btrfs_path *path) btrfs_log_trailing_hole()
4353 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_log_trailing_hole()
4358 ASSERT(path->slots[0] > 0); btrfs_log_trailing_hole()
4359 path->slots[0]--; btrfs_log_trailing_hole()
4360 leaf = path->nodes[0]; btrfs_log_trailing_hole()
4361 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_log_trailing_hole()
4378 extent = btrfs_item_ptr(leaf, path->slots[0], btrfs_log_trailing_hole()
4384 path->slots[0], btrfs_log_trailing_hole()
4397 btrfs_release_path(path); btrfs_log_trailing_hole()
4551 struct btrfs_path *path; btrfs_log_inode() local
4570 path = btrfs_alloc_path(); btrfs_log_inode()
4571 if (!path) btrfs_log_inode()
4575 btrfs_free_path(path); btrfs_log_inode()
4609 btrfs_free_path(path); btrfs_log_inode()
4627 ret = drop_objectid_items(trans, log, path, ino, max_key_type); btrfs_log_inode()
4643 err = logged_inode_size(log, inode, path, btrfs_log_inode()
4652 ret = drop_objectid_items(trans, log, path, ino, btrfs_log_inode()
4672 ret = drop_objectid_items(trans, log, path, ino, btrfs_log_inode()
4689 path, trans->transid); btrfs_log_inode()
4705 ret = btrfs_check_ref_name_override(path->nodes[0], btrfs_log_inode()
4706 path->slots[0], btrfs_log_inode()
4722 ret = copy_items(trans, inode, dst_path, path, btrfs_log_inode()
4731 btrfs_release_path(path); btrfs_log_inode()
4737 src = path->nodes[0]; btrfs_log_inode()
4738 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { btrfs_log_inode()
4742 ins_start_slot = path->slots[0]; btrfs_log_inode()
4747 ret = copy_items(trans, inode, dst_path, path, &last_extent, btrfs_log_inode()
4756 btrfs_release_path(path); btrfs_log_inode()
4760 ins_start_slot = path->slots[0]; btrfs_log_inode()
4763 nritems = btrfs_header_nritems(path->nodes[0]); btrfs_log_inode()
4764 path->slots[0]++; btrfs_log_inode()
4765 if (path->slots[0] < nritems) { btrfs_log_inode()
4766 btrfs_item_key_to_cpu(path->nodes[0], &min_key, btrfs_log_inode()
4767 path->slots[0]); btrfs_log_inode()
4771 ret = copy_items(trans, inode, dst_path, path, btrfs_log_inode()
4781 btrfs_release_path(path); btrfs_log_inode()
4793 ret = copy_items(trans, inode, dst_path, path, &last_extent, btrfs_log_inode()
4804 btrfs_release_path(path); btrfs_log_inode()
4806 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); btrfs_log_inode()
4810 btrfs_release_path(path); btrfs_log_inode()
4812 err = btrfs_log_trailing_hole(trans, root, inode, path); btrfs_log_inode()
4817 btrfs_release_path(path); btrfs_log_inode()
4879 ret = log_directory_changes(trans, root, inode, path, dst_path, btrfs_log_inode()
4898 btrfs_free_path(path); btrfs_log_inode()
5030 struct btrfs_path *path; log_new_dir_dentries() local
5035 path = btrfs_alloc_path(); log_new_dir_dentries()
5036 if (!path) log_new_dir_dentries()
5041 btrfs_free_path(path); log_new_dir_dentries()
5062 btrfs_release_path(path); log_new_dir_dentries()
5063 ret = btrfs_search_forward(log, &min_key, path, trans->transid); log_new_dir_dentries()
5072 leaf = path->nodes[0]; log_new_dir_dentries()
5074 for (i = path->slots[0]; i < nritems; i++) { log_new_dir_dentries()
5111 btrfs_release_path(path); log_new_dir_dentries()
5130 ret = btrfs_next_leaf(log, path); log_new_dir_dentries()
5148 btrfs_free_path(path); log_new_dir_dentries()
5157 struct btrfs_path *path; btrfs_log_all_parents() local
5162 path = btrfs_alloc_path(); btrfs_log_all_parents()
5163 if (!path) btrfs_log_all_parents()
5165 path->skip_locking = 1; btrfs_log_all_parents()
5166 path->search_commit_root = 1; btrfs_log_all_parents()
5171 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_log_all_parents()
5176 struct extent_buffer *leaf = path->nodes[0]; btrfs_log_all_parents()
5177 int slot = path->slots[0]; btrfs_log_all_parents()
5183 ret = btrfs_next_leaf(root, path); btrfs_log_all_parents()
5232 path->slots[0]++; btrfs_log_all_parents()
5236 btrfs_free_path(path); btrfs_log_all_parents()
5435 struct btrfs_path *path; btrfs_recover_log_trees() local
5447 path = btrfs_alloc_path(); btrfs_recover_log_trees()
5448 if (!path) btrfs_recover_log_trees()
5475 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); btrfs_recover_log_trees()
5483 if (path->slots[0] == 0) btrfs_recover_log_trees()
5485 path->slots[0]--; btrfs_recover_log_trees()
5487 btrfs_item_key_to_cpu(path->nodes[0], &found_key, btrfs_recover_log_trees()
5488 path->slots[0]); btrfs_recover_log_trees()
5489 btrfs_release_path(path); btrfs_recover_log_trees()
5522 path); btrfs_recover_log_trees()
5537 btrfs_release_path(path); btrfs_recover_log_trees()
5552 btrfs_free_path(path); btrfs_recover_log_trees()
5568 btrfs_free_path(path); btrfs_recover_log_trees()
334 overwrite_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) overwrite_item() argument
577 replay_one_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) replay_one_extent() argument
821 drop_one_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct inode *dir, struct btrfs_dir_item *di) drop_one_dir_item() argument
871 inode_in_dir(struct btrfs_root *root, struct btrfs_path *path, u64 dirid, u64 objectid, u64 index, const char *name, int name_len) inode_in_dir() argument
967 __add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_root *log_root, struct inode *dir, struct inode *inode, struct extent_buffer *eb, u64 inode_objectid, u64 parent_objectid, u64 ref_index, char *name, int namelen, int *search_done) __add_inode_ref() argument
1196 add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) add_inode_ref() argument
1337 count_inode_extrefs(struct btrfs_root *root, struct inode *inode, struct btrfs_path *path) count_inode_extrefs() argument
1381 count_inode_refs(struct btrfs_root *root, struct inode *inode, struct btrfs_path *path) count_inode_refs() argument
1496 fixup_inode_link_counts(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path) fixup_inode_link_counts() argument
1556 link_to_fixup_dir(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid) link_to_fixup_dir() argument
1666 replay_one_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, struct btrfs_dir_item *di, struct btrfs_key *key) replay_one_name() argument
1791 replay_one_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) replay_one_dir_item() argument
1879 find_dir_range(struct btrfs_root *root, struct btrfs_path *path, u64 dirid, int key_type, u64 *start_ret, u64 *end_ret) find_dir_range() argument
1956 check_item_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, struct btrfs_path *log_path, struct inode *dir, struct btrfs_key *dir_key) check_item_in_log() argument
2064 replay_xattr_deletes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, const u64 ino) replay_xattr_deletes() argument
2172 replay_dir_deletes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, u64 dirid, int del_all) replay_dir_deletes() argument
2388 walk_down_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int *level, struct walk_control *wc) walk_down_log_tree() argument
2486 walk_up_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int *level, struct walk_control *wc) walk_up_log_tree() argument
3206 insert_dir_log_key(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, int key_type, u64 dirid, u64 first_offset, u64 last_offset) insert_dir_log_key() argument
3239 log_dir_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path, int key_type, struct btrfs_log_ctx *ctx, u64 min_offset, u64 *last_offset_ret) log_dir_items() argument
3429 log_directory_changes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path, struct btrfs_log_ctx *ctx) log_directory_changes() argument
3467 drop_objectid_items(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, u64 objectid, int max_key_type) drop_objectid_items() argument
3573 log_inode_item(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, struct inode *inode) log_inode_item() argument
4031 log_one_extent(struct btrfs_trans_handle *trans, struct inode *inode, struct btrfs_root *root, const struct extent_map *em, struct btrfs_path *path, const struct list_head *logged_list, struct btrfs_log_ctx *ctx) log_one_extent() argument
4125 btrfs_log_changed_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, struct btrfs_path *path, struct list_head *logged_list, struct btrfs_log_ctx *ctx) btrfs_log_changed_extents() argument
4201 logged_inode_size(struct btrfs_root *log, struct inode *inode, struct btrfs_path *path, u64 *size_ret) logged_inode_size() argument
4237 btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path) btrfs_log_all_xattrs() argument
4332 btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, struct btrfs_path *path) btrfs_log_trailing_hole() argument
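The tree-log results above repeat one core idiom: allocate a btrfs_path, position it with btrfs_search_slot(), walk the leaf slot by slot, hop to the next leaf with btrfs_next_leaf(), and release/free the path when done. A minimal sketch of that loop follows, using a hypothetical walk_items() helper; transaction handling, key-range checks and the copy_items()-style batching are omitted.

static int walk_items(struct btrfs_root *root, struct btrfs_key *min_key)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* position path->nodes[0] / path->slots[0] at the first candidate */
	ret = btrfs_search_slot(NULL, root, min_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* > 0: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
		/* ... process the item at (path->nodes[0], path->slots[0]) ... */
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);	/* btrfs_free_path() also releases held leaves */
	return ret < 0 ? ret : 0;
}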
inode-map.c
35 struct btrfs_path *path; caching_kthread() local
44 path = btrfs_alloc_path(); caching_kthread()
45 if (!path) caching_kthread()
49 path->skip_locking = 1; caching_kthread()
50 path->search_commit_root = 1; caching_kthread()
51 path->reada = 2; caching_kthread()
60 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); caching_kthread()
68 leaf = path->nodes[0]; caching_kthread()
69 slot = path->slots[0]; caching_kthread()
71 ret = btrfs_next_leaf(root, path); caching_kthread()
79 leaf = path->nodes[0]; caching_kthread()
89 btrfs_release_path(path); caching_kthread()
114 path->slots[0]++; caching_kthread()
132 btrfs_free_path(path); caching_kthread()
166 * through the extent tree, and this can keep ino allocation path start_caching()
396 struct btrfs_path *path; btrfs_save_ino_cache() local
418 path = btrfs_alloc_path(); btrfs_save_ino_cache()
419 if (!path) btrfs_save_ino_cache()
442 inode = lookup_free_ino_inode(root, path); btrfs_save_ino_cache()
452 ret = create_free_ino_inode(root, trans, path); btrfs_save_ino_cache()
503 ret = btrfs_write_out_ino_cache(root, trans, path, inode); btrfs_save_ino_cache()
514 btrfs_free_path(path); btrfs_save_ino_cache()
520 struct btrfs_path *path; btrfs_find_highest_objectid() local
527 path = btrfs_alloc_path(); btrfs_find_highest_objectid()
528 if (!path) btrfs_find_highest_objectid()
534 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); btrfs_find_highest_objectid()
538 if (path->slots[0] > 0) { btrfs_find_highest_objectid()
539 slot = path->slots[0] - 1; btrfs_find_highest_objectid()
540 l = path->nodes[0]; btrfs_find_highest_objectid()
549 btrfs_free_path(path); btrfs_find_highest_objectid()
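caching_kthread() above reads with path->skip_locking and path->search_commit_root set, the same combination btrfs_log_all_parents() uses in tree-log.c: searches run against the last committed root, so no tree locks are needed and readahead can be turned up. A sketch of that setup, with a hypothetical alloc_commit_root_path() wrapper:

static struct btrfs_path *alloc_commit_root_path(void)
{
	struct btrfs_path *path = btrfs_alloc_path();

	if (!path)
		return NULL;
	path->skip_locking = 1;		/* no extent_buffer locks taken */
	path->search_commit_root = 1;	/* search the committed root, not the live one */
	path->reada = 2;		/* forward readahead, as caching_kthread() sets */
	return path;
}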
backref.h
36 struct btrfs_path *path, struct btrfs_key *found_key,
49 struct btrfs_path *path,
57 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
64 struct btrfs_path *path);
68 u64 start_off, struct btrfs_path *path,
backref.c
247 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, add_all_parents() argument
264 eb = path->nodes[level]; add_all_parents()
272 * We normally enter this function with the path already pointing to add_all_parents()
276 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { add_all_parents()
278 ret = btrfs_next_leaf(root, path); add_all_parents()
280 ret = btrfs_next_old_leaf(root, path, time_seq); add_all_parents()
284 eb = path->nodes[0]; add_all_parents()
285 slot = path->slots[0]; add_all_parents()
322 ret = btrfs_next_item(root, path); add_all_parents()
324 ret = btrfs_next_old_item(root, path, time_seq); add_all_parents()
339 struct btrfs_path *path, u64 time_seq, __resolve_indirect_ref()
371 if (path->search_commit_root) __resolve_indirect_ref()
383 path->lowest_level = level; __resolve_indirect_ref()
385 ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path, __resolve_indirect_ref()
388 ret = btrfs_search_old_slot(root, &ref->key_for_search, path, __resolve_indirect_ref()
402 eb = path->nodes[level]; __resolve_indirect_ref()
409 eb = path->nodes[level]; __resolve_indirect_ref()
412 ret = add_all_parents(root, path, parents, ref, level, time_seq, __resolve_indirect_ref()
415 path->lowest_level = 0; __resolve_indirect_ref()
416 btrfs_release_path(path); __resolve_indirect_ref()
424 struct btrfs_path *path, u64 time_seq, __resolve_indirect_refs()
456 err = __resolve_indirect_ref(fs_info, path, time_seq, ref, list_for_each_entry_safe()
715 struct btrfs_path *path, u64 bytenr, __add_inline_refs()
733 leaf = path->nodes[0]; __add_inline_refs()
734 slot = path->slots[0]; __add_inline_refs()
828 struct btrfs_path *path, u64 bytenr, __add_keyed_refs()
838 ret = btrfs_next_item(extent_root, path); __add_keyed_refs()
846 slot = path->slots[0]; __add_keyed_refs()
847 leaf = path->nodes[0]; __add_keyed_refs()
935 struct btrfs_path *path; local
956 path = btrfs_alloc_path();
957 if (!path)
960 path->search_commit_root = 1;
961 path->skip_locking = 1;
965 path->skip_locking = 1;
968 * grab both a lock on the path and a lock on the delayed ref head.
975 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
998 btrfs_release_path(path);
1021 if (path->slots[0]) {
1025 path->slots[0]--;
1026 leaf = path->nodes[0];
1027 slot = path->slots[0];
1032 ret = __add_inline_refs(fs_info, path, bytenr,
1037 ret = __add_keyed_refs(fs_info, path, bytenr,
1043 btrfs_release_path(path);
1053 ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
1122 btrfs_free_path(path);
1315 u64 start_off, struct btrfs_path *path, btrfs_find_one_extref()
1330 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_find_one_extref()
1335 leaf = path->nodes[0]; btrfs_find_one_extref()
1336 slot = path->slots[0]; btrfs_find_one_extref()
1347 ret = btrfs_next_leaf(root, path); btrfs_find_one_extref()
1371 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_find_one_extref()
1383 * this iterates to turn a name (from iref/extref) into a full filesystem path.
1384 * Elements of the path are separated by '/' and the path is guaranteed to be
1385 * 0-terminated. the path is only given within the current file system.
1390 * in case the path buffer would overflow, the pointer is decremented further
1393 * required for the path to fit into the buffer. in that case, the returned
1396 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, btrfs_ref_to_path() argument
1407 int leave_spinning = path->leave_spinning; btrfs_ref_to_path()
1413 path->leave_spinning = 1; btrfs_ref_to_path()
1420 if (!path->skip_locking) btrfs_ref_to_path()
1424 ret = btrfs_find_item(fs_root, path, parent, 0, btrfs_ref_to_path()
1437 slot = path->slots[0]; btrfs_ref_to_path()
1438 eb = path->nodes[0]; btrfs_ref_to_path()
1439 /* make sure we can use eb after releasing the path */ btrfs_ref_to_path()
1441 if (!path->skip_locking) btrfs_ref_to_path()
1443 path->nodes[0] = NULL; btrfs_ref_to_path()
1444 path->locks[0] = 0; btrfs_ref_to_path()
1446 btrfs_release_path(path); btrfs_ref_to_path()
1458 btrfs_release_path(path); btrfs_ref_to_path()
1459 path->leave_spinning = leave_spinning; btrfs_ref_to_path()
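The comment block above describes how btrfs_ref_to_path() assembles a path right to left: each name component is written in front of what is already in the buffer, and on overflow the pointer keeps moving down without writing so the caller can compute the required size. Below is a userspace model of that prepend step, with a hypothetical prepend_name(); the kernel function additionally walks INODE_REF items to find each parent.

#include <string.h>

static char *prepend_name(char *buf, char *pos, const char *name, size_t len)
{
	pos -= len + 1;			/* room for '/' plus the component */
	if (pos >= buf) {
		pos[0] = '/';
		memcpy(pos + 1, name, len);
	}
	/*
	 * If pos dropped below buf, keep returning the decremented pointer
	 * without writing; buf - pos then tells the caller how much extra
	 * space the full path would have needed.
	 */
	return pos;
}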
1468 * this makes the path point to (logical EXTENT_ITEM *)
1473 struct btrfs_path *path, struct btrfs_key *found_key, extent_from_logical()
1491 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); extent_from_logical()
1495 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0); extent_from_logical()
1501 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]); extent_from_logical()
1513 eb = path->nodes[0]; extent_from_logical()
1514 item_size = btrfs_item_size_nr(eb, path->slots[0]); extent_from_logical()
1517 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); extent_from_logical()
1735 struct btrfs_path *path, iterate_inodes_from_logical()
1742 int search_commit_root = path->search_commit_root; iterate_inodes_from_logical()
1744 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags); iterate_inodes_from_logical()
1745 btrfs_release_path(path); iterate_inodes_from_logical()
1763 struct btrfs_path *path, iterate_inode_refs()
1779 ret = btrfs_find_item(fs_root, path, inum, iterate_inode_refs()
1792 slot = path->slots[0]; iterate_inode_refs()
1793 eb = btrfs_clone_extent_buffer(path->nodes[0]); iterate_inode_refs()
1801 btrfs_release_path(path); iterate_inode_refs()
1808 /* path must be released before calling iterate()! */ iterate_inode_refs()
1823 btrfs_release_path(path); iterate_inode_refs()
1829 struct btrfs_path *path, iterate_inode_extrefs()
1844 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, iterate_inode_extrefs()
1854 slot = path->slots[0]; iterate_inode_extrefs()
1855 eb = btrfs_clone_extent_buffer(path->nodes[0]); iterate_inode_extrefs()
1864 btrfs_release_path(path); iterate_inode_extrefs()
1890 btrfs_release_path(path); iterate_inode_extrefs()
1896 struct btrfs_path *path, iterate_irefs_t *iterate, iterate_irefs()
1902 ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx); iterate_irefs()
1908 ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx); iterate_irefs()
1916 * returns 0 if the path could be dumped (probably truncated)
1953 * it has been created large enough. each path is zero-terminated and accessed
1993 * total_bytes to allocate are passed, note that space usable for actual path
1998 struct btrfs_path *path) init_ipath()
2013 ifp->btrfs_path = path; init_ipath()
338 __resolve_indirect_ref(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 time_seq, struct __prelim_ref *ref, struct ulist *parents, const u64 *extent_item_pos, u64 total_refs) __resolve_indirect_ref() argument
423 __resolve_indirect_refs(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 time_seq, struct list_head *head, const u64 *extent_item_pos, u64 total_refs, u64 root_objectid) __resolve_indirect_refs() argument
714 __add_inline_refs(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, int *info_level, struct list_head *prefs, u64 *total_refs, u64 inum) __add_inline_refs() argument
827 __add_keyed_refs(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, int info_level, struct list_head *prefs, u64 inum) __add_keyed_refs() argument
1314 btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid, u64 start_off, struct btrfs_path *path, struct btrfs_inode_extref **ret_extref, u64 *found_off) btrfs_find_one_extref() argument
1472 extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, struct btrfs_path *path, struct btrfs_key *found_key, u64 *flags_ret) extent_from_logical() argument
1734 iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, struct btrfs_path *path, iterate_extent_inodes_t *iterate, void *ctx) iterate_inodes_from_logical() argument
1762 iterate_inode_refs(u64 inum, struct btrfs_root *fs_root, struct btrfs_path *path, iterate_irefs_t *iterate, void *ctx) iterate_inode_refs() argument
1828 iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, struct btrfs_path *path, iterate_irefs_t *iterate, void *ctx) iterate_inode_extrefs() argument
1895 iterate_irefs(u64 inum, struct btrfs_root *fs_root, struct btrfs_path *path, iterate_irefs_t *iterate, void *ctx) iterate_irefs() argument
1997 init_ipath(s32 total_bytes, struct btrfs_root *fs_root, struct btrfs_path *path) init_ipath() argument
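The init_ipath()/path-dump comments above describe the buffer that carries the resolved paths back to the caller. Userspace sees the same layout through BTRFS_IOC_INO_PATHS: a struct btrfs_data_container whose val[] entries are byte offsets, relative to val itself, of the zero-terminated path strings. A sketch of reading it, assuming an fspath buffer already filled by the ioctl (error handling trimmed):

#include <stdio.h>
#include <linux/btrfs.h>

static void print_paths(struct btrfs_data_container *fspath)
{
	__u32 i;

	for (i = 0; i < fspath->elem_cnt; i++) {
		/* val[i] is an offset from the start of val[] */
		const char *p = (const char *)fspath->val + fspath->val[i];

		printf("%s\n", p);
	}
	if (fspath->elem_missed)
		fprintf(stderr, "%u paths did not fit (%u bytes missing)\n",
			fspath->elem_missed, fspath->bytes_missing);
}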
delayed-inode.c
724 struct btrfs_path *path, btrfs_batch_insert_items()
740 BUG_ON(!path->nodes[0]); btrfs_batch_insert_items()
742 leaf = path->nodes[0]; btrfs_batch_insert_items()
775 * to sleep, so we set all locked nodes in the path to blocking locks btrfs_batch_insert_items()
778 btrfs_set_path_blocking(path); btrfs_batch_insert_items()
801 btrfs_clear_path_blocking(path, NULL, 0); btrfs_batch_insert_items()
804 setup_items_for_insert(root, path, keys, data_size, btrfs_batch_insert_items()
808 slot = path->slots[0]; btrfs_batch_insert_items()
835 struct btrfs_path *path, btrfs_insert_delayed_item()
842 ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, btrfs_insert_delayed_item()
847 leaf = path->nodes[0]; btrfs_insert_delayed_item()
849 ptr = btrfs_item_ptr(leaf, path->slots[0], char); btrfs_insert_delayed_item()
864 struct btrfs_path *path, btrfs_insert_delayed_items()
877 ret = btrfs_insert_delayed_item(trans, root, path, curr); btrfs_insert_delayed_items()
879 btrfs_release_path(path); btrfs_insert_delayed_items()
887 path->slots[0]++; btrfs_insert_delayed_items()
888 btrfs_batch_insert_items(root, path, curr); btrfs_insert_delayed_items()
891 btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_insert_delayed_items()
893 btrfs_release_path(path); btrfs_insert_delayed_items()
904 struct btrfs_path *path, btrfs_batch_delete_items()
914 BUG_ON(!path->nodes[0]); btrfs_batch_delete_items()
916 leaf = path->nodes[0]; btrfs_batch_delete_items()
918 i = path->slots[0]; btrfs_batch_delete_items()
951 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems); btrfs_batch_delete_items()
966 struct btrfs_path *path, btrfs_delete_delayed_items()
979 ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); btrfs_delete_delayed_items()
991 btrfs_release_path(path); btrfs_delete_delayed_items()
999 btrfs_batch_delete_items(trans, root, path, curr); btrfs_delete_delayed_items()
1000 btrfs_release_path(path); btrfs_delete_delayed_items()
1005 btrfs_release_path(path); btrfs_delete_delayed_items()
1039 struct btrfs_path *path, __btrfs_update_delayed_inode()
1057 ret = btrfs_lookup_inode(trans, root, path, &key, mod); __btrfs_update_delayed_inode()
1059 btrfs_release_path(path); __btrfs_update_delayed_inode()
1065 leaf = path->nodes[0]; __btrfs_update_delayed_inode()
1066 inode_item = btrfs_item_ptr(leaf, path->slots[0], __btrfs_update_delayed_inode()
1075 path->slots[0]++; __btrfs_update_delayed_inode()
1076 if (path->slots[0] >= btrfs_header_nritems(leaf)) __btrfs_update_delayed_inode()
1079 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); __btrfs_update_delayed_inode()
1092 btrfs_del_item(trans, root, path); __btrfs_update_delayed_inode()
1096 btrfs_release_path(path); __btrfs_update_delayed_inode()
1104 btrfs_release_path(path); __btrfs_update_delayed_inode()
1108 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); __btrfs_update_delayed_inode()
1114 leaf = path->nodes[0]; __btrfs_update_delayed_inode()
1115 path->slots[0]--; __btrfs_update_delayed_inode()
1121 struct btrfs_path *path, btrfs_update_delayed_inode()
1132 ret = __btrfs_update_delayed_inode(trans, root, path, node); btrfs_update_delayed_inode()
1139 struct btrfs_path *path, __btrfs_commit_inode_delayed_items()
1144 ret = btrfs_insert_delayed_items(trans, path, node->root, node); __btrfs_commit_inode_delayed_items()
1148 ret = btrfs_delete_delayed_items(trans, path, node->root, node); __btrfs_commit_inode_delayed_items()
1152 ret = btrfs_update_delayed_inode(trans, node->root, path, node); __btrfs_commit_inode_delayed_items()
1167 struct btrfs_path *path; __btrfs_run_delayed_items() local
1175 path = btrfs_alloc_path(); __btrfs_run_delayed_items()
1176 if (!path) __btrfs_run_delayed_items()
1178 path->leave_spinning = 1; __btrfs_run_delayed_items()
1187 ret = __btrfs_commit_inode_delayed_items(trans, path, __btrfs_run_delayed_items()
1203 btrfs_free_path(path); __btrfs_run_delayed_items()
1225 struct btrfs_path *path; btrfs_commit_inode_delayed_items() local
1240 path = btrfs_alloc_path(); btrfs_commit_inode_delayed_items()
1241 if (!path) { btrfs_commit_inode_delayed_items()
1245 path->leave_spinning = 1; btrfs_commit_inode_delayed_items()
1250 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node); btrfs_commit_inode_delayed_items()
1253 btrfs_free_path(path); btrfs_commit_inode_delayed_items()
1263 struct btrfs_path *path; btrfs_commit_inode_delayed_inode() local
1284 path = btrfs_alloc_path(); btrfs_commit_inode_delayed_inode()
1285 if (!path) { btrfs_commit_inode_delayed_inode()
1289 path->leave_spinning = 1; btrfs_commit_inode_delayed_inode()
1297 path, delayed_node); btrfs_commit_inode_delayed_inode()
1302 btrfs_free_path(path); btrfs_commit_inode_delayed_inode()
1336 struct btrfs_path *path; btrfs_async_run_delayed_root() local
1345 path = btrfs_alloc_path(); btrfs_async_run_delayed_root()
1346 if (!path) btrfs_async_run_delayed_root()
1357 path->leave_spinning = 1; btrfs_async_run_delayed_root()
1367 __btrfs_commit_inode_delayed_items(trans, path, delayed_node); btrfs_async_run_delayed_root()
1374 btrfs_release_path(path); btrfs_async_run_delayed_root()
1382 btrfs_free_path(path); btrfs_async_run_delayed_root()
723 btrfs_batch_insert_items(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_delayed_item *item) btrfs_batch_insert_items() argument
833 btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_delayed_item *delayed_item) btrfs_insert_delayed_item() argument
863 btrfs_insert_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_root *root, struct btrfs_delayed_node *node) btrfs_insert_delayed_items() argument
902 btrfs_batch_delete_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_delayed_item *item) btrfs_batch_delete_items() argument
965 btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_root *root, struct btrfs_delayed_node *node) btrfs_delete_delayed_items() argument
1037 __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_delayed_node *node) __btrfs_update_delayed_inode() argument
1119 btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_delayed_node *node) btrfs_update_delayed_inode() argument
1138 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_delayed_node *node) __btrfs_commit_inode_delayed_items() argument
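btrfs_batch_delete_items() here and drop_objectid_items() in tree-log.c share a deletion idiom: find one matching item, extend the range to its neighbours in the same leaf, then remove the whole run with a single btrfs_del_items() call instead of one tree operation per item. A simplified sketch with a hypothetical del_run() that scans linearly (drop_objectid_items() uses btrfs_bin_search() for the same purpose):

static int del_run(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, u64 objectid)
{
	struct btrfs_key key;
	int slot = path->slots[0];
	int start = slot;

	/* walk left inside the leaf while items still match the objectid */
	while (start > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, start - 1);
		if (key.objectid != objectid)
			break;
		start--;
	}
	/* one tree operation removes the whole contiguous run */
	return btrfs_del_items(trans, root, path, start, slot - start + 1);
}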
qgroup.c
305 struct btrfs_path *path = NULL; btrfs_read_qgroup_config() local
321 path = btrfs_alloc_path(); btrfs_read_qgroup_config()
322 if (!path) { btrfs_read_qgroup_config()
336 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1); btrfs_read_qgroup_config()
343 slot = path->slots[0]; btrfs_read_qgroup_config()
344 l = path->nodes[0]; btrfs_read_qgroup_config()
416 ret = btrfs_next_item(quota_root, path); btrfs_read_qgroup_config()
422 btrfs_release_path(path); btrfs_read_qgroup_config()
430 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0); btrfs_read_qgroup_config()
434 slot = path->slots[0]; btrfs_read_qgroup_config()
435 l = path->nodes[0]; btrfs_read_qgroup_config()
458 ret = btrfs_next_item(quota_root, path); btrfs_read_qgroup_config()
473 btrfs_free_path(path); btrfs_read_qgroup_config()
514 struct btrfs_path *path; add_qgroup_relation_item() local
517 path = btrfs_alloc_path(); add_qgroup_relation_item()
518 if (!path) add_qgroup_relation_item()
525 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0); add_qgroup_relation_item()
527 btrfs_mark_buffer_dirty(path->nodes[0]); add_qgroup_relation_item()
529 btrfs_free_path(path); add_qgroup_relation_item()
538 struct btrfs_path *path; del_qgroup_relation_item() local
541 path = btrfs_alloc_path(); del_qgroup_relation_item()
542 if (!path) del_qgroup_relation_item()
549 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); del_qgroup_relation_item()
558 ret = btrfs_del_item(trans, quota_root, path); del_qgroup_relation_item()
560 btrfs_free_path(path); del_qgroup_relation_item()
568 struct btrfs_path *path; add_qgroup_item() local
577 path = btrfs_alloc_path(); add_qgroup_item()
578 if (!path) add_qgroup_item()
591 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, add_qgroup_item()
596 leaf = path->nodes[0]; add_qgroup_item()
597 qgroup_info = btrfs_item_ptr(leaf, path->slots[0], add_qgroup_item()
607 btrfs_release_path(path); add_qgroup_item()
610 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, add_qgroup_item()
615 leaf = path->nodes[0]; add_qgroup_item()
616 qgroup_limit = btrfs_item_ptr(leaf, path->slots[0], add_qgroup_item()
628 btrfs_free_path(path); add_qgroup_item()
636 struct btrfs_path *path; del_qgroup_item() local
639 path = btrfs_alloc_path(); del_qgroup_item()
640 if (!path) del_qgroup_item()
646 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); del_qgroup_item()
655 ret = btrfs_del_item(trans, quota_root, path); del_qgroup_item()
659 btrfs_release_path(path); del_qgroup_item()
662 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); del_qgroup_item()
671 ret = btrfs_del_item(trans, quota_root, path); del_qgroup_item()
674 btrfs_free_path(path); del_qgroup_item()
682 struct btrfs_path *path; update_qgroup_limit_item() local
693 path = btrfs_alloc_path(); update_qgroup_limit_item()
694 if (!path) update_qgroup_limit_item()
697 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); update_qgroup_limit_item()
704 l = path->nodes[0]; update_qgroup_limit_item()
705 slot = path->slots[0]; update_qgroup_limit_item()
716 btrfs_free_path(path); update_qgroup_limit_item()
724 struct btrfs_path *path; update_qgroup_info_item() local
738 path = btrfs_alloc_path(); update_qgroup_info_item()
739 if (!path) update_qgroup_info_item()
742 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); update_qgroup_info_item()
749 l = path->nodes[0]; update_qgroup_info_item()
750 slot = path->slots[0]; update_qgroup_info_item()
761 btrfs_free_path(path); update_qgroup_info_item()
769 struct btrfs_path *path; update_qgroup_status_item() local
780 path = btrfs_alloc_path(); update_qgroup_status_item()
781 if (!path) update_qgroup_status_item()
784 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); update_qgroup_status_item()
791 l = path->nodes[0]; update_qgroup_status_item()
792 slot = path->slots[0]; update_qgroup_status_item()
802 btrfs_free_path(path); update_qgroup_status_item()
812 struct btrfs_path *path; btrfs_clean_quota_tree() local
818 path = btrfs_alloc_path(); btrfs_clean_quota_tree()
819 if (!path) btrfs_clean_quota_tree()
822 path->leave_spinning = 1; btrfs_clean_quota_tree()
829 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_clean_quota_tree()
832 leaf = path->nodes[0]; btrfs_clean_quota_tree()
841 path->slots[0] = 0; btrfs_clean_quota_tree()
842 ret = btrfs_del_items(trans, root, path, 0, nr); btrfs_clean_quota_tree()
846 btrfs_release_path(path); btrfs_clean_quota_tree()
851 btrfs_free_path(path); btrfs_clean_quota_tree()
860 struct btrfs_path *path = NULL; btrfs_quota_enable() local
891 path = btrfs_alloc_path(); btrfs_quota_enable()
892 if (!path) { btrfs_quota_enable()
901 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, btrfs_quota_enable()
906 leaf = path->nodes[0]; btrfs_quota_enable()
907 ptr = btrfs_item_ptr(leaf, path->slots[0], btrfs_quota_enable()
922 btrfs_release_path(path); btrfs_quota_enable()
923 ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0); btrfs_quota_enable()
931 slot = path->slots[0]; btrfs_quota_enable()
932 leaf = path->nodes[0]; btrfs_quota_enable()
947 ret = btrfs_next_item(tree_root, path); btrfs_quota_enable()
955 btrfs_release_path(path); btrfs_quota_enable()
970 btrfs_free_path(path); btrfs_quota_enable()
1105 * Quick path for updating qgroup with only excl refs.
2193 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, qgroup_rescan_leaf() argument
2207 path, 1, 0); qgroup_rescan_leaf()
2224 btrfs_release_path(path); qgroup_rescan_leaf()
2229 btrfs_item_key_to_cpu(path->nodes[0], &found, qgroup_rescan_leaf()
2230 btrfs_header_nritems(path->nodes[0]) - 1); qgroup_rescan_leaf()
2234 scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]); qgroup_rescan_leaf()
2243 slot = path->slots[0]; qgroup_rescan_leaf()
2244 btrfs_release_path(path); qgroup_rescan_leaf()
2281 struct btrfs_path *path; btrfs_qgroup_rescan_worker() local
2286 path = btrfs_alloc_path(); btrfs_qgroup_rescan_worker()
2287 if (!path) btrfs_qgroup_rescan_worker()
2300 err = qgroup_rescan_leaf(fs_info, path, trans); btrfs_qgroup_rescan_worker()
2309 btrfs_free_path(path); btrfs_qgroup_rescan_worker()
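update_qgroup_limit_item(), update_qgroup_info_item() and update_qgroup_status_item() above all follow the same read-modify-write shape: search with cow=1 so the leaf is COWed and writable, patch the item in place, then mark the buffer dirty. A sketch with a hypothetical update_item(); the item-specific btrfs_set_*() accessors are elided.

static int update_item(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_key *key,
		       struct btrfs_path *path)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, path, 0, 1);	/* cow=1 */
	if (ret > 0)
		ret = -ENOENT;	/* the item must already exist */
	if (ret)
		goto out;

	/*
	 * btrfs_item_ptr(path->nodes[0], path->slots[0], ...) plus the
	 * matching btrfs_set_*() accessors would patch the item here.
	 */
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(path);
	return ret;
}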
relocation.c
103 struct backref_node *path[BTRFS_MAX_LEVEL]; member in struct:backref_cache
365 * walk down backref nodes to find start of next reference path
1581 struct btrfs_path *path; get_new_location() local
1586 path = btrfs_alloc_path(); get_new_location()
1587 if (!path) get_new_location()
1591 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode), get_new_location()
1600 leaf = path->nodes[0]; get_new_location()
1601 fi = btrfs_item_ptr(leaf, path->slots[0], get_new_location()
1617 btrfs_free_path(path);
1742 struct btrfs_path *path, int level) memcmp_node_keys()
1747 btrfs_node_key(path->nodes[level], &key2, path->slots[level]); memcmp_node_keys()
1763 struct btrfs_path *path, struct btrfs_key *next_key, replace_path()
1785 slot = path->slots[lowest_level]; replace_path()
1786 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot); replace_path()
1827 eb = path->nodes[level]; replace_path()
1829 path->slots[level]); replace_path()
1831 path->slots[level]); replace_path()
1843 memcmp_node_keys(parent, slot, path, level)) { replace_path()
1879 btrfs_node_key_to_cpu(path->nodes[level], &key, replace_path()
1880 path->slots[level]); replace_path()
1881 btrfs_release_path(path); replace_path()
1883 path->lowest_level = level; replace_path()
1884 ret = btrfs_search_slot(trans, src, &key, path, 0, 1); replace_path()
1885 path->lowest_level = 0; replace_path()
1895 btrfs_set_node_blockptr(path->nodes[level], replace_path()
1896 path->slots[level], old_bytenr); replace_path()
1897 btrfs_set_node_ptr_generation(path->nodes[level], replace_path()
1898 path->slots[level], old_ptr_gen); replace_path()
1899 btrfs_mark_buffer_dirty(path->nodes[level]); replace_path()
1902 path->nodes[level]->start, replace_path()
1911 path->nodes[level]->start, replace_path()
1920 btrfs_unlock_up_safe(path, 0); replace_path()
1934 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, walk_up_reloc_tree() argument
1945 free_extent_buffer(path->nodes[i]); walk_up_reloc_tree()
1946 path->nodes[i] = NULL; walk_up_reloc_tree()
1949 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { walk_up_reloc_tree()
1950 eb = path->nodes[i]; walk_up_reloc_tree()
1952 while (path->slots[i] + 1 < nritems) { walk_up_reloc_tree()
1953 path->slots[i]++; walk_up_reloc_tree()
1954 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= walk_up_reloc_tree()
1961 free_extent_buffer(path->nodes[i]); walk_up_reloc_tree()
1962 path->nodes[i] = NULL; walk_up_reloc_tree()
1971 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, walk_down_reloc_tree() argument
1984 eb = path->nodes[i]; walk_down_reloc_tree()
1986 while (path->slots[i] < nritems) { walk_down_reloc_tree()
1987 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); walk_down_reloc_tree()
1990 path->slots[i]++; walk_down_reloc_tree()
1992 if (path->slots[i] >= nritems) { walk_down_reloc_tree()
2003 bytenr = btrfs_node_blockptr(eb, path->slots[i]); walk_down_reloc_tree()
2012 path->nodes[i - 1] = eb; walk_down_reloc_tree()
2013 path->slots[i - 1] = 0; walk_down_reloc_tree()
2090 static int find_next_key(struct btrfs_path *path, int level, find_next_key() argument
2095 if (!path->nodes[level]) find_next_key()
2097 if (path->slots[level] + 1 < find_next_key()
2098 btrfs_header_nritems(path->nodes[level])) { find_next_key()
2099 btrfs_node_key_to_cpu(path->nodes[level], key, find_next_key()
2100 path->slots[level] + 1); find_next_key()
2121 struct btrfs_path *path; merge_reloc_root() local
2130 path = btrfs_alloc_path(); merge_reloc_root()
2131 if (!path) merge_reloc_root()
2133 path->reada = 1; merge_reloc_root()
2141 path->nodes[level] = reloc_root->node; merge_reloc_root()
2142 path->slots[level] = 0; merge_reloc_root()
2148 path->lowest_level = level; merge_reloc_root()
2149 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); merge_reloc_root()
2150 path->lowest_level = 0; merge_reloc_root()
2152 btrfs_free_path(path); merge_reloc_root()
2156 btrfs_node_key_to_cpu(path->nodes[level], &next_key, merge_reloc_root()
2157 path->slots[level]); merge_reloc_root()
2160 btrfs_unlock_up_safe(path, 0); merge_reloc_root()
2184 ret = walk_down_reloc_tree(reloc_root, path, &level); merge_reloc_root()
2192 if (!find_next_key(path, level, &key) && merge_reloc_root()
2196 ret = replace_path(trans, root, reloc_root, path, merge_reloc_root()
2206 btrfs_node_key_to_cpu(path->nodes[level], &key, merge_reloc_root()
2207 path->slots[level]); merge_reloc_root()
2211 ret = walk_up_reloc_tree(reloc_root, path, &level); merge_reloc_root()
2220 btrfs_node_key(path->nodes[level], &root_item->drop_progress, merge_reloc_root()
2221 path->slots[level]); merge_reloc_root()
2244 btrfs_free_path(path); merge_reloc_root()
2507 /* setup backref node path for btrfs_reloc_cow_block */ select_reloc_root()
2509 rc->backref_cache.path[next->level] = next; select_reloc_root()
2640 struct btrfs_path *path, int lowest) do_relocation()
2656 path->lowest_level = node->level + 1; do_relocation()
2657 rc->backref_cache.path[node->level] = node; do_relocation()
2678 ret = btrfs_search_slot(trans, root, key, path, 0, 1); do_relocation()
2686 upper->eb = path->nodes[upper->level]; do_relocation()
2687 path->nodes[upper->level] = NULL; do_relocation()
2689 BUG_ON(upper->eb != path->nodes[upper->level]); do_relocation()
2693 path->locks[upper->level] = 0; do_relocation()
2695 slot = path->slots[upper->level]; do_relocation()
2696 btrfs_release_path(path); do_relocation()
2767 path->lowest_level = 0; do_relocation()
2775 struct btrfs_path *path) link_to_upper()
2780 return do_relocation(trans, rc, node, &key, path, 0); link_to_upper()
2785 struct btrfs_path *path, int err) finish_pending_nodes()
2801 ret = link_to_upper(trans, rc, node, path); finish_pending_nodes()
2903 struct btrfs_path *path) relocate_tree_block()
2934 path->lowest_level = node->level; relocate_tree_block()
2935 ret = btrfs_search_slot(trans, root, key, path, 0, 1); relocate_tree_block()
2936 btrfs_release_path(path); relocate_tree_block()
2943 ret = do_relocation(trans, rc, node, key, path, 1); relocate_tree_block()
2959 struct btrfs_path *path; relocate_tree_blocks() local
2965 path = btrfs_alloc_path(); relocate_tree_blocks()
2966 if (!path) { relocate_tree_blocks()
3002 path); relocate_tree_blocks()
3011 err = finish_pending_nodes(trans, rc, path, err); relocate_tree_blocks()
3014 btrfs_free_path(path); relocate_tree_blocks()
3237 struct btrfs_path *path, get_ref_objectid_v0()
3247 leaf = path->nodes[0]; get_ref_objectid_v0()
3248 slot = path->slots[0]; get_ref_objectid_v0()
3251 ret = btrfs_next_leaf(rc->extent_root, path); get_ref_objectid_v0()
3255 leaf = path->nodes[0]; get_ref_objectid_v0()
3256 slot = path->slots[0]; get_ref_objectid_v0()
3283 struct btrfs_path *path, add_tree_block()
3295 eb = path->nodes[0]; add_tree_block()
3296 item_size = btrfs_item_size_nr(eb, path->slots[0]); add_tree_block()
3300 ei = btrfs_item_ptr(eb, path->slots[0], add_tree_block()
3315 ret = get_ref_objectid_v0(rc, path, extent_key, add_tree_block()
3328 btrfs_release_path(path); add_tree_block()
3356 struct btrfs_path *path; __add_tree_block() local
3368 path = btrfs_alloc_path(); __add_tree_block()
3369 if (!path) __add_tree_block()
3381 path->search_commit_root = 1; __add_tree_block()
3382 path->skip_locking = 1; __add_tree_block()
3383 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); __add_tree_block()
3388 if (path->slots[0]) { __add_tree_block()
3389 path->slots[0]--; __add_tree_block()
3390 btrfs_item_key_to_cpu(path->nodes[0], &key, __add_tree_block()
3391 path->slots[0]); __add_tree_block()
3401 btrfs_release_path(path); __add_tree_block()
3407 ret = add_tree_block(rc, &key, path, blocks); __add_tree_block()
3409 btrfs_free_path(path); __add_tree_block()
3493 struct btrfs_path *path; find_data_references() local
3527 path = btrfs_alloc_path(); find_data_references()
3528 if (!path) find_data_references()
3530 path->reada = 1; find_data_references()
3545 path->search_commit_root = 1; find_data_references()
3546 path->skip_locking = 1; find_data_references()
3547 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); find_data_references()
3553 leaf = path->nodes[0]; find_data_references()
3568 path->slots[0] = nritems; find_data_references()
3572 while (path->slots[0] >= nritems) { find_data_references()
3573 ret = btrfs_next_leaf(root, path); find_data_references()
3581 leaf = path->nodes[0]; find_data_references()
3594 path->slots[0] = nritems; find_data_references()
3598 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); find_data_references()
3603 fi = btrfs_item_ptr(leaf, path->slots[0], find_data_references()
3642 path->slots[0] = nritems; find_data_references()
3644 path->slots[0]++; find_data_references()
3648 btrfs_free_path(path); find_data_references()
3658 struct btrfs_path *path,
3671 eb = path->nodes[0];
3672 ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3673 end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
3705 eb = path->nodes[0];
3706 if (path->slots[0] >= btrfs_header_nritems(eb)) {
3707 ret = btrfs_next_leaf(rc->extent_root, path);
3714 eb = path->nodes[0];
3717 btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
3731 dref = btrfs_item_ptr(eb, path->slots[0],
3742 path->slots[0]++;
3745 btrfs_release_path(path);
3755 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, find_next_extent() argument
3775 path->search_commit_root = 1; find_next_extent()
3776 path->skip_locking = 1; find_next_extent()
3777 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, find_next_extent()
3782 leaf = path->nodes[0]; find_next_extent()
3783 if (path->slots[0] >= btrfs_header_nritems(leaf)) { find_next_extent()
3784 ret = btrfs_next_leaf(rc->extent_root, path); find_next_extent()
3787 leaf = path->nodes[0]; find_next_extent()
3790 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); find_next_extent()
3798 path->slots[0]++; find_next_extent()
3804 path->slots[0]++; find_next_extent()
3811 path->slots[0]++; find_next_extent()
3820 btrfs_release_path(path); find_next_extent()
3832 btrfs_release_path(path); find_next_extent()
3909 struct btrfs_path *path; relocate_block_group() local
3917 path = btrfs_alloc_path(); relocate_block_group()
3918 if (!path) relocate_block_group()
3920 path->reada = 1; relocate_block_group()
3950 ret = find_next_extent(rc, path, &key); relocate_block_group()
3958 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], relocate_block_group()
3960 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); relocate_block_group()
3962 flags = btrfs_extent_flags(path->nodes[0], ei); relocate_block_group()
3973 ret = get_ref_objectid_v0(rc, path, &key, &ref_owner, relocate_block_group()
3985 btrfs_release_path(path); relocate_block_group()
3987 path->search_commit_root = 1; relocate_block_group()
3988 path->skip_locking = 1; relocate_block_group()
3990 &key, path, 0, 0); relocate_block_group()
4003 ret = add_tree_block(rc, &key, path, &blocks); relocate_block_group()
4006 ret = add_data_references(rc, &key, path, &blocks); relocate_block_group()
4008 btrfs_release_path(path); relocate_block_group()
4059 btrfs_release_path(path); relocate_block_group()
4097 btrfs_free_path(path); relocate_block_group()
4104 struct btrfs_path *path; __insert_orphan_inode() local
4109 path = btrfs_alloc_path(); __insert_orphan_inode()
4110 if (!path) __insert_orphan_inode()
4113 ret = btrfs_insert_empty_inode(trans, root, path, objectid); __insert_orphan_inode()
4117 leaf = path->nodes[0]; __insert_orphan_inode()
4118 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); __insert_orphan_inode()
4127 btrfs_free_path(path); __insert_orphan_inode()
4204 struct btrfs_path *path; btrfs_relocate_block_group() local
4225 path = btrfs_alloc_path(); btrfs_relocate_block_group()
4226 if (!path) { btrfs_relocate_block_group()
4232 path); btrfs_relocate_block_group()
4233 btrfs_free_path(path); btrfs_relocate_block_group()
4336 struct btrfs_path *path; btrfs_recover_relocation() local
4343 path = btrfs_alloc_path(); btrfs_recover_relocation()
4344 if (!path) btrfs_recover_relocation()
4346 path->reada = -1; btrfs_recover_relocation()
4354 path, 0, 0); btrfs_recover_relocation()
4360 if (path->slots[0] == 0) btrfs_recover_relocation()
4362 path->slots[0]--; btrfs_recover_relocation()
4364 leaf = path->nodes[0]; btrfs_recover_relocation()
4365 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_recover_relocation()
4366 btrfs_release_path(path); btrfs_recover_relocation()
4402 btrfs_release_path(path); btrfs_recover_relocation()
4468 btrfs_free_path(path); btrfs_recover_relocation()
4564 node = rc->backref_cache.path[level]; btrfs_reloc_cow_block()
1741 memcmp_node_keys(struct extent_buffer *eb, int slot, struct btrfs_path *path, int level) memcmp_node_keys() argument
1761 replace_path(struct btrfs_trans_handle *trans, struct btrfs_root *dest, struct btrfs_root *src, struct btrfs_path *path, struct btrfs_key *next_key, int lowest_level, int max_level) replace_path() argument
2636 do_relocation(struct btrfs_trans_handle *trans, struct reloc_control *rc, struct backref_node *node, struct btrfs_key *key, struct btrfs_path *path, int lowest) do_relocation() argument
2772 link_to_upper(struct btrfs_trans_handle *trans, struct reloc_control *rc, struct backref_node *node, struct btrfs_path *path) link_to_upper() argument
2783 finish_pending_nodes(struct btrfs_trans_handle *trans, struct reloc_control *rc, struct btrfs_path *path, int err) finish_pending_nodes() argument
2899 relocate_tree_block(struct btrfs_trans_handle *trans, struct reloc_control *rc, struct backref_node *node, struct btrfs_key *key, struct btrfs_path *path) relocate_tree_block() argument
3236 get_ref_objectid_v0(struct reloc_control *rc, struct btrfs_path *path, struct btrfs_key *extent_key, u64 *ref_objectid, int *path_change) get_ref_objectid_v0() argument
3281 add_tree_block(struct reloc_control *rc, struct btrfs_key *extent_key, struct btrfs_path *path, struct rb_root *blocks) add_tree_block() argument
3656 add_data_references(struct reloc_control *rc, struct btrfs_key *extent_key, struct btrfs_path *path, struct rb_root *blocks) add_data_references() argument
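relocation.c leans on the multi-level side of struct btrfs_path: path->nodes[level] caches the node held at each tree level and path->slots[level] the child index inside it, which is what walk_down_reloc_tree()/walk_up_reloc_tree() iterate over. find_next_key() above peeks one slot ahead at a given level; a single-level sketch of that peek follows, as a hypothetical peek_next_key() (the real function also climbs to higher levels when the current node is exhausted):

static int peek_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	if (!path->nodes[level])
		return 1;
	if (path->slots[level] + 1 >= btrfs_header_nritems(path->nodes[level]))
		return 1;	/* exhausted here; the caller looks one level up */
	btrfs_node_key_to_cpu(path->nodes[level], key, path->slots[level] + 1);
	return 0;
}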
send.c
43 * A fs_path is a helper to dynamically build path names with unknown size.
45 * It allows fast adding of path elements on the right side (normal path) and
46 * fast adding to the left side (reversed path). A reversed path can also be
61 * Average path length does not exceed 200 bytes, we'll have
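A toy model of the fs_path idea sketched in the comment above, assuming a fixed 256-byte buffer for brevity: keeping start/end pointers into a larger buffer makes appending on the right cheap, and prepending on the left works like the prepend sketch shown under backref.c. The kernel helper differs in that it grows its buffer on demand and, as the comment notes, can also build a path reversed.

#include <string.h>

struct toy_fs_path {
	char	buf[256];
	char	*start;	/* first byte of the current path */
	char	*end;	/* points at the terminating NUL */
};

static int toy_append(struct toy_fs_path *p, const char *name, size_t len)
{
	if (p->end + 1 + len >= p->buf + sizeof(p->buf))
		return -1;	/* the real fs_path reallocates instead */
	*p->end++ = '/';
	memcpy(p->end, name, len);
	p->end += len;
	*p->end = '\0';
	return 0;
}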
376 * The real size of the buffer is bigger, this will let the fast path fs_path_ensure_buf()
499 struct btrfs_path *path; alloc_path_for_send() local
501 path = btrfs_alloc_path(); alloc_path_for_send()
502 if (!path) alloc_path_for_send()
504 path->search_commit_root = 1; alloc_path_for_send()
505 path->skip_locking = 1; alloc_path_for_send()
506 path->need_commit_sem = 1; alloc_path_for_send()
507 return path; alloc_path_for_send()
719 struct fs_path *path, struct fs_path *lnk) send_link()
723 verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start); send_link()
729 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_link()
742 static int send_unlink(struct send_ctx *sctx, struct fs_path *path) send_unlink() argument
746 verbose_printk("btrfs: send_unlink %s\n", path->start); send_unlink()
752 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_unlink()
764 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path) send_rmdir() argument
768 verbose_printk("btrfs: send_rmdir %s\n", path->start); send_rmdir()
774 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_rmdir()
786 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path, __get_inode_info() argument
797 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); __get_inode_info()
804 ii = btrfs_item_ptr(path->nodes[0], path->slots[0], __get_inode_info()
807 *size = btrfs_inode_size(path->nodes[0], ii); __get_inode_info()
809 *gen = btrfs_inode_generation(path->nodes[0], ii); __get_inode_info()
811 *mode = btrfs_inode_mode(path->nodes[0], ii); __get_inode_info()
813 *uid = btrfs_inode_uid(path->nodes[0], ii); __get_inode_info()
815 *gid = btrfs_inode_gid(path->nodes[0], ii); __get_inode_info()
817 *rdev = btrfs_inode_rdev(path->nodes[0], ii); __get_inode_info()
827 struct btrfs_path *path; get_inode_info() local
830 path = alloc_path_for_send(); get_inode_info()
831 if (!path) get_inode_info()
833 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid, get_inode_info()
835 btrfs_free_path(path); get_inode_info()
849 * path must point to the INODE_REF or INODE_EXTREF when called.
851 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path, iterate_inode_ref() argument
855 struct extent_buffer *eb = path->nodes[0]; iterate_inode_ref()
863 int slot = path->slots[0]; iterate_inode_ref()
969 * path must point to the dir item when called.
971 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, iterate_dir_item() argument
1004 eb = path->nodes[0]; iterate_dir_item()
1005 slot = path->slots[0]; iterate_dir_item()
1099 * Retrieve the first path of an inode. If an inode has more than one
1103 u64 ino, struct fs_path *path) get_inode_path()
1113 fs_path_reset(path); get_inode_path()
1135 __copy_first_ref, path); get_inode_path()
1148 struct btrfs_path *path; member in struct:backref_ctx
1222 ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL, __iterate_backrefs()
1224 btrfs_release_path(bctx->path); __iterate_backrefs()
1275 * path must point to the extent item when called.
1278 struct btrfs_path *path, find_extent_clone()
1291 struct extent_buffer *eb = path->nodes[0]; find_extent_clone()
1303 /* We only use this path under the commit sem */ find_extent_clone()
1312 backref_ctx->path = tmp_path; find_extent_clone()
1324 fi = btrfs_item_ptr(eb, path->slots[0], find_extent_clone()
1454 struct btrfs_path *path; read_symlink() local
1462 path = alloc_path_for_send(); read_symlink()
1463 if (!path) read_symlink()
1469 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); read_symlink()
1488 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], read_symlink()
1490 type = btrfs_file_extent_type(path->nodes[0], ei); read_symlink()
1491 compression = btrfs_file_extent_compression(path->nodes[0], ei); read_symlink()
1496 len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei); read_symlink()
1498 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len); read_symlink()
1501 btrfs_free_path(path); read_symlink()
1514 struct btrfs_path *path; gen_unique_name() local
1520 path = alloc_path_for_send(); gen_unique_name()
1521 if (!path) gen_unique_name()
1530 path, BTRFS_FIRST_FREE_OBJECTID, gen_unique_name()
1532 btrfs_release_path(path); gen_unique_name()
1550 path, BTRFS_FIRST_FREE_OBJECTID, gen_unique_name()
1552 btrfs_release_path(path); gen_unique_name()
1569 btrfs_free_path(path); gen_unique_name()
1677 struct btrfs_path *path; lookup_dir_item_inode() local
1679 path = alloc_path_for_send(); lookup_dir_item_inode()
1680 if (!path) lookup_dir_item_inode()
1683 di = btrfs_lookup_dir_item(NULL, root, path, lookup_dir_item_inode()
1693 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); lookup_dir_item_inode()
1699 *found_type = btrfs_dir_type(path->nodes[0], di); lookup_dir_item_inode()
1702 btrfs_free_path(path); lookup_dir_item_inode()
1716 struct btrfs_path *path; get_first_ref() local
1720 path = alloc_path_for_send(); get_first_ref()
1721 if (!path) get_first_ref()
1728 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); get_first_ref()
1732 btrfs_item_key_to_cpu(path->nodes[0], &found_key, get_first_ref()
1733 path->slots[0]); get_first_ref()
1743 iref = btrfs_item_ptr(path->nodes[0], path->slots[0], get_first_ref()
1745 len = btrfs_inode_ref_name_len(path->nodes[0], iref); get_first_ref()
1746 ret = fs_path_add_from_extent_buffer(name, path->nodes[0], get_first_ref()
1752 extref = btrfs_item_ptr(path->nodes[0], path->slots[0], get_first_ref()
1754 len = btrfs_inode_extref_name_len(path->nodes[0], extref); get_first_ref()
1755 ret = fs_path_add_from_extent_buffer(name, path->nodes[0], get_first_ref()
1757 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref); get_first_ref()
1761 btrfs_release_path(path); get_first_ref()
1773 btrfs_free_path(path); get_first_ref()
1944 * if it has to use the path as returned by get_cur_path or the orphan name.
2213 * We walk the path up to the root. For every inode in between, we check if it
2224 * tried to get the path to the dir items, it would get a path inside that
2307 struct btrfs_path *path; send_subvol_begin() local
2314 path = btrfs_alloc_path(); send_subvol_begin()
2315 if (!path) send_subvol_begin()
2320 btrfs_free_path(path); send_subvol_begin()
2329 &key, path, 1, 0); send_subvol_begin()
2337 leaf = path->nodes[0]; send_subvol_begin()
2338 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); send_subvol_begin()
2344 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); send_subvol_begin()
2347 btrfs_release_path(path); send_subvol_begin()
2385 btrfs_free_path(path); send_subvol_begin()
2483 struct btrfs_path *path = NULL; send_utimes() local
2494 path = alloc_path_for_send(); send_utimes()
2495 if (!path) { send_utimes()
2503 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); send_utimes()
2507 eb = path->nodes[0]; send_utimes()
2508 slot = path->slots[0]; send_utimes()
2529 btrfs_free_path(path); send_utimes()
2535 * a valid path yet because we did not process the refs yet. So, the inode
2625 struct btrfs_path *path = NULL; did_create_dir() local
2633 path = alloc_path_for_send(); did_create_dir()
2634 if (!path) { did_create_dir()
2642 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); did_create_dir()
2647 eb = path->nodes[0]; did_create_dir()
2648 slot = path->slots[0]; did_create_dir()
2650 ret = btrfs_next_leaf(sctx->send_root, path); did_create_dir()
2676 path->slots[0]++; did_create_dir()
2680 btrfs_free_path(path); did_create_dir()
2729 u64 dir_gen, struct fs_path *path) __record_ref()
2739 ref->full_path = path; __record_ref()
2794 struct fs_path *path) orphanize_inode()
2807 ret = send_rename(sctx, path, orphan); orphanize_inode()
2889 struct btrfs_path *path; can_rmdir() local
2901 path = alloc_path_for_send(); can_rmdir()
2902 if (!path) can_rmdir()
2908 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); can_rmdir()
2915 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { can_rmdir()
2916 ret = btrfs_next_leaf(root, path); can_rmdir()
2923 btrfs_item_key_to_cpu(path->nodes[0], &found_key, can_rmdir()
2924 path->slots[0]); can_rmdir()
2929 di = btrfs_item_ptr(path->nodes[0], path->slots[0], can_rmdir()
2931 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); can_rmdir()
2953 path->slots[0]++; can_rmdir()
2959 btrfs_free_path(path); can_rmdir()
3320 struct btrfs_path *path; wait_for_dest_dir_move() local
3331 path = alloc_path_for_send(); wait_for_dest_dir_move()
3332 if (!path) wait_for_dest_dir_move()
3339 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); wait_for_dest_dir_move()
3347 di = btrfs_match_dir_item_name(sctx->parent_root, path, wait_for_dest_dir_move()
3361 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key); wait_for_dest_dir_move()
3397 btrfs_free_path(path); wait_for_dest_dir_move()
3454 * that ancestor is processed to avoid path build infinite loops (done wait_for_parent_move()
3466 * the send stream or getting into infinite path build wait_for_parent_move()
3557 * get the path of the first ref as it would like while receiving at process_recorded_refs()
3973 struct btrfs_path *path, find_iref()
3986 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx); find_iref()
4069 struct btrfs_path *path; process_all_refs() local
4077 path = alloc_path_for_send(); process_all_refs()
4078 if (!path) process_all_refs()
4097 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); process_all_refs()
4102 eb = path->nodes[0]; process_all_refs()
4103 slot = path->slots[0]; process_all_refs()
4105 ret = btrfs_next_leaf(root, path); process_all_refs()
4120 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); process_all_refs()
4124 path->slots[0]++; process_all_refs()
4126 btrfs_release_path(path); process_all_refs()
4133 btrfs_free_path(path); process_all_refs()
4138 struct fs_path *path, send_set_xattr()
4148 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_set_xattr()
4160 struct fs_path *path, send_remove_xattr()
4169 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); send_remove_xattr()
4292 struct btrfs_path *path, find_xattr()
4306 ret = iterate_dir_item(root, path, key, __find_xattr, &ctx); find_xattr()
4390 struct btrfs_path *path; process_all_new_xattrs() local
4396 path = alloc_path_for_send(); process_all_new_xattrs()
4397 if (!path) process_all_new_xattrs()
4405 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); process_all_new_xattrs()
4410 eb = path->nodes[0]; process_all_new_xattrs()
4411 slot = path->slots[0]; process_all_new_xattrs()
4413 ret = btrfs_next_leaf(root, path); process_all_new_xattrs()
4430 ret = iterate_dir_item(root, path, &found_key, process_all_new_xattrs()
4435 path->slots[0]++; process_all_new_xattrs()
4439 btrfs_free_path(path); process_all_new_xattrs()
4732 struct btrfs_path *path; clone_range() local
4736 path = alloc_path_for_send(); clone_range()
4737 if (!path) clone_range()
4765 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0); clone_range()
4768 if (ret > 0 && path->slots[0] > 0) { clone_range()
4769 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); clone_range()
4772 path->slots[0]--; clone_range()
4776 struct extent_buffer *leaf = path->nodes[0]; clone_range()
4777 int slot = path->slots[0]; clone_range()
4784 ret = btrfs_next_leaf(clone_root->root, path); clone_range()
4853 path->slots[0]++; clone_range()
4861 btrfs_free_path(path); clone_range()
4866 struct btrfs_path *path, send_write_or_clone()
4877 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], send_write_or_clone()
4879 type = btrfs_file_extent_type(path->nodes[0], ei); send_write_or_clone()
4881 len = btrfs_file_extent_inline_len(path->nodes[0], send_write_or_clone()
4882 path->slots[0], ei); send_write_or_clone()
4890 len = btrfs_file_extent_num_bytes(path->nodes[0], ei); send_write_or_clone()
4904 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei); send_write_or_clone()
4905 data_offset = btrfs_file_extent_offset(path->nodes[0], ei); send_write_or_clone()
4921 struct btrfs_path *path = NULL; is_extent_unchanged() local
4938 path = alloc_path_for_send(); is_extent_unchanged()
4939 if (!path) is_extent_unchanged()
4980 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); is_extent_unchanged()
4991 eb = path->nodes[0]; is_extent_unchanged()
4992 slot = path->slots[0]; is_extent_unchanged()
5050 ret = btrfs_next_item(sctx->parent_root, path); is_extent_unchanged()
5054 eb = path->nodes[0]; is_extent_unchanged()
5055 slot = path->slots[0]; is_extent_unchanged()
5081 btrfs_free_path(path); is_extent_unchanged()
5087 struct btrfs_path *path; get_last_extent() local
5095 path = alloc_path_for_send(); get_last_extent()
5096 if (!path) get_last_extent()
5104 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1); get_last_extent()
5108 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); get_last_extent()
5112 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], get_last_extent()
5114 type = btrfs_file_extent_type(path->nodes[0], fi); get_last_extent()
5116 u64 size = btrfs_file_extent_inline_len(path->nodes[0], get_last_extent()
5117 path->slots[0], fi); get_last_extent()
5122 btrfs_file_extent_num_bytes(path->nodes[0], fi); get_last_extent()
5126 btrfs_free_path(path); get_last_extent()
5130 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, maybe_send_hole() argument
5147 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], maybe_send_hole()
5149 type = btrfs_file_extent_type(path->nodes[0], fi); maybe_send_hole()
5151 u64 size = btrfs_file_extent_inline_len(path->nodes[0], maybe_send_hole()
5152 path->slots[0], fi); maybe_send_hole()
5157 btrfs_file_extent_num_bytes(path->nodes[0], fi); maybe_send_hole()
5160 if (path->slots[0] == 0 && maybe_send_hole()
5181 struct btrfs_path *path, process_extent()
5191 ret = is_extent_unchanged(sctx, path, key); process_extent()
5202 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], process_extent()
5204 type = btrfs_file_extent_type(path->nodes[0], ei); process_extent()
5219 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) { process_extent()
5226 ret = find_extent_clone(sctx, path, key->objectid, key->offset, process_extent()
5231 ret = send_write_or_clone(sctx, path, key, found_clone); process_extent()
5235 ret = maybe_send_hole(sctx, path, key); process_extent()
5244 struct btrfs_path *path; process_all_extents() local
5251 path = alloc_path_for_send(); process_all_extents()
5252 if (!path) process_all_extents()
5258 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); process_all_extents()
5263 eb = path->nodes[0]; process_all_extents()
5264 slot = path->slots[0]; process_all_extents()
5267 ret = btrfs_next_leaf(root, path); process_all_extents()
5285 ret = process_extent(sctx, path, &found_key); process_all_extents()
5289 path->slots[0]++; process_all_extents()
5293 btrfs_free_path(path); process_all_extents()
5348 * the old path (pre move/rename) of our current inode, and the finish_inode_if_needed()
5682 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path, compare_refs() argument
5702 leaf = path->nodes[0]; compare_refs()
5703 item_size = btrfs_item_size_nr(leaf, path->slots[0]); compare_refs()
5704 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); compare_refs()
5787 struct btrfs_path *path; full_send_tree() local
5791 path = alloc_path_for_send(); full_send_tree()
5792 if (!path) full_send_tree()
5799 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); full_send_tree()
5806 eb = path->nodes[0]; full_send_tree()
5807 slot = path->slots[0]; full_send_tree()
5810 ret = changed_cb(send_root, NULL, path, NULL, full_send_tree()
5819 ret = btrfs_next_item(send_root, path); full_send_tree()
5832 btrfs_free_path(path); full_send_tree()
718 send_link(struct send_ctx *sctx, struct fs_path *path, struct fs_path *lnk) send_link() argument
1102 get_inode_path(struct btrfs_root *root, u64 ino, struct fs_path *path) get_inode_path() argument
1277 find_extent_clone(struct send_ctx *sctx, struct btrfs_path *path, u64 ino, u64 data_offset, u64 ino_size, struct clone_root **found) find_extent_clone() argument
2728 __record_ref(struct list_head *head, u64 dir, u64 dir_gen, struct fs_path *path) __record_ref() argument
2793 orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, struct fs_path *path) orphanize_inode() argument
3972 find_iref(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, u64 dir, u64 dir_gen, struct fs_path *name) find_iref() argument
4137 send_set_xattr(struct send_ctx *sctx, struct fs_path *path, const char *name, int name_len, const char *data, int data_len) send_set_xattr() argument
4159 send_remove_xattr(struct send_ctx *sctx, struct fs_path *path, const char *name, int name_len) send_remove_xattr() argument
4291 find_xattr(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, const char *name, int name_len, char **data, int *data_len) find_xattr() argument
4865 send_write_or_clone(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key, struct clone_root *clone_root) send_write_or_clone() argument
5180 process_extent(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key) process_extent() argument
H A Dextent-tree.c102 static int find_next_key(struct btrfs_path *path, int level,
404 struct btrfs_path *path; caching_thread() local
418 path = btrfs_alloc_path(); caching_thread()
419 if (!path) caching_thread()
439 path->skip_locking = 1; caching_thread()
440 path->search_commit_root = 1; caching_thread()
441 path->reada = 1; caching_thread()
452 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); caching_thread()
456 leaf = path->nodes[0]; caching_thread()
465 if (path->slots[0] < nritems) { caching_thread()
466 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); caching_thread()
468 ret = find_next_key(path, 0, &key); caching_thread()
476 btrfs_release_path(path); caching_thread()
483 ret = btrfs_next_leaf(extent_root, path); caching_thread()
488 leaf = path->nodes[0]; caching_thread()
500 btrfs_release_path(path); caching_thread()
505 path->slots[0]++; caching_thread()
530 path->slots[0]++; caching_thread()
559 btrfs_free_path(path); caching_thread()
781 struct btrfs_path *path; btrfs_lookup_data_extent() local
783 path = btrfs_alloc_path(); btrfs_lookup_data_extent()
784 if (!path) btrfs_lookup_data_extent()
790 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path, btrfs_lookup_data_extent()
792 btrfs_free_path(path); btrfs_lookup_data_extent()
811 struct btrfs_path *path; btrfs_lookup_extent_info() local
829 path = btrfs_alloc_path(); btrfs_lookup_extent_info()
830 if (!path) btrfs_lookup_extent_info()
834 path->skip_locking = 1; btrfs_lookup_extent_info()
835 path->search_commit_root = 1; btrfs_lookup_extent_info()
847 &key, path, 0, 0); btrfs_lookup_extent_info()
852 if (path->slots[0]) { btrfs_lookup_extent_info()
853 path->slots[0]--; btrfs_lookup_extent_info()
854 btrfs_item_key_to_cpu(path->nodes[0], &key, btrfs_lookup_extent_info()
855 path->slots[0]); btrfs_lookup_extent_info()
864 leaf = path->nodes[0]; btrfs_lookup_extent_info()
865 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_lookup_extent_info()
867 ei = btrfs_item_ptr(leaf, path->slots[0], btrfs_lookup_extent_info()
875 ei0 = btrfs_item_ptr(leaf, path->slots[0], btrfs_lookup_extent_info()
902 btrfs_release_path(path); btrfs_lookup_extent_info()
931 btrfs_free_path(path); btrfs_lookup_extent_info()
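
btrfs_lookup_extent_info() (like caching_thread() further up) flips two path flags before searching: skip_locking, because the commit root is immutable and needs no tree locks, and search_commit_root, which points the walk at the last committed tree rather than the live one. A sketch of that configuration, as illustration only:

static int commit_root_lookup(struct btrfs_root *root,
			      struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->skip_locking = 1;		/* commit root never changes under us */
	path->search_commit_root = 1;	/* walk the committed tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/* ... consume path->nodes[0] / path->slots[0] when ret == 0 ... */
	btrfs_free_path(path);
	return ret;
}
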
1044 struct btrfs_path *path, convert_extent_item_v0()
1058 leaf = path->nodes[0]; convert_extent_item_v0()
1059 BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0)); convert_extent_item_v0()
1061 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); convert_extent_item_v0()
1062 ei0 = btrfs_item_ptr(leaf, path->slots[0], convert_extent_item_v0()
1068 if (path->slots[0] >= btrfs_header_nritems(leaf)) { convert_extent_item_v0()
1069 ret = btrfs_next_leaf(root, path); convert_extent_item_v0()
1073 leaf = path->nodes[0]; convert_extent_item_v0()
1076 path->slots[0]); convert_extent_item_v0()
1079 path->slots[0]++; convert_extent_item_v0()
1082 ref0 = btrfs_item_ptr(leaf, path->slots[0], convert_extent_item_v0()
1088 btrfs_release_path(path); convert_extent_item_v0()
1094 ret = btrfs_search_slot(trans, root, &key, path, convert_extent_item_v0()
1100 btrfs_extend_item(root, path, new_size); convert_extent_item_v0()
1102 leaf = path->nodes[0]; convert_extent_item_v0()
1103 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); convert_extent_item_v0()
1160 struct btrfs_path *path, lookup_extent_data_ref()
1184 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); lookup_extent_data_ref()
1195 btrfs_release_path(path); lookup_extent_data_ref()
1196 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); lookup_extent_data_ref()
1207 leaf = path->nodes[0]; lookup_extent_data_ref()
1210 if (path->slots[0] >= nritems) { lookup_extent_data_ref()
1211 ret = btrfs_next_leaf(root, path); lookup_extent_data_ref()
1217 leaf = path->nodes[0]; lookup_extent_data_ref()
1222 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); lookup_extent_data_ref()
1227 ref = btrfs_item_ptr(leaf, path->slots[0], lookup_extent_data_ref()
1233 btrfs_release_path(path); lookup_extent_data_ref()
1239 path->slots[0]++; lookup_extent_data_ref()
1247 struct btrfs_path *path, insert_extent_data_ref()
1270 ret = btrfs_insert_empty_item(trans, root, path, &key, size); insert_extent_data_ref()
1274 leaf = path->nodes[0]; insert_extent_data_ref()
1277 ref = btrfs_item_ptr(leaf, path->slots[0], insert_extent_data_ref()
1289 ref = btrfs_item_ptr(leaf, path->slots[0], insert_extent_data_ref()
1294 btrfs_release_path(path); insert_extent_data_ref()
1296 ret = btrfs_insert_empty_item(trans, root, path, &key, insert_extent_data_ref()
1301 leaf = path->nodes[0]; insert_extent_data_ref()
1303 ref = btrfs_item_ptr(leaf, path->slots[0], insert_extent_data_ref()
1320 btrfs_release_path(path); insert_extent_data_ref()
1326 struct btrfs_path *path, remove_extent_data_ref()
1336 leaf = path->nodes[0]; remove_extent_data_ref()
1337 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); remove_extent_data_ref()
1340 ref1 = btrfs_item_ptr(leaf, path->slots[0], remove_extent_data_ref()
1344 ref2 = btrfs_item_ptr(leaf, path->slots[0], remove_extent_data_ref()
1350 ref0 = btrfs_item_ptr(leaf, path->slots[0], remove_extent_data_ref()
1362 ret = btrfs_del_item(trans, root, path); remove_extent_data_ref()
1372 ref0 = btrfs_item_ptr(leaf, path->slots[0], remove_extent_data_ref()
1382 static noinline u32 extent_data_ref_count(struct btrfs_path *path, extent_data_ref_count() argument
1391 leaf = path->nodes[0]; extent_data_ref_count()
1392 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); extent_data_ref_count()
1403 ref1 = btrfs_item_ptr(leaf, path->slots[0], extent_data_ref_count()
1407 ref2 = btrfs_item_ptr(leaf, path->slots[0], extent_data_ref_count()
1413 ref0 = btrfs_item_ptr(leaf, path->slots[0], extent_data_ref_count()
1425 struct btrfs_path *path, lookup_tree_block_ref()
1441 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); lookup_tree_block_ref()
1446 btrfs_release_path(path); lookup_tree_block_ref()
1448 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); lookup_tree_block_ref()
1458 struct btrfs_path *path, insert_tree_block_ref()
1474 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); insert_tree_block_ref()
1475 btrfs_release_path(path); insert_tree_block_ref()
1496 static int find_next_key(struct btrfs_path *path, int level, find_next_key() argument
1501 if (!path->nodes[level]) find_next_key()
1503 if (path->slots[level] + 1 >= find_next_key()
1504 btrfs_header_nritems(path->nodes[level])) find_next_key()
1507 btrfs_item_key_to_cpu(path->nodes[level], key, find_next_key()
1508 path->slots[level] + 1); find_next_key()
1510 btrfs_node_key_to_cpu(path->nodes[level], key, find_next_key()
1511 path->slots[level] + 1); find_next_key()
1524 * if insert is true and there are too many inline back refs, the path
1533 struct btrfs_path *path, lookup_inline_extent_backref()
1562 path->keep_locks = 1; lookup_inline_extent_backref()
1576 ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1); lookup_inline_extent_backref()
1588 if (path->slots[0]) { lookup_inline_extent_backref()
1589 path->slots[0]--; lookup_inline_extent_backref()
1590 btrfs_item_key_to_cpu(path->nodes[0], &key, lookup_inline_extent_backref()
1591 path->slots[0]); lookup_inline_extent_backref()
1601 btrfs_release_path(path); lookup_inline_extent_backref()
1614 leaf = path->nodes[0]; lookup_inline_extent_backref()
1615 item_size = btrfs_item_size_nr(leaf, path->slots[0]); lookup_inline_extent_backref()
1622 ret = convert_extent_item_v0(trans, root, path, owner, lookup_inline_extent_backref()
1628 leaf = path->nodes[0]; lookup_inline_extent_backref()
1629 item_size = btrfs_item_size_nr(leaf, path->slots[0]); lookup_inline_extent_backref()
1634 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); lookup_inline_extent_backref()
1704 if (find_next_key(path, 0, &key) == 0 && lookup_inline_extent_backref()
1714 path->keep_locks = 0; lookup_inline_extent_backref()
1715 btrfs_unlock_up_safe(path, 1); lookup_inline_extent_backref()
1725 struct btrfs_path *path, setup_inline_extent_backref()
1740 leaf = path->nodes[0]; setup_inline_extent_backref()
1741 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); setup_inline_extent_backref()
1747 btrfs_extend_item(root, path, size); setup_inline_extent_backref()
1749 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); setup_inline_extent_backref()
1757 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]); setup_inline_extent_backref()
1786 struct btrfs_path *path, lookup_extent_backref()
1793 ret = lookup_inline_extent_backref(trans, root, path, ref_ret, lookup_extent_backref()
1799 btrfs_release_path(path); lookup_extent_backref()
1803 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent, lookup_extent_backref()
1806 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent, lookup_extent_backref()
1817 struct btrfs_path *path, update_inline_extent_backref()
1834 leaf = path->nodes[0]; update_inline_extent_backref()
1835 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); update_inline_extent_backref()
1867 item_size = btrfs_item_size_nr(leaf, path->slots[0]); update_inline_extent_backref()
1874 btrfs_truncate_item(root, path, item_size, 1); update_inline_extent_backref()
1882 struct btrfs_path *path, insert_inline_extent_backref()
1891 ret = lookup_inline_extent_backref(trans, root, path, &iref, insert_inline_extent_backref()
1896 update_inline_extent_backref(root, path, iref, insert_inline_extent_backref()
1899 setup_inline_extent_backref(root, path, iref, parent, insert_inline_extent_backref()
1909 struct btrfs_path *path, insert_extent_backref()
1916 ret = insert_tree_block_ref(trans, root, path, bytenr, insert_extent_backref()
1919 ret = insert_extent_data_ref(trans, root, path, bytenr, insert_extent_backref()
1928 struct btrfs_path *path, remove_extent_backref()
1936 update_inline_extent_backref(root, path, iref, remove_extent_backref()
1939 ret = remove_extent_data_ref(trans, root, path, refs_to_drop, remove_extent_backref()
1943 ret = btrfs_del_item(trans, root, path); remove_extent_backref()
2105 struct btrfs_path *path; __btrfs_inc_extent_ref() local
2114 path = btrfs_alloc_path(); __btrfs_inc_extent_ref()
2115 if (!path) __btrfs_inc_extent_ref()
2118 path->reada = 1; __btrfs_inc_extent_ref()
2119 path->leave_spinning = 1; __btrfs_inc_extent_ref()
2120 /* this will setup the path even if it fails to insert the back ref */ __btrfs_inc_extent_ref()
2121 ret = insert_inline_extent_backref(trans, fs_info->extent_root, path, __btrfs_inc_extent_ref()
2133 leaf = path->nodes[0]; __btrfs_inc_extent_ref()
2134 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); __btrfs_inc_extent_ref()
2135 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); __btrfs_inc_extent_ref()
2142 btrfs_release_path(path); __btrfs_inc_extent_ref()
2144 path->reada = 1; __btrfs_inc_extent_ref()
2145 path->leave_spinning = 1; __btrfs_inc_extent_ref()
2148 path, bytenr, parent, root_objectid, __btrfs_inc_extent_ref()
2153 btrfs_free_path(path); __btrfs_inc_extent_ref()
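
__btrfs_inc_extent_ref() reuses one path for two searches: btrfs_release_path() drops the extent-buffer references and locks the path holds but keeps the struct itself, and the per-search options (reada, leave_spinning) are re-armed before the second lookup, exactly as the hits above do. A sketch of the reuse idiom (two_lookups() is a hypothetical name):

static int two_lookups(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct btrfs_key *first,
		       struct btrfs_key *second)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;		/* read ahead while descending */
	path->leave_spinning = 1;	/* return holding spinning locks */
	ret = btrfs_search_slot(trans, root, first, path, 0, 1);
	if (ret < 0)
		goto out;
	/* ... modify the first item ... */
	btrfs_release_path(path);	/* unlock and unref; struct survives */
	path->reada = 1;		/* re-arm options for the next search */
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root, second, path, 0, 1);
out:
	btrfs_free_path(path);
	return ret;
}
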
2228 struct btrfs_path *path; run_delayed_extent_op() local
2242 path = btrfs_alloc_path(); run_delayed_extent_op()
2243 if (!path) run_delayed_extent_op()
2257 path->reada = 1; run_delayed_extent_op()
2258 path->leave_spinning = 1; run_delayed_extent_op()
2260 path, 0, 1); run_delayed_extent_op()
2267 if (path->slots[0] > 0) { run_delayed_extent_op()
2268 path->slots[0]--; run_delayed_extent_op()
2269 btrfs_item_key_to_cpu(path->nodes[0], &key, run_delayed_extent_op()
2270 path->slots[0]); run_delayed_extent_op()
2277 btrfs_release_path(path); run_delayed_extent_op()
2291 leaf = path->nodes[0]; run_delayed_extent_op()
2292 item_size = btrfs_item_size_nr(leaf, path->slots[0]); run_delayed_extent_op()
2296 path, (u64)-1, 0); run_delayed_extent_op()
2301 leaf = path->nodes[0]; run_delayed_extent_op()
2302 item_size = btrfs_item_size_nr(leaf, path->slots[0]); run_delayed_extent_op()
2306 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); run_delayed_extent_op()
2311 btrfs_free_path(path); run_delayed_extent_op()
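
run_delayed_extent_op() (and btrfs_lookup_extent_info() above) relies on a positive btrfs_search_slot() return leaving the path at the first slot past where the key would sit, so stepping slots[0] back by one lands on the nearest smaller item (e.g. the skinny-metadata form of the same extent). A sketch; the caller still verifies the key it gets back:

static int find_at_or_before(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_key *key,
			     struct btrfs_path *path)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret > 0 && path->slots[0] > 0) {
		/* step back to the item just before the missing key */
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], key,
				      path->slots[0]);
		/* caller compares *key against what it expected */
	}
	return ret;
}
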
3005 struct btrfs_path *path, check_delayed_ref()
3026 btrfs_release_path(path); check_delayed_ref()
3067 struct btrfs_path *path, check_committed_ref()
3083 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); check_committed_ref()
3089 if (path->slots[0] == 0) check_committed_ref()
3092 path->slots[0]--; check_committed_ref()
3093 leaf = path->nodes[0]; check_committed_ref()
3094 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); check_committed_ref()
3100 item_size = btrfs_item_size_nr(leaf, path->slots[0]); check_committed_ref()
3107 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); check_committed_ref()
3140 struct btrfs_path *path; btrfs_cross_ref_exist() local
3144 path = btrfs_alloc_path(); btrfs_cross_ref_exist()
3145 if (!path) btrfs_cross_ref_exist()
3149 ret = check_committed_ref(trans, root, path, objectid, btrfs_cross_ref_exist()
3154 ret2 = check_delayed_ref(trans, root, path, objectid, btrfs_cross_ref_exist()
3166 btrfs_free_path(path); btrfs_cross_ref_exist()
3260 struct btrfs_path *path, write_one_cache_group()
3268 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); write_one_cache_group()
3275 leaf = path->nodes[0]; write_one_cache_group()
3276 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); write_one_cache_group()
3280 btrfs_release_path(path); write_one_cache_group()
3317 struct btrfs_path *path) cache_save_setup()
3341 inode = lookup_free_space_inode(root, block_group, path); cache_save_setup()
3344 btrfs_release_path(path); cache_save_setup()
3355 ret = create_free_space_inode(root, trans, block_group, path); cache_save_setup()
3462 btrfs_release_path(path); cache_save_setup()
3478 struct btrfs_path *path; btrfs_setup_space_cache() local
3484 path = btrfs_alloc_path(); btrfs_setup_space_cache()
3485 if (!path) btrfs_setup_space_cache()
3492 cache_save_setup(cache, trans, path); btrfs_setup_space_cache()
3495 btrfs_free_path(path); btrfs_setup_space_cache()
3518 struct btrfs_path *path = NULL; btrfs_start_dirty_block_groups() local
3539 if (!path) { btrfs_start_dirty_block_groups()
3540 path = btrfs_alloc_path(); btrfs_start_dirty_block_groups()
3541 if (!path) btrfs_start_dirty_block_groups()
3563 &cache->io_ctl, path, btrfs_start_dirty_block_groups()
3583 cache_save_setup(cache, trans, path); btrfs_start_dirty_block_groups()
3587 ret = btrfs_write_out_cache(root, trans, cache, path); btrfs_start_dirty_block_groups()
3606 ret = write_one_cache_group(trans, root, path, cache); btrfs_start_dirty_block_groups()
3667 btrfs_free_path(path); btrfs_start_dirty_block_groups()
3678 struct btrfs_path *path; btrfs_write_dirty_block_groups() local
3682 path = btrfs_alloc_path(); btrfs_write_dirty_block_groups()
3683 if (!path) btrfs_write_dirty_block_groups()
3705 &cache->io_ctl, path, btrfs_write_dirty_block_groups()
3717 cache_save_setup(cache, trans, path); btrfs_write_dirty_block_groups()
3724 ret = btrfs_write_out_cache(root, trans, cache, path); btrfs_write_dirty_block_groups()
3738 ret = write_one_cache_group(trans, root, path, cache); btrfs_write_dirty_block_groups()
3753 &cache->io_ctl, path, cache->key.objectid); btrfs_write_dirty_block_groups()
3757 btrfs_free_path(path); btrfs_write_dirty_block_groups()
6421 struct btrfs_path *path; __btrfs_free_extent() local
6440 path = btrfs_alloc_path(); __btrfs_free_extent()
6441 if (!path) __btrfs_free_extent()
6444 path->reada = 1; __btrfs_free_extent()
6445 path->leave_spinning = 1; __btrfs_free_extent()
6453 ret = lookup_extent_backref(trans, extent_root, path, &iref, __btrfs_free_extent()
6458 extent_slot = path->slots[0]; __btrfs_free_extent()
6460 btrfs_item_key_to_cpu(path->nodes[0], &key, __btrfs_free_extent()
6474 if (path->slots[0] - extent_slot > 5) __btrfs_free_extent()
6479 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot); __btrfs_free_extent()
6485 ret = remove_extent_backref(trans, extent_root, path, __btrfs_free_extent()
6492 btrfs_release_path(path); __btrfs_free_extent()
6493 path->leave_spinning = 1; __btrfs_free_extent()
6505 &key, path, -1, 1); __btrfs_free_extent()
6506 if (ret > 0 && skinny_metadata && path->slots[0]) { __btrfs_free_extent()
6511 path->slots[0]--; __btrfs_free_extent()
6512 btrfs_item_key_to_cpu(path->nodes[0], &key, __btrfs_free_extent()
6513 path->slots[0]); __btrfs_free_extent()
6525 btrfs_release_path(path); __btrfs_free_extent()
6527 &key, path, -1, 1); __btrfs_free_extent()
6535 path->nodes[0]); __btrfs_free_extent()
6541 extent_slot = path->slots[0]; __btrfs_free_extent()
6544 btrfs_print_leaf(extent_root, path->nodes[0]); __btrfs_free_extent()
6556 leaf = path->nodes[0]; __btrfs_free_extent()
6560 BUG_ON(found_extent || extent_slot != path->slots[0]); __btrfs_free_extent()
6561 ret = convert_extent_item_v0(trans, extent_root, path, __btrfs_free_extent()
6568 btrfs_release_path(path); __btrfs_free_extent()
6569 path->leave_spinning = 1; __btrfs_free_extent()
6575 ret = btrfs_search_slot(trans, extent_root, &key, path, __btrfs_free_extent()
6580 btrfs_print_leaf(extent_root, path->nodes[0]); __btrfs_free_extent()
6587 extent_slot = path->slots[0]; __btrfs_free_extent()
6588 leaf = path->nodes[0]; __btrfs_free_extent()
6627 ret = remove_extent_backref(trans, extent_root, path, __btrfs_free_extent()
6640 extent_data_ref_count(path, iref)); __btrfs_free_extent()
6642 BUG_ON(path->slots[0] != extent_slot); __btrfs_free_extent()
6644 BUG_ON(path->slots[0] != extent_slot + 1); __btrfs_free_extent()
6645 path->slots[0] = extent_slot; __btrfs_free_extent()
6651 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], __btrfs_free_extent()
6657 btrfs_release_path(path); __btrfs_free_extent()
6673 btrfs_release_path(path); __btrfs_free_extent()
6676 btrfs_free_path(path); __btrfs_free_extent()
7627 struct btrfs_path *path; alloc_reserved_file_extent() local
7639 path = btrfs_alloc_path(); alloc_reserved_file_extent()
7640 if (!path) alloc_reserved_file_extent()
7643 path->leave_spinning = 1; alloc_reserved_file_extent()
7644 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, alloc_reserved_file_extent()
7647 btrfs_free_path(path); alloc_reserved_file_extent()
7651 leaf = path->nodes[0]; alloc_reserved_file_extent()
7652 extent_item = btrfs_item_ptr(leaf, path->slots[0], alloc_reserved_file_extent()
7675 btrfs_mark_buffer_dirty(path->nodes[0]); alloc_reserved_file_extent()
7676 btrfs_free_path(path); alloc_reserved_file_extent()
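
alloc_reserved_file_extent() shows the write path in miniature: btrfs_insert_empty_item() reserves space for the item at the key's slot, the caller fills the bytes in place, and btrfs_mark_buffer_dirty() schedules the leaf for writeback. A sketch assuming a flat blob of item data (insert_blob_item() is a hypothetical name):

static int insert_blob_item(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_key *key,
			    const void *data, u32 size)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, key, size);
	if (ret)
		goto out;
	leaf = path->nodes[0];
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, data, ptr, size);	/* fill the item */
	btrfs_mark_buffer_dirty(leaf);			/* queue writeback */
out:
	btrfs_free_path(path);
	return ret;
}
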
7699 struct btrfs_path *path; alloc_reserved_tree_block() local
7709 path = btrfs_alloc_path(); alloc_reserved_tree_block()
7710 if (!path) { alloc_reserved_tree_block()
7716 path->leave_spinning = 1; alloc_reserved_tree_block()
7717 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, alloc_reserved_tree_block()
7720 btrfs_free_path(path); alloc_reserved_tree_block()
7726 leaf = path->nodes[0]; alloc_reserved_tree_block()
7727 extent_item = btrfs_item_ptr(leaf, path->slots[0], alloc_reserved_tree_block()
7756 btrfs_free_path(path); alloc_reserved_tree_block()
8035 struct btrfs_path *path) reada_walk_down()
8049 if (path->slots[wc->level] < wc->reada_slot) { reada_walk_down()
8058 eb = path->nodes[wc->level]; reada_walk_down()
8062 for (slot = path->slots[wc->level]; slot < nritems; slot++) { reada_walk_down()
8070 if (slot == path->slots[wc->level]) reada_walk_down()
8187 * At the end of this function, we should have a path which has all
8196 struct btrfs_path *path, int root_level) adjust_slots_upwards()
8206 eb = path->nodes[level]; adjust_slots_upwards()
8208 path->slots[level]++; adjust_slots_upwards()
8209 slot = path->slots[level]; adjust_slots_upwards()
8217 btrfs_tree_unlock_rw(eb, path->locks[level]); adjust_slots_upwards()
8218 path->locks[level] = 0; adjust_slots_upwards()
8221 path->nodes[level] = NULL; adjust_slots_upwards()
8222 path->slots[level] = 0; adjust_slots_upwards()
8236 eb = path->nodes[root_level]; adjust_slots_upwards()
8237 if (path->slots[root_level] >= btrfs_header_nritems(eb)) adjust_slots_upwards()
8255 struct btrfs_path *path = NULL; account_shared_subtree() local
8274 path = btrfs_alloc_path(); account_shared_subtree()
8275 if (!path) account_shared_subtree()
8287 extent_buffer_get(root_eb); /* For path */ account_shared_subtree()
8288 path->nodes[root_level] = root_eb; account_shared_subtree()
8289 path->slots[root_level] = 0; account_shared_subtree()
8290 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ account_shared_subtree()
8294 if (path->nodes[level] == NULL) { account_shared_subtree()
8301 eb = path->nodes[level + 1]; account_shared_subtree()
8302 parent_slot = path->slots[level + 1]; account_shared_subtree()
8316 path->nodes[level] = eb; account_shared_subtree()
8317 path->slots[level] = 0; account_shared_subtree()
8321 path->locks[level] = BTRFS_READ_LOCK_BLOCKING; account_shared_subtree()
8330 ret = account_leaf_items(trans, root, path->nodes[level]); account_shared_subtree()
8335 ret = adjust_slots_upwards(root, path, root_level); account_shared_subtree()
8348 btrfs_free_path(path); account_shared_subtree()
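
account_shared_subtree() never calls btrfs_search_slot(); it seeds the path by hand, because nodes[], slots[] and locks[] are plain per-level arrays a walker may install entries into directly. A sketch of just the seeding step, following the locks convention the hits show (0 means release_path won't try to unlock that level):

static void seed_walk_path(struct btrfs_path *path,
			   struct extent_buffer *root_eb, int root_level)
{
	extent_buffer_get(root_eb);	/* ref the path will drop on release */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;	/* start at the node's first pointer */
	path->locks[root_level] = 0;	/* unlocked: release won't unlock it */
}
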
8363 struct btrfs_path *path, walk_down_proc()
8367 struct extent_buffer *eb = path->nodes[level]; walk_down_proc()
8382 BUG_ON(!path->locks[level]); walk_down_proc()
8397 if (path->locks[level] && !wc->keep_locks) { walk_down_proc()
8398 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_down_proc()
8399 path->locks[level] = 0; walk_down_proc()
8406 BUG_ON(!path->locks[level]); walk_down_proc()
8422 if (path->locks[level] && level > 0) { walk_down_proc()
8423 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_down_proc()
8424 path->locks[level] = 0; walk_down_proc()
8444 struct btrfs_path *path, do_walk_down()
8458 generation = btrfs_node_ptr_generation(path->nodes[level], do_walk_down()
8459 path->slots[level]); do_walk_down()
8471 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); do_walk_down()
8511 btrfs_node_key_to_cpu(path->nodes[level], &key, do_walk_down()
8512 path->slots[level]); do_walk_down()
8535 reada_walk_down(trans, root, wc, path); do_walk_down()
8549 path->nodes[level] = next; do_walk_down()
8550 path->slots[level] = 0; do_walk_down()
8551 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; do_walk_down()
8561 parent = path->nodes[level]->start; do_walk_down()
8564 btrfs_header_owner(path->nodes[level])); do_walk_down()
8603 struct btrfs_path *path, walk_up_proc()
8608 struct extent_buffer *eb = path->nodes[level]; walk_up_proc()
8616 ret = find_next_key(path, level + 1, &wc->update_progress); walk_up_proc()
8622 path->slots[level] = 0; walk_up_proc()
8629 if (!path->locks[level]) { walk_up_proc()
8633 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; walk_up_proc()
8640 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_up_proc()
8641 path->locks[level] = 0; walk_up_proc()
8646 btrfs_tree_unlock_rw(eb, path->locks[level]); walk_up_proc()
8647 path->locks[level] = 0; walk_up_proc()
8654 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); walk_up_proc()
8673 if (!path->locks[level] && walk_up_proc()
8677 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; walk_up_proc()
8690 parent = path->nodes[level + 1]->start; walk_up_proc()
8693 btrfs_header_owner(path->nodes[level + 1])); walk_up_proc()
8705 struct btrfs_path *path, walk_down_tree()
8713 ret = walk_down_proc(trans, root, path, wc, lookup_info); walk_down_tree()
8720 if (path->slots[level] >= walk_down_tree()
8721 btrfs_header_nritems(path->nodes[level])) walk_down_tree()
8724 ret = do_walk_down(trans, root, path, wc, &lookup_info); walk_down_tree()
8726 path->slots[level]++; walk_down_tree()
8737 struct btrfs_path *path, walk_up_tree()
8743 path->slots[level] = btrfs_header_nritems(path->nodes[level]); walk_up_tree()
8744 while (level < max_level && path->nodes[level]) { walk_up_tree()
8746 if (path->slots[level] + 1 < walk_up_tree()
8747 btrfs_header_nritems(path->nodes[level])) { walk_up_tree()
8748 path->slots[level]++; walk_up_tree()
8751 ret = walk_up_proc(trans, root, path, wc); walk_up_tree()
8755 if (path->locks[level]) { walk_up_tree()
8756 btrfs_tree_unlock_rw(path->nodes[level], walk_up_tree()
8757 path->locks[level]); walk_up_tree()
8758 path->locks[level] = 0; walk_up_tree()
8760 free_extent_buffer(path->nodes[level]); walk_up_tree()
8761 path->nodes[level] = NULL; walk_up_tree()
8785 struct btrfs_path *path; btrfs_drop_snapshot() local
8798 path = btrfs_alloc_path(); btrfs_drop_snapshot()
8799 if (!path) { btrfs_drop_snapshot()
8806 btrfs_free_path(path); btrfs_drop_snapshot()
8822 path->nodes[level] = btrfs_lock_root_node(root); btrfs_drop_snapshot()
8823 btrfs_set_lock_blocking(path->nodes[level]); btrfs_drop_snapshot()
8824 path->slots[level] = 0; btrfs_drop_snapshot()
8825 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; btrfs_drop_snapshot()
8835 path->lowest_level = level; btrfs_drop_snapshot()
8836 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_drop_snapshot()
8837 path->lowest_level = 0; btrfs_drop_snapshot()
8845 * unlock our path, this is safe because only this btrfs_drop_snapshot()
8848 btrfs_unlock_up_safe(path, 0); btrfs_drop_snapshot()
8852 btrfs_tree_lock(path->nodes[level]); btrfs_drop_snapshot()
8853 btrfs_set_lock_blocking(path->nodes[level]); btrfs_drop_snapshot()
8854 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; btrfs_drop_snapshot()
8857 path->nodes[level]->start, btrfs_drop_snapshot()
8869 btrfs_tree_unlock(path->nodes[level]); btrfs_drop_snapshot()
8870 path->locks[level] = 0; btrfs_drop_snapshot()
8886 ret = walk_down_tree(trans, root, path, wc); btrfs_drop_snapshot()
8892 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); btrfs_drop_snapshot()
8905 btrfs_node_key(path->nodes[level], btrfs_drop_snapshot()
8907 path->slots[level]); btrfs_drop_snapshot()
8939 btrfs_release_path(path); btrfs_drop_snapshot()
8950 ret = btrfs_find_root(tree_root, &root->root_key, path, btrfs_drop_snapshot()
8979 btrfs_free_path(path); btrfs_drop_snapshot()
9006 struct btrfs_path *path; btrfs_drop_subtree() local
9015 path = btrfs_alloc_path(); btrfs_drop_subtree()
9016 if (!path) btrfs_drop_subtree()
9021 btrfs_free_path(path); btrfs_drop_subtree()
9028 path->nodes[parent_level] = parent; btrfs_drop_subtree()
9029 path->slots[parent_level] = btrfs_header_nritems(parent); btrfs_drop_subtree()
9033 path->nodes[level] = node; btrfs_drop_subtree()
9034 path->slots[level] = 0; btrfs_drop_subtree()
9035 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; btrfs_drop_subtree()
9048 wret = walk_down_tree(trans, root, path, wc); btrfs_drop_subtree()
9054 wret = walk_up_tree(trans, root, path, wc, parent_level); btrfs_drop_subtree()
9062 btrfs_free_path(path); btrfs_drop_subtree()
9437 struct btrfs_path *path, struct btrfs_key *key) find_first_block_group()
9444 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); find_first_block_group()
9449 slot = path->slots[0]; find_first_block_group()
9450 leaf = path->nodes[0]; find_first_block_group()
9452 ret = btrfs_next_leaf(root, path); find_first_block_group()
9466 path->slots[0]++; find_first_block_group()
9679 struct btrfs_path *path; btrfs_read_block_groups() local
9694 path = btrfs_alloc_path(); btrfs_read_block_groups()
9695 if (!path) btrfs_read_block_groups()
9697 path->reada = 1; btrfs_read_block_groups()
9707 ret = find_first_block_group(root, path, &key); btrfs_read_block_groups()
9713 leaf = path->nodes[0]; btrfs_read_block_groups()
9714 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_read_block_groups()
9739 btrfs_item_ptr_offset(leaf, path->slots[0]), btrfs_read_block_groups()
9744 btrfs_release_path(path); btrfs_read_block_groups()
9851 btrfs_free_path(path); btrfs_read_block_groups()
10007 struct btrfs_path *path; btrfs_remove_block_group() local
10056 path = btrfs_alloc_path(); btrfs_remove_block_group()
10057 if (!path) { btrfs_remove_block_group()
10066 inode = lookup_free_space_inode(tree_root, block_group, path); btrfs_remove_block_group()
10081 &block_group->io_ctl, path, btrfs_remove_block_group()
10119 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); btrfs_remove_block_group()
10123 btrfs_release_path(path); btrfs_remove_block_group()
10125 ret = btrfs_del_item(trans, tree_root, path); btrfs_remove_block_group()
10128 btrfs_release_path(path); btrfs_remove_block_group()
10286 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_remove_block_group()
10292 ret = btrfs_del_item(trans, root, path); btrfs_remove_block_group()
10294 btrfs_free_path(path); btrfs_remove_block_group()
1042 convert_extent_item_v0(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 owner, u32 extra_size) convert_extent_item_v0() argument
1158 lookup_extent_data_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset) lookup_extent_data_ref() argument
1245 insert_extent_data_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add) insert_extent_data_ref() argument
1324 remove_extent_data_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int refs_to_drop, int *last_ref) remove_extent_data_ref() argument
1423 lookup_tree_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid) lookup_tree_block_ref() argument
1456 insert_tree_block_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid) insert_tree_block_ref() argument
1531 lookup_inline_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_extent_inline_ref **ref_ret, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 owner, u64 offset, int insert) lookup_inline_extent_backref() argument
1724 setup_inline_extent_backref(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add, struct btrfs_delayed_extent_op *extent_op) setup_inline_extent_backref() argument
1784 lookup_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_extent_inline_ref **ref_ret, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 owner, u64 offset) lookup_extent_backref() argument
1816 update_inline_extent_backref(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, int refs_to_mod, struct btrfs_delayed_extent_op *extent_op, int *last_ref) update_inline_extent_backref() argument
1880 insert_inline_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add, struct btrfs_delayed_extent_op *extent_op) insert_inline_extent_backref() argument
1907 insert_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add) insert_extent_backref() argument
1926 remove_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, int refs_to_drop, int is_data, int *last_ref) remove_extent_backref() argument
3003 check_delayed_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 offset, u64 bytenr) check_delayed_ref() argument
3065 check_committed_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 offset, u64 bytenr) check_committed_ref() argument
3258 write_one_cache_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_block_group_cache *cache) write_one_cache_group() argument
3315 cache_save_setup(struct btrfs_block_group_cache *block_group, struct btrfs_trans_handle *trans, struct btrfs_path *path) cache_save_setup() argument
8032 reada_walk_down(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct walk_control *wc, struct btrfs_path *path) reada_walk_down() argument
8195 adjust_slots_upwards(struct btrfs_root *root, struct btrfs_path *path, int root_level) adjust_slots_upwards() argument
8361 walk_down_proc(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc, int lookup_info) walk_down_proc() argument
8442 do_walk_down(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc, int *lookup_info) do_walk_down() argument
8601 walk_up_proc(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc) walk_up_proc() argument
8703 walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc) walk_down_tree() argument
8735 walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc, int max_level) walk_up_tree() argument
9436 find_first_block_group(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key) find_first_block_group() argument
H A Dfree-space-cache.h56 *block_group, struct btrfs_path *path);
60 struct btrfs_path *path);
74 struct btrfs_path *path, u64 offset);
78 struct btrfs_path *path);
80 struct btrfs_path *path);
83 struct btrfs_path *path);
88 struct btrfs_path *path,
H A Dfile.c693 struct btrfs_path *path, u64 start, u64 end, __btrfs_drop_extents()
729 ret = btrfs_lookup_file_extent(trans, root, path, ino, __btrfs_drop_extents()
733 if (ret > 0 && path->slots[0] > 0 && search_start == start) { __btrfs_drop_extents()
734 leaf = path->nodes[0]; __btrfs_drop_extents()
735 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); __btrfs_drop_extents()
738 path->slots[0]--; __btrfs_drop_extents()
743 leaf = path->nodes[0]; __btrfs_drop_extents()
744 if (path->slots[0] >= btrfs_header_nritems(leaf)) { __btrfs_drop_extents()
746 ret = btrfs_next_leaf(root, path); __btrfs_drop_extents()
754 leaf = path->nodes[0]; __btrfs_drop_extents()
758 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); __btrfs_drop_extents()
765 path->slots[0]++; __btrfs_drop_extents()
771 fi = btrfs_item_ptr(leaf, path->slots[0], __btrfs_drop_extents()
785 path->slots[0], fi); __btrfs_drop_extents()
804 path->slots[0]++; __btrfs_drop_extents()
812 btrfs_release_path(path); __btrfs_drop_extents()
829 ret = btrfs_duplicate_item(trans, root, path, __btrfs_drop_extents()
832 btrfs_release_path(path); __btrfs_drop_extents()
838 leaf = path->nodes[0]; __btrfs_drop_extents()
839 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, __btrfs_drop_extents()
844 fi = btrfs_item_ptr(leaf, path->slots[0], __btrfs_drop_extents()
875 btrfs_set_item_key_safe(root->fs_info, path, &new_key); __btrfs_drop_extents()
907 path->slots[0]++; __btrfs_drop_extents()
918 del_slot = path->slots[0]; __btrfs_drop_extents()
921 BUG_ON(del_slot + del_nr != path->slots[0]); __btrfs_drop_extents()
945 if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) { __btrfs_drop_extents()
946 path->slots[0]++; __btrfs_drop_extents()
950 ret = btrfs_del_items(trans, root, path, del_slot, __btrfs_drop_extents()
960 btrfs_release_path(path); __btrfs_drop_extents()
969 * Set path->slots[0] to first slot, so that after the delete __btrfs_drop_extents()
972 * path->slots[0] for our insertion (if replace_extent != 0). __btrfs_drop_extents()
974 path->slots[0] = del_slot; __btrfs_drop_extents()
975 ret = btrfs_del_items(trans, root, path, del_slot, del_nr); __btrfs_drop_extents()
980 leaf = path->nodes[0]; __btrfs_drop_extents()
983 * which case it unlocked our path, so check path->locks[0] matches a __btrfs_drop_extents()
987 (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING || __btrfs_drop_extents()
988 path->locks[0] == BTRFS_WRITE_LOCK) && __btrfs_drop_extents()
995 if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) { __btrfs_drop_extents()
998 btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]); __btrfs_drop_extents()
1000 path->slots[0]++; __btrfs_drop_extents()
1002 setup_items_for_insert(root, path, &key, __btrfs_drop_extents()
1011 btrfs_release_path(path); __btrfs_drop_extents()
1021 struct btrfs_path *path; btrfs_drop_extents() local
1024 path = btrfs_alloc_path(); btrfs_drop_extents()
1025 if (!path) btrfs_drop_extents()
1027 ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL, btrfs_drop_extents()
1029 btrfs_free_path(path); btrfs_drop_extents()
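
__btrfs_drop_extents() does not delete items one at a time; it grows a del_slot/del_nr run of contiguous doomed slots and removes the whole run with one btrfs_del_items() call, so the leaf is rebalanced once rather than once per item. The operation reduces to this sketch (the run accumulation is elided):

static int delete_contiguous(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, int nr)
{
	/* path->slots[0] points at the first of nr adjacent items */
	return btrfs_del_items(trans, root, path, path->slots[0], nr);
}
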
1078 struct btrfs_path *path; btrfs_mark_extent_written() local
1095 path = btrfs_alloc_path(); btrfs_mark_extent_written()
1096 if (!path) btrfs_mark_extent_written()
1105 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_mark_extent_written()
1108 if (ret > 0 && path->slots[0] > 0) btrfs_mark_extent_written()
1109 path->slots[0]--; btrfs_mark_extent_written()
1111 leaf = path->nodes[0]; btrfs_mark_extent_written()
1112 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_mark_extent_written()
1114 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1129 if (extent_mergeable(leaf, path->slots[0] - 1, btrfs_mark_extent_written()
1133 btrfs_set_item_key_safe(root->fs_info, path, &new_key); btrfs_mark_extent_written()
1134 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1142 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, btrfs_mark_extent_written()
1156 if (extent_mergeable(leaf, path->slots[0] + 1, btrfs_mark_extent_written()
1159 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1165 path->slots[0]++; btrfs_mark_extent_written()
1167 btrfs_set_item_key_safe(root->fs_info, path, &new_key); btrfs_mark_extent_written()
1169 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1187 ret = btrfs_duplicate_item(trans, root, path, &new_key); btrfs_mark_extent_written()
1189 btrfs_release_path(path); btrfs_mark_extent_written()
1197 leaf = path->nodes[0]; btrfs_mark_extent_written()
1198 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, btrfs_mark_extent_written()
1204 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1222 path->slots[0]--; btrfs_mark_extent_written()
1230 if (extent_mergeable(leaf, path->slots[0] + 1, btrfs_mark_extent_written()
1234 btrfs_release_path(path); btrfs_mark_extent_written()
1238 del_slot = path->slots[0] + 1; btrfs_mark_extent_written()
1247 if (extent_mergeable(leaf, path->slots[0] - 1, btrfs_mark_extent_written()
1251 btrfs_release_path(path); btrfs_mark_extent_written()
1255 del_slot = path->slots[0]; btrfs_mark_extent_written()
1263 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1279 ret = btrfs_del_items(trans, root, path, del_slot, del_nr); btrfs_mark_extent_written()
1286 btrfs_free_path(path); btrfs_mark_extent_written()
1987 * bail out safely. For the fast path, when the full sync flag is not btrfs_sync_file()
2025 * example checking cross references in the nocow path). If we use join btrfs_sync_file()
2068 * just like in the non fast fsync path, where we check for the ordered btrfs_sync_file()
2152 struct btrfs_path *path, u64 offset, u64 end) fill_holes()
2169 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); fill_holes()
2174 leaf = path->nodes[0]; fill_holes()
2175 if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) { fill_holes()
2178 path->slots[0]--; fill_holes()
2179 fi = btrfs_item_ptr(leaf, path->slots[0], fill_holes()
2190 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) { fill_holes()
2194 btrfs_set_item_key_safe(root->fs_info, path, &key); fill_holes()
2195 fi = btrfs_item_ptr(leaf, path->slots[0], fill_holes()
2205 btrfs_release_path(path); fill_holes()
2214 btrfs_release_path(path); fill_holes()
2284 struct btrfs_path *path; btrfs_punch_hole() local
2429 path = btrfs_alloc_path(); btrfs_punch_hole()
2430 if (!path) { btrfs_punch_hole()
2463 ret = __btrfs_drop_extents(trans, root, inode, path, btrfs_punch_hole()
2472 ret = fill_holes(trans, inode, path, cur_offset, btrfs_punch_hole()
2525 * map representing the existing hole), otherwise the fast fsync path btrfs_punch_hole()
2537 ret = fill_holes(trans, inode, path, cur_offset, drop_end); btrfs_punch_hole()
2557 btrfs_free_path(path); btrfs_punch_hole()
691 __btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, struct btrfs_path *path, u64 start, u64 end, u64 *drop_end, int drop_cache, int replace_extent, u32 extent_item_size, int *key_inserted) __btrfs_drop_extents() argument
2151 fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, struct btrfs_path *path, u64 offset, u64 end) fill_holes() argument
H A Dinode.c137 struct btrfs_path *path, int extent_inserted, insert_inline_extent()
167 path->leave_spinning = 1; insert_inline_extent()
168 ret = btrfs_insert_empty_item(trans, root, path, &key, insert_inline_extent()
175 leaf = path->nodes[0]; insert_inline_extent()
176 ei = btrfs_item_ptr(leaf, path->slots[0], insert_inline_extent()
214 btrfs_release_path(path); insert_inline_extent()
252 struct btrfs_path *path; cow_file_range_inline() local
269 path = btrfs_alloc_path(); cow_file_range_inline()
270 if (!path) cow_file_range_inline()
275 btrfs_free_path(path); cow_file_range_inline()
287 ret = __btrfs_drop_extents(trans, root, inode, path, cow_file_range_inline()
297 ret = insert_inline_extent(trans, path, extent_inserted, cow_file_range_inline()
320 btrfs_free_path(path); cow_file_range_inline()
1223 struct btrfs_path *path; run_delalloc_nocow() local
1242 path = btrfs_alloc_path(); run_delalloc_nocow()
1243 if (!path) { run_delalloc_nocow()
1269 btrfs_free_path(path); run_delalloc_nocow()
1278 ret = btrfs_lookup_file_extent(trans, root, path, ino, run_delalloc_nocow()
1282 if (ret > 0 && path->slots[0] > 0 && check_prev) { run_delalloc_nocow()
1283 leaf = path->nodes[0]; run_delalloc_nocow()
1285 path->slots[0] - 1); run_delalloc_nocow()
1288 path->slots[0]--; run_delalloc_nocow()
1292 leaf = path->nodes[0]; run_delalloc_nocow()
1293 if (path->slots[0] >= btrfs_header_nritems(leaf)) { run_delalloc_nocow()
1294 ret = btrfs_next_leaf(root, path); run_delalloc_nocow()
1299 leaf = path->nodes[0]; run_delalloc_nocow()
1305 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); run_delalloc_nocow()
1311 path->slots[0]++; run_delalloc_nocow()
1324 fi = btrfs_item_ptr(leaf, path->slots[0], run_delalloc_nocow()
1338 path->slots[0]++; run_delalloc_nocow()
1378 path->slots[0], fi); run_delalloc_nocow()
1385 path->slots[0]++; run_delalloc_nocow()
1396 path->slots[0]++; run_delalloc_nocow()
1400 btrfs_release_path(path); run_delalloc_nocow()
1474 btrfs_release_path(path); run_delalloc_nocow()
1501 btrfs_free_path(path); run_delalloc_nocow()
2076 struct btrfs_path *path; insert_reserved_file_extent() local
2082 path = btrfs_alloc_path(); insert_reserved_file_extent()
2083 if (!path) insert_reserved_file_extent()
2095 ret = __btrfs_drop_extents(trans, root, inode, path, file_pos, insert_reserved_file_extent()
2106 path->leave_spinning = 1; insert_reserved_file_extent()
2107 ret = btrfs_insert_empty_item(trans, root, path, &ins, insert_reserved_file_extent()
2112 leaf = path->nodes[0]; insert_reserved_file_extent()
2113 fi = btrfs_item_ptr(leaf, path->slots[0], insert_reserved_file_extent()
2127 btrfs_release_path(path); insert_reserved_file_extent()
2144 btrfs_free_path(path); insert_reserved_file_extent()
2175 struct btrfs_path *path; member in struct:new_sa_defrag_extent
2250 struct btrfs_path *path = new->path; record_one_backref() local
2287 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); record_one_backref()
2295 leaf = path->nodes[0]; record_one_backref()
2296 slot = path->slots[0]; record_one_backref()
2299 ret = btrfs_next_leaf(root, path); record_one_backref()
2309 path->slots[0]++; record_one_backref()
2359 btrfs_release_path(path); record_one_backref()
2364 static noinline bool record_extent_backrefs(struct btrfs_path *path, record_extent_backrefs() argument
2371 new->path = path; record_extent_backrefs()
2376 path, record_one_backref, record_extent_backrefs()
2417 static noinline int relink_extent_backref(struct btrfs_path *path, relink_extent_backref() argument
2503 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); relink_extent_backref()
2511 extent = btrfs_item_ptr(path->nodes[0], path->slots[0], relink_extent_backref()
2514 if (btrfs_file_extent_generation(path->nodes[0], extent) != relink_extent_backref()
2518 btrfs_release_path(path); relink_extent_backref()
2538 path->leave_spinning = 1; relink_extent_backref()
2544 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); relink_extent_backref()
2548 path->slots[0]--; relink_extent_backref()
2549 leaf = path->nodes[0]; relink_extent_backref()
2550 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); relink_extent_backref()
2552 fi = btrfs_item_ptr(leaf, path->slots[0], relink_extent_backref()
2567 btrfs_release_path(path); relink_extent_backref()
2572 ret = btrfs_insert_empty_item(trans, root, path, &key, relink_extent_backref()
2579 leaf = path->nodes[0]; relink_extent_backref()
2580 item = btrfs_item_ptr(leaf, path->slots[0], relink_extent_backref()
2595 btrfs_release_path(path); relink_extent_backref()
2608 btrfs_release_path(path); relink_extent_backref()
2609 path->leave_spinning = 0; relink_extent_backref()
2633 struct btrfs_path *path; relink_file_extents() local
2644 path = btrfs_alloc_path(); relink_file_extents()
2645 if (!path) relink_file_extents()
2648 if (!record_extent_backrefs(path, new)) { relink_file_extents()
2649 btrfs_free_path(path); relink_file_extents()
2652 btrfs_release_path(path); relink_file_extents()
2662 ret = relink_extent_backref(path, prev, backref); relink_file_extents()
2675 btrfs_free_path(path); relink_file_extents()
2688 struct btrfs_path *path; record_old_file_extents() local
2707 path = btrfs_alloc_path(); record_old_file_extents()
2708 if (!path) record_old_file_extents()
2715 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); record_old_file_extents()
2718 if (ret > 0 && path->slots[0] > 0) record_old_file_extents()
2719 path->slots[0]--; record_old_file_extents()
2732 l = path->nodes[0]; record_old_file_extents()
2733 slot = path->slots[0]; record_old_file_extents()
2736 ret = btrfs_next_leaf(root, path); record_old_file_extents()
2780 path->slots[0]++; record_old_file_extents()
2784 btrfs_free_path(path); record_old_file_extents()
2790 btrfs_free_path(path); record_old_file_extents()
3334 struct btrfs_path *path; btrfs_orphan_cleanup() local
3345 path = btrfs_alloc_path(); btrfs_orphan_cleanup()
3346 if (!path) { btrfs_orphan_cleanup()
3350 path->reada = -1; btrfs_orphan_cleanup()
3357 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_orphan_cleanup()
3363 * is weird, but possible, so only screw with path if we didn't btrfs_orphan_cleanup()
3368 if (path->slots[0] == 0) btrfs_orphan_cleanup()
3370 path->slots[0]--; btrfs_orphan_cleanup()
3374 leaf = path->nodes[0]; btrfs_orphan_cleanup()
3375 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_orphan_cleanup()
3383 /* release the path since we're done with it */ btrfs_orphan_cleanup()
3384 btrfs_release_path(path); btrfs_orphan_cleanup()
3503 /* release the path since we're done with it */ btrfs_orphan_cleanup()
3504 btrfs_release_path(path); btrfs_orphan_cleanup()
3528 btrfs_free_path(path); btrfs_orphan_cleanup()
3606 struct btrfs_path *path; btrfs_read_locked_inode() local
3622 path = btrfs_alloc_path(); btrfs_read_locked_inode()
3623 if (!path) btrfs_read_locked_inode()
3628 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); btrfs_read_locked_inode()
3632 leaf = path->nodes[0]; btrfs_read_locked_inode()
3637 inode_item = btrfs_item_ptr(leaf, path->slots[0], btrfs_read_locked_inode()
3714 path->slots[0]++; btrfs_read_locked_inode()
3716 path->slots[0] >= btrfs_header_nritems(leaf)) btrfs_read_locked_inode()
3719 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); btrfs_read_locked_inode()
3723 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_read_locked_inode()
3741 maybe_acls = acls_after_inode_item(leaf, path->slots[0], btrfs_read_locked_inode()
3744 path->slots[0] = first_xattr_slot; btrfs_read_locked_inode()
3745 ret = btrfs_load_inode_props(inode, path); btrfs_read_locked_inode()
3752 btrfs_free_path(path); btrfs_read_locked_inode()
3785 btrfs_free_path(path); btrfs_read_locked_inode()
3846 struct btrfs_path *path; btrfs_update_inode_item() local
3850 path = btrfs_alloc_path(); btrfs_update_inode_item()
3851 if (!path) btrfs_update_inode_item()
3854 path->leave_spinning = 1; btrfs_update_inode_item()
3855 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, btrfs_update_inode_item()
3863 leaf = path->nodes[0]; btrfs_update_inode_item()
3864 inode_item = btrfs_item_ptr(leaf, path->slots[0], btrfs_update_inode_item()
3872 btrfs_free_path(path); btrfs_update_inode_item()
3927 struct btrfs_path *path; __btrfs_unlink_inode() local
3936 path = btrfs_alloc_path(); __btrfs_unlink_inode()
3937 if (!path) { __btrfs_unlink_inode()
3942 path->leave_spinning = 1; __btrfs_unlink_inode()
3943 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, __btrfs_unlink_inode()
3953 leaf = path->nodes[0]; __btrfs_unlink_inode()
3955 ret = btrfs_delete_one_dir_name(trans, root, path, di); __btrfs_unlink_inode()
3958 btrfs_release_path(path); __btrfs_unlink_inode()
4008 btrfs_free_path(path); __btrfs_unlink_inode()
4092 struct btrfs_path *path; btrfs_unlink_subvol() local
4100 path = btrfs_alloc_path(); btrfs_unlink_subvol()
4101 if (!path) btrfs_unlink_subvol()
4104 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, btrfs_unlink_subvol()
4114 leaf = path->nodes[0]; btrfs_unlink_subvol()
4117 ret = btrfs_delete_one_dir_name(trans, root, path, di); btrfs_unlink_subvol()
4122 btrfs_release_path(path); btrfs_unlink_subvol()
4132 di = btrfs_search_dir_index_item(root, path, dir_ino, btrfs_unlink_subvol()
4143 leaf = path->nodes[0]; btrfs_unlink_subvol()
4144 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_unlink_subvol()
4145 btrfs_release_path(path); btrfs_unlink_subvol()
4148 btrfs_release_path(path); btrfs_unlink_subvol()
4163 btrfs_free_path(path); btrfs_unlink_subvol()
4223 struct btrfs_path *path, truncate_inline_extent()
4228 struct extent_buffer *leaf = path->nodes[0]; truncate_inline_extent()
4229 int slot = path->slots[0]; truncate_inline_extent()
4246 * We release the path because to get the page we might need to truncate_inline_extent()
4249 btrfs_release_path(path); truncate_inline_extent()
4255 btrfs_truncate_item(root, path, size, 1); truncate_inline_extent()
4279 struct btrfs_path *path; btrfs_truncate_inode_items() local
4313 path = btrfs_alloc_path(); btrfs_truncate_inode_items()
4314 if (!path) btrfs_truncate_inode_items()
4316 path->reada = -1; btrfs_truncate_inode_items()
4355 path->leave_spinning = 1; btrfs_truncate_inode_items()
4356 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_truncate_inode_items()
4366 if (path->slots[0] == 0) btrfs_truncate_inode_items()
4368 path->slots[0]--; btrfs_truncate_inode_items()
4373 leaf = path->nodes[0]; btrfs_truncate_inode_items()
4374 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_truncate_inode_items()
4385 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_truncate_inode_items()
4393 path->slots[0], fi); btrfs_truncate_inode_items()
4461 * Need to release path in order to truncate a btrfs_truncate_inode_items()
4467 err = btrfs_del_items(trans, root, path, btrfs_truncate_inode_items()
4479 err = truncate_inline_extent(inode, path, btrfs_truncate_inode_items()
4497 pending_del_slot = path->slots[0]; btrfs_truncate_inode_items()
4500 path->slots[0] + 1 == pending_del_slot) { btrfs_truncate_inode_items()
4503 pending_del_slot = path->slots[0]; btrfs_truncate_inode_items()
4515 btrfs_set_path_blocking(path); btrfs_truncate_inode_items()
4540 if (path->slots[0] == 0 || btrfs_truncate_inode_items()
4541 path->slots[0] != pending_del_slot || btrfs_truncate_inode_items()
4544 ret = btrfs_del_items(trans, root, path, btrfs_truncate_inode_items()
4554 btrfs_release_path(path); btrfs_truncate_inode_items()
4574 path->slots[0]--; btrfs_truncate_inode_items()
4579 ret = btrfs_del_items(trans, root, path, pending_del_slot, btrfs_truncate_inode_items()
4588 btrfs_free_path(path); btrfs_truncate_inode_items()
5316 struct btrfs_path *path; btrfs_inode_by_name() local
5320 path = btrfs_alloc_path(); btrfs_inode_by_name()
5321 if (!path) btrfs_inode_by_name()
5324 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, btrfs_inode_by_name()
5332 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); btrfs_inode_by_name()
5334 btrfs_free_path(path); btrfs_inode_by_name()
5352 struct btrfs_path *path; fixup_tree_root_location() local
5360 path = btrfs_alloc_path(); fixup_tree_root_location()
5361 if (!path) { fixup_tree_root_location()
5371 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path, fixup_tree_root_location()
5379 leaf = path->nodes[0]; fixup_tree_root_location()
5380 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); fixup_tree_root_location()
5391 btrfs_release_path(path); fixup_tree_root_location()
5405 btrfs_free_path(path); fixup_tree_root_location()
5724 struct btrfs_path *path; btrfs_real_readdir() local
5749 path = btrfs_alloc_path(); btrfs_real_readdir()
5750 if (!path) btrfs_real_readdir()
5753 path->reada = 1; btrfs_real_readdir()
5765 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_real_readdir()
5771 leaf = path->nodes[0]; btrfs_real_readdir()
5772 slot = path->slots[0]; btrfs_real_readdir()
5774 ret = btrfs_next_leaf(root, path); btrfs_real_readdir()
5856 path->slots[0]++; btrfs_real_readdir()
5907 btrfs_free_path(path); btrfs_real_readdir()
6004 struct btrfs_path *path; btrfs_set_inode_index_count() local
6012 path = btrfs_alloc_path(); btrfs_set_inode_index_count()
6013 if (!path) btrfs_set_inode_index_count()
6016 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_set_inode_index_count()
6030 if (path->slots[0] == 0) { btrfs_set_inode_index_count()
6035 path->slots[0]--; btrfs_set_inode_index_count()
6037 leaf = path->nodes[0]; btrfs_set_inode_index_count()
6038 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_set_inode_index_count()
6048 btrfs_free_path(path); btrfs_set_inode_index_count()
6096 struct btrfs_path *path; btrfs_new_inode() local
6104 path = btrfs_alloc_path(); btrfs_new_inode()
6105 if (!path) btrfs_new_inode()
6110 btrfs_free_path(path); btrfs_new_inode()
6132 btrfs_free_path(path); btrfs_new_inode()
6187 path->leave_spinning = 1; btrfs_new_inode()
6188 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems); btrfs_new_inode()
6200 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_new_inode()
6202 memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item, btrfs_new_inode()
6204 fill_inode_item(trans, path->nodes[0], inode_item, inode); btrfs_new_inode()
6207 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, btrfs_new_inode()
6209 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); btrfs_new_inode()
6210 btrfs_set_inode_ref_index(path->nodes[0], ref, *index); btrfs_new_inode()
6212 write_extent_buffer(path->nodes[0], name, ptr, name_len); btrfs_new_inode()
6215 btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_new_inode()
6216 btrfs_free_path(path); btrfs_new_inode()
6248 btrfs_free_path(path); btrfs_new_inode()
6699 static noinline int uncompress_inline(struct btrfs_path *path, uncompress_inline() argument
6705 struct extent_buffer *leaf = path->nodes[0]; uncompress_inline()
6716 btrfs_item_nr(path->slots[0])); uncompress_inline()
6750 struct btrfs_path *path = NULL; btrfs_get_extent() local
6787 if (!path) { btrfs_get_extent()
6788 path = btrfs_alloc_path(); btrfs_get_extent()
6789 if (!path) { btrfs_get_extent()
6797 path->reada = 1; btrfs_get_extent()
6800 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_get_extent()
6808 if (path->slots[0] == 0) btrfs_get_extent()
6810 path->slots[0]--; btrfs_get_extent()
6813 leaf = path->nodes[0]; btrfs_get_extent()
6814 item = btrfs_item_ptr(leaf, path->slots[0], btrfs_get_extent()
6817 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_get_extent()
6839 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); btrfs_get_extent()
6844 path->slots[0]++; btrfs_get_extent()
6845 if (path->slots[0] >= btrfs_header_nritems(leaf)) { btrfs_get_extent()
6846 ret = btrfs_next_leaf(root, path); btrfs_get_extent()
6853 leaf = path->nodes[0]; btrfs_get_extent()
6855 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_get_extent()
6869 btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em); btrfs_get_extent()
6884 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); btrfs_get_extent()
6896 ret = uncompress_inline(path, inode, page, btrfs_get_extent()
6922 btrfs_release_path(path); btrfs_get_extent()
6947 btrfs_release_path(path); btrfs_get_extent()
6996 btrfs_free_path(path); btrfs_get_extent()
7179 struct btrfs_path *path; can_nocow_extent() local
7194 path = btrfs_alloc_path(); can_nocow_extent()
7195 if (!path) can_nocow_extent()
7198 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), can_nocow_extent()
7203 slot = path->slots[0]; can_nocow_extent()
7213 leaf = path->nodes[0]; can_nocow_extent()
7274 btrfs_release_path(path); can_nocow_extent()
7311 btrfs_free_path(path); can_nocow_extent()
7573 * buffered path. btrfs_get_blocks_direct()
8800 * prepare_pages in the normal write path. btrfs_page_mkwrite()
9644 struct btrfs_path *path; btrfs_symlink() local
9703 path = btrfs_alloc_path(); btrfs_symlink()
9704 if (!path) { btrfs_symlink()
9712 err = btrfs_insert_empty_item(trans, root, path, &key, btrfs_symlink()
9715 btrfs_free_path(path); btrfs_symlink()
9718 leaf = path->nodes[0]; btrfs_symlink()
9719 ei = btrfs_item_ptr(leaf, path->slots[0], btrfs_symlink()
9732 btrfs_free_path(path); btrfs_symlink()
136 insert_inline_extent(struct btrfs_trans_handle *trans, struct btrfs_path *path, int extent_inserted, struct btrfs_root *root, struct inode *inode, u64 start, size_t size, size_t compressed_size, int compress_type, struct page **compressed_pages) insert_inline_extent() argument
4222 truncate_inline_extent(struct inode *inode, struct btrfs_path *path, struct btrfs_key *found_key, const u64 item_end, const u64 new_size) truncate_inline_extent() argument
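The inode.c hits above all follow one lifecycle: btrfs_alloc_path(), btrfs_search_slot(), read items through path->nodes[0] and path->slots[0], btrfs_release_path() before blocking or re-searching, and btrfs_free_path() exactly once on exit. A minimal user-space sketch of that lifecycle follows; struct path_ctx, search() and the other helpers are invented stand-ins for the kernel API, not the real thing.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct btrfs_path: one cached leaf + slot. */
struct path_ctx {
	void *node;	/* models path->nodes[0] */
	int   slot;	/* models path->slots[0] */
};

static struct path_ctx *path_alloc(void)	/* ~ btrfs_alloc_path() */
{
	return calloc(1, sizeof(struct path_ctx));
}

static void path_release(struct path_ctx *p)	/* ~ btrfs_release_path() */
{
	p->node = NULL;				/* drop refs, keep the struct */
	p->slot = 0;
}

static void path_free(struct path_ctx *p)	/* ~ btrfs_free_path() */
{
	if (!p)
		return;
	path_release(p);
	free(p);
}

static int search(struct path_ctx *p, int key)	/* ~ btrfs_search_slot() */
{
	static int leaf[] = { 10, 20, 30 };

	p->node = leaf;
	p->slot = key % 3;
	return 0;
}

int main(void)
{
	struct path_ctx *p = path_alloc();

	if (!p)
		return 1;			/* ~ return -ENOMEM */
	if (search(p, 42) == 0)
		printf("item at slot %d\n", p->slot);
	path_release(p);			/* done with this leaf */
	/* ... a second search may reuse the same struct ... */
	path_free(p);				/* exactly once, on exit */
	return 0;
}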
H A Dvolumes.c553 * comes back (with new uuid and) with its mapper path? btrfs_free_stale_device()
555 * either use mapper or non mapper path throughout. btrfs_free_stale_device()
590 static noinline int device_list_add(const char *path, device_list_add() argument
625 name = rcu_string_strdup(path, GFP_NOFS); device_list_add()
639 } else if (!device->name || strcmp(device->name->str, path)) { device_list_add()
645 * from 'path' that means either device_list_add()
654 * Further in case of 1 and 2a above, the disk at 'path' device_list_add()
677 name = rcu_string_strdup(path, GFP_NOFS); device_list_add()
762 /* This is the initialized path, it is safe to release the devices. */ btrfs_close_extra_devices()
990 * Look for a btrfs signature on a device. This may be called out of the mount path
994 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, btrfs_scan_one_device() argument
1018 bdev = blkdev_get_by_path(path, flags, holder); btrfs_scan_one_device()
1058 ret = device_list_add(path, disk_super, devid, fs_devices_ret); btrfs_scan_one_device()
1068 printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path); btrfs_scan_one_device()
1092 struct btrfs_path *path; btrfs_account_dev_extents_size() local
1103 path = btrfs_alloc_path(); btrfs_account_dev_extents_size()
1104 if (!path) btrfs_account_dev_extents_size()
1106 path->reada = 2; btrfs_account_dev_extents_size()
1112 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_account_dev_extents_size()
1116 ret = btrfs_previous_item(root, path, key.objectid, key.type); btrfs_account_dev_extents_size()
1122 l = path->nodes[0]; btrfs_account_dev_extents_size()
1123 slot = path->slots[0]; btrfs_account_dev_extents_size()
1125 ret = btrfs_next_leaf(root, path); btrfs_account_dev_extents_size()
1161 path->slots[0]++; btrfs_account_dev_extents_size()
1165 btrfs_free_path(path); btrfs_account_dev_extents_size()
1252 struct btrfs_path *path; find_free_dev_extent_start() local
1271 path = btrfs_alloc_path(); find_free_dev_extent_start()
1272 if (!path) find_free_dev_extent_start()
1284 path->reada = 2; find_free_dev_extent_start()
1285 path->search_commit_root = 1; find_free_dev_extent_start()
1286 path->skip_locking = 1; find_free_dev_extent_start()
1292 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); find_free_dev_extent_start()
1296 ret = btrfs_previous_item(root, path, key.objectid, key.type); find_free_dev_extent_start()
1302 l = path->nodes[0]; find_free_dev_extent_start()
1303 slot = path->slots[0]; find_free_dev_extent_start()
1305 ret = btrfs_next_leaf(root, path); find_free_dev_extent_start()
1368 path->slots[0]++; find_free_dev_extent_start()
1382 btrfs_release_path(path); find_free_dev_extent_start()
1399 btrfs_free_path(path); find_free_dev_extent_start()
1420 struct btrfs_path *path; btrfs_free_dev_extent() local
1427 path = btrfs_alloc_path(); btrfs_free_dev_extent()
1428 if (!path) btrfs_free_dev_extent()
1435 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_free_dev_extent()
1437 ret = btrfs_previous_item(root, path, key.objectid, btrfs_free_dev_extent()
1441 leaf = path->nodes[0]; btrfs_free_dev_extent()
1442 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_free_dev_extent()
1443 extent = btrfs_item_ptr(leaf, path->slots[0], btrfs_free_dev_extent()
1448 btrfs_release_path(path); btrfs_free_dev_extent()
1451 leaf = path->nodes[0]; btrfs_free_dev_extent()
1452 extent = btrfs_item_ptr(leaf, path->slots[0], btrfs_free_dev_extent()
1461 ret = btrfs_del_item(trans, root, path); btrfs_free_dev_extent()
1469 btrfs_free_path(path); btrfs_free_dev_extent()
1479 struct btrfs_path *path; btrfs_alloc_dev_extent() local
1487 path = btrfs_alloc_path(); btrfs_alloc_dev_extent()
1488 if (!path) btrfs_alloc_dev_extent()
1494 ret = btrfs_insert_empty_item(trans, root, path, &key, btrfs_alloc_dev_extent()
1499 leaf = path->nodes[0]; btrfs_alloc_dev_extent()
1500 extent = btrfs_item_ptr(leaf, path->slots[0], btrfs_alloc_dev_extent()
1512 btrfs_free_path(path); btrfs_alloc_dev_extent()
1541 struct btrfs_path *path; find_next_devid() local
1543 path = btrfs_alloc_path(); find_next_devid()
1544 if (!path) find_next_devid()
1551 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); find_next_devid()
1557 ret = btrfs_previous_item(fs_info->chunk_root, path, find_next_devid()
1563 btrfs_item_key_to_cpu(path->nodes[0], &found_key, find_next_devid()
1564 path->slots[0]); find_next_devid()
1569 btrfs_free_path(path); find_next_devid()
1582 struct btrfs_path *path; btrfs_add_device() local
1590 path = btrfs_alloc_path(); btrfs_add_device()
1591 if (!path) btrfs_add_device()
1598 ret = btrfs_insert_empty_item(trans, root, path, &key, btrfs_add_device()
1603 leaf = path->nodes[0]; btrfs_add_device()
1604 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); btrfs_add_device()
1629 btrfs_free_path(path); btrfs_add_device()
1634 * Function to update ctime/mtime for a given device path.
1653 struct btrfs_path *path; btrfs_rm_dev_item() local
1659 path = btrfs_alloc_path(); btrfs_rm_dev_item()
1660 if (!path) btrfs_rm_dev_item()
1665 btrfs_free_path(path); btrfs_rm_dev_item()
1672 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_rm_dev_item()
1681 ret = btrfs_del_item(trans, root, path); btrfs_rm_dev_item()
1685 btrfs_free_path(path); btrfs_rm_dev_item()
1931 /* Update ctime/mtime for device path for libblkid */
2172 struct btrfs_path *path; btrfs_finish_sprout() local
2182 path = btrfs_alloc_path(); btrfs_finish_sprout()
2183 if (!path) btrfs_finish_sprout()
2192 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); btrfs_finish_sprout()
2196 leaf = path->nodes[0]; btrfs_finish_sprout()
2198 if (path->slots[0] >= btrfs_header_nritems(leaf)) { btrfs_finish_sprout()
2199 ret = btrfs_next_leaf(root, path); btrfs_finish_sprout()
2204 leaf = path->nodes[0]; btrfs_finish_sprout()
2205 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_finish_sprout()
2206 btrfs_release_path(path); btrfs_finish_sprout()
2210 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_finish_sprout()
2215 dev_item = btrfs_item_ptr(leaf, path->slots[0], btrfs_finish_sprout()
2232 path->slots[0]++; btrfs_finish_sprout()
2237 btrfs_free_path(path); btrfs_finish_sprout()
2562 struct btrfs_path *path; btrfs_update_device() local
2570 path = btrfs_alloc_path(); btrfs_update_device()
2571 if (!path) btrfs_update_device()
2578 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); btrfs_update_device()
2587 leaf = path->nodes[0]; btrfs_update_device()
2588 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); btrfs_update_device()
2602 btrfs_free_path(path); btrfs_update_device()
2649 struct btrfs_path *path; btrfs_free_chunk() local
2653 path = btrfs_alloc_path(); btrfs_free_chunk()
2654 if (!path) btrfs_free_chunk()
2661 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); btrfs_free_chunk()
2671 ret = btrfs_del_item(trans, root, path); btrfs_free_chunk()
2676 btrfs_free_path(path); btrfs_free_chunk()
2839 * we release the path used to search the chunk/dev tree and before btrfs_relocate_chunk()
2875 struct btrfs_path *path; btrfs_relocate_sys_chunks() local
2885 path = btrfs_alloc_path(); btrfs_relocate_sys_chunks()
2886 if (!path) btrfs_relocate_sys_chunks()
2896 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); btrfs_relocate_sys_chunks()
2903 ret = btrfs_previous_item(chunk_root, path, key.objectid, btrfs_relocate_sys_chunks()
2912 leaf = path->nodes[0]; btrfs_relocate_sys_chunks()
2913 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_relocate_sys_chunks()
2915 chunk = btrfs_item_ptr(leaf, path->slots[0], btrfs_relocate_sys_chunks()
2918 btrfs_release_path(path); btrfs_relocate_sys_chunks()
2943 btrfs_free_path(path); btrfs_relocate_sys_chunks()
2953 struct btrfs_path *path; insert_balance_item() local
2958 path = btrfs_alloc_path(); insert_balance_item()
2959 if (!path) insert_balance_item()
2964 btrfs_free_path(path); insert_balance_item()
2972 ret = btrfs_insert_empty_item(trans, root, path, &key, insert_balance_item()
2977 leaf = path->nodes[0]; insert_balance_item()
2978 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); insert_balance_item()
2993 btrfs_free_path(path); insert_balance_item()
3003 struct btrfs_path *path; del_balance_item() local
3007 path = btrfs_alloc_path(); del_balance_item()
3008 if (!path) del_balance_item()
3013 btrfs_free_path(path); del_balance_item()
3021 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); del_balance_item()
3029 ret = btrfs_del_item(trans, root, path); del_balance_item()
3031 btrfs_free_path(path); del_balance_item()
3387 struct btrfs_path *path; __btrfs_balance() local
3432 path = btrfs_alloc_path(); __btrfs_balance()
3433 if (!path) { __btrfs_balance()
3464 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); __btrfs_balance()
3477 ret = btrfs_previous_item(chunk_root, path, 0, __btrfs_balance()
3485 leaf = path->nodes[0]; __btrfs_balance()
3486 slot = path->slots[0]; __btrfs_balance()
3506 btrfs_release_path(path); __btrfs_balance()
3579 btrfs_release_path(path); __btrfs_balance()
3584 btrfs_free_path(path); __btrfs_balance()
3621 /* cancel requested || normal exit path */ balance_need_close()
3861 struct btrfs_path *path; btrfs_recover_balance() local
3866 path = btrfs_alloc_path(); btrfs_recover_balance()
3867 if (!path) btrfs_recover_balance()
3874 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); btrfs_recover_balance()
3888 leaf = path->nodes[0]; btrfs_recover_balance()
3889 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); btrfs_recover_balance()
3912 btrfs_free_path(path); btrfs_recover_balance()
3990 struct btrfs_path *path = NULL; btrfs_uuid_scan_kthread() local
3998 path = btrfs_alloc_path(); btrfs_uuid_scan_kthread()
3999 if (!path) { btrfs_uuid_scan_kthread()
4013 ret = btrfs_search_forward(root, &key, path, 0); btrfs_uuid_scan_kthread()
4026 eb = path->nodes[0]; btrfs_uuid_scan_kthread()
4027 slot = path->slots[0]; btrfs_uuid_scan_kthread()
4043 btrfs_release_path(path); btrfs_uuid_scan_kthread()
4090 btrfs_release_path(path); btrfs_uuid_scan_kthread()
4107 btrfs_free_path(path); btrfs_uuid_scan_kthread()
4250 struct btrfs_path *path; btrfs_shrink_device() local
4268 path = btrfs_alloc_path(); btrfs_shrink_device()
4269 if (!path) btrfs_shrink_device()
4272 path->reada = 2; btrfs_shrink_device()
4292 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_shrink_device()
4298 ret = btrfs_previous_item(root, path, 0, key.type); btrfs_shrink_device()
4305 btrfs_release_path(path); btrfs_shrink_device()
4309 l = path->nodes[0]; btrfs_shrink_device()
4310 slot = path->slots[0]; btrfs_shrink_device()
4311 btrfs_item_key_to_cpu(l, &key, path->slots[0]); btrfs_shrink_device()
4315 btrfs_release_path(path); btrfs_shrink_device()
4324 btrfs_release_path(path); btrfs_shrink_device()
4329 btrfs_release_path(path); btrfs_shrink_device()
4399 btrfs_free_path(path); btrfs_shrink_device()
6551 struct btrfs_path *path; btrfs_read_chunk_tree() local
6560 path = btrfs_alloc_path(); btrfs_read_chunk_tree()
6561 if (!path) btrfs_read_chunk_tree()
6576 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_read_chunk_tree()
6580 leaf = path->nodes[0]; btrfs_read_chunk_tree()
6581 slot = path->slots[0]; btrfs_read_chunk_tree()
6583 ret = btrfs_next_leaf(root, path); btrfs_read_chunk_tree()
6605 path->slots[0]++; btrfs_read_chunk_tree()
6612 btrfs_free_path(path); btrfs_read_chunk_tree()
6649 struct btrfs_path *path = NULL; btrfs_init_dev_stats() local
6652 path = btrfs_alloc_path(); btrfs_init_dev_stats()
6653 if (!path) { btrfs_init_dev_stats()
6666 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); btrfs_init_dev_stats()
6670 btrfs_release_path(path); btrfs_init_dev_stats()
6673 slot = path->slots[0]; btrfs_init_dev_stats()
6674 eb = path->nodes[0]; btrfs_init_dev_stats()
6691 btrfs_release_path(path); btrfs_init_dev_stats()
6696 btrfs_free_path(path); btrfs_init_dev_stats()
6704 struct btrfs_path *path; update_dev_stat_item() local
6715 path = btrfs_alloc_path(); update_dev_stat_item()
6716 BUG_ON(!path); update_dev_stat_item()
6717 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); update_dev_stat_item()
6726 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { update_dev_stat_item()
6728 ret = btrfs_del_item(trans, dev_root, path); update_dev_stat_item()
6740 btrfs_release_path(path); update_dev_stat_item()
6741 ret = btrfs_insert_empty_item(trans, dev_root, path, update_dev_stat_item()
6751 eb = path->nodes[0]; update_dev_stat_item()
6752 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); update_dev_stat_item()
6759 btrfs_free_path(path); update_dev_stat_item()
6891 /* Update ctime/mtime for device path for libblkid */ btrfs_scratch_superblocks()
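Several of the volumes.c walkers above (btrfs_account_dev_extents_size, find_free_dev_extent_start, btrfs_read_chunk_tree) share one loop shape: position the path with a search, bump path->slots[0] per item, and call btrfs_next_leaf() once the slot runs past the current leaf. A runnable user-space model of that cursor movement, with a toy two-leaf "tree" standing in for the real structures:

#include <stdio.h>

/* Toy two-leaf tree: each "leaf" is a fixed array of keys. */
static int leaves[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };

struct cursor { int leaf, slot; };	/* models path->nodes[0]/slots[0] */

static int next_leaf(struct cursor *c)	/* ~ btrfs_next_leaf(): >0 = done */
{
	if (c->leaf + 1 >= 2)
		return 1;
	c->leaf++;
	c->slot = 0;
	return 0;
}

int main(void)
{
	struct cursor c = { 0, 0 };	/* ~ btrfs_search_slot() result */

	for (;;) {
		if (c.slot >= 3) {	/* ran off the current leaf */
			if (next_leaf(&c))
				break;	/* no more leaves: iteration done */
			continue;
		}
		printf("key %d\n", leaves[c.leaf][c.slot]);
		c.slot++;		/* ~ path->slots[0]++ */
	}
	return 0;
}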
/linux-4.4.14/drivers/md/
H A Ddm-path-selector.h20 * We provide an abstraction for the code that chooses which path
29 /* Information about a path selector type */
38 * Constructs a path selector object, takes custom arguments
44 * Add an opaque path object, along with some selector specific
45 * path args (eg, path priority).
47 int (*add_path) (struct path_selector *ps, struct dm_path *path,
51 * Chooses a path for this io, if no paths are available then
54 * repeat_count is the number of times to use the path before
56 * the path fails.
63 * Notify the selector that a path has failed.
68 * Ask selector to reinstate a path.
74 * or path selector status
76 int (*status) (struct path_selector *ps, struct dm_path *path,
79 int (*start_io) (struct path_selector *ps, struct dm_path *path,
81 int (*end_io) (struct path_selector *ps, struct dm_path *path,
85 /* Register a path selector */
88 /* Unregister a path selector */
91 /* Returns a registered path selector type */
94 /* Releases a path selector */
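dm-path-selector.h above is a pluggable ops table: each selector registers add_path/select_path/fail_path/reinstate_path and the multipath target calls through the pointers. A reduced user-space model of that shape (toy_path, toy_selector_type and the trivial "first-valid" policy are all invented here for illustration):

#include <stdio.h>

struct toy_path { const char *name; int failed; };

/* Modeled after the ops in dm-path-selector.h, heavily reduced. */
struct toy_selector_type {
	const char *name;
	struct toy_path *(*select_path)(struct toy_path *paths, int n);
	void (*fail_path)(struct toy_path *p);
	void (*reinstate_path)(struct toy_path *p);
};

static struct toy_path *first_valid(struct toy_path *paths, int n)
{
	for (int i = 0; i < n; i++)
		if (!paths[i].failed)
			return &paths[i];
	return NULL;		/* no usable path: caller must error the I/O */
}

static void mark_failed(struct toy_path *p)	{ p->failed = 1; }
static void mark_reinstated(struct toy_path *p)	{ p->failed = 0; }

static struct toy_selector_type first_valid_ps = {
	.name		= "first-valid",
	.select_path	= first_valid,
	.fail_path	= mark_failed,
	.reinstate_path	= mark_reinstated,
};

int main(void)
{
	struct toy_path paths[] = { { "sda", 0 }, { "sdb", 0 } };
	struct toy_path *p = first_valid_ps.select_path(paths, 2);

	printf("using %s\n", p->name);
	first_valid_ps.fail_path(p);		/* an I/O error was reported */
	p = first_valid_ps.select_path(paths, 2);
	printf("failed over to %s\n", p ? p->name : "(none)");
	return 0;
}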
H A Ddm-queue-length.c12 * queue-length path selector - choose a path with the least number of
17 #include "dm-path-selector.h"
36 struct dm_path *path; member in struct:path_info
84 static int ql_status(struct path_selector *ps, struct dm_path *path, ql_status() argument
90 /* When called with NULL path, return selector status/args. */ ql_status()
91 if (!path) ql_status()
94 pi = path->pscontext; ql_status()
109 static int ql_add_path(struct path_selector *ps, struct dm_path *path, ql_add_path() argument
119 * <repeat_count>: The number of I/Os before switching path. ql_add_path()
132 /* Allocate the path information structure */ ql_add_path()
135 *error = "queue-length ps: Error allocating path information"; ql_add_path()
139 pi->path = path; ql_add_path()
143 path->pscontext = pi; ql_add_path()
150 static void ql_fail_path(struct path_selector *ps, struct dm_path *path) ql_fail_path() argument
153 struct path_info *pi = path->pscontext; ql_fail_path()
158 static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path) ql_reinstate_path() argument
161 struct path_info *pi = path->pscontext; ql_reinstate_path()
169 * Select a path having the minimum number of in-flight I/Os
180 /* Change preferred (first in list) path to evenly balance. */ ql_select_path()
197 return best->path; ql_select_path()
200 static int ql_start_io(struct path_selector *ps, struct dm_path *path, ql_start_io() argument
203 struct path_info *pi = path->pscontext; ql_start_io()
210 static int ql_end_io(struct path_selector *ps, struct dm_path *path, ql_end_io() argument
213 struct path_info *pi = path->pscontext; ql_end_io()
262 DM_NAME " path selector to balance the number of in-flight I/Os"
H A Ddm-mpath.h16 void *pscontext; /* For path-selector use */
20 void dm_pg_init_complete(struct dm_path *path, unsigned err_flags);
H A Ddm-service-time.c8 * Throughput oriented path selector.
12 #include "dm-path-selector.h"
31 struct dm_path *path; member in struct:path_info
80 static int st_status(struct path_selector *ps, struct dm_path *path, st_status() argument
86 if (!path) st_status()
89 pi = path->pscontext; st_status()
106 static int st_add_path(struct path_selector *ps, struct dm_path *path, st_add_path() argument
117 * <repeat_count>: The number of I/Os before switching path. st_add_path()
120 * the path among all paths in the path-group. st_add_path()
123 * If '0' is given, the path isn't selected while st_add_path()
144 /* allocate the path */ st_add_path()
147 *error = "service-time ps: Error allocating path context"; st_add_path()
151 pi->path = path; st_add_path()
156 path->pscontext = pi; st_add_path()
163 static void st_fail_path(struct path_selector *ps, struct dm_path *path) st_fail_path() argument
166 struct path_info *pi = path->pscontext; st_fail_path()
171 static int st_reinstate_path(struct path_selector *ps, struct dm_path *path) st_reinstate_path() argument
174 struct path_info *pi = path->pscontext; st_reinstate_path()
205 * Case 1: Both have same throughput value. Choose less loaded path. st_compare_load()
211 * Case 2a: Both have same load. Choose higher throughput path. st_compare_load()
212 * Case 2b: One path has no throughput value. Choose the other one. st_compare_load()
219 * Case 3: Calculate service time. Choose faster path. st_compare_load()
253 * Case 4: Service time is equal. Choose higher throughput path. st_compare_load()
267 /* Change preferred (first in list) path to evenly balance. */ st_select_path()
279 return best->path; st_select_path()
282 static int st_start_io(struct path_selector *ps, struct dm_path *path, st_start_io() argument
285 struct path_info *pi = path->pscontext; st_start_io()
292 static int st_end_io(struct path_selector *ps, struct dm_path *path, st_end_io() argument
295 struct path_info *pi = path->pscontext; st_end_io()
341 MODULE_DESCRIPTION(DM_NAME " throughput oriented path selector");
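The Case 1-4 comments above describe st_compare_load(): equal throughput falls back to load, a missing throughput value loses, otherwise service time = in-flight size / throughput decides, with throughput breaking ties. A user-space sketch of that comparison, cross-multiplying so no division is needed (overflow handling and the exact kernel arithmetic are simplified away):

#include <stdio.h>

struct toy_path {
	const char *name;
	unsigned long long in_flight_size;	/* bytes queued on this path */
	unsigned long long perf;		/* relative throughput, 0 = unset */
};

/* <0: a is the better pick; >0: b is. Mirrors the cases documented
 * in st_compare_load() above. */
static int st_compare(const struct toy_path *a, const struct toy_path *b)
{
	/* Case 1: same throughput -> the less loaded path wins. */
	if (a->perf == b->perf)
		return (a->in_flight_size < b->in_flight_size) ? -1 : 1;
	/* Case 2b: a path without a throughput value loses. */
	if (!a->perf)
		return 1;
	if (!b->perf)
		return -1;
	/* Cases 2a/3: service time = load / throughput, compared by
	 * cross-multiplication (equal loads reduce to higher perf wins). */
	unsigned long long sta = a->in_flight_size * b->perf;
	unsigned long long stb = b->in_flight_size * a->perf;
	if (sta != stb)
		return (sta < stb) ? -1 : 1;
	/* Case 4: equal service time -> higher throughput wins. */
	return (a->perf > b->perf) ? -1 : 1;
}

int main(void)
{
	struct toy_path a = { "sda", 4096, 100 }, b = { "sdb", 1024, 10 };

	/* sda: 4096/100 ~ 41; sdb: 1024/10 ~ 102 -> sda is faster. */
	printf("pick %s\n", st_compare(&a, &b) < 0 ? a.name : b.name);
	return 0;
}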
H A Ddm-round-robin.c9 * Round-robin path selector.
14 #include "dm-path-selector.h"
26 struct dm_path *path; member in struct:path_info
85 static int rr_status(struct path_selector *ps, struct dm_path *path, rr_status() argument
91 if (!path) rr_status()
98 pi = path->pscontext; rr_status()
108 * Called during initialisation to register each path with an
111 static int rr_add_path(struct path_selector *ps, struct dm_path *path, rr_add_path() argument
124 /* First path argument is number of I/Os before switching path */ rr_add_path()
130 /* allocate the path */ rr_add_path()
133 *error = "round-robin ps: Error allocating path context"; rr_add_path()
137 pi->path = path; rr_add_path()
140 path->pscontext = pi; rr_add_path()
177 return pi ? pi->path : NULL; rr_select_path()
217 MODULE_DESCRIPTION(DM_NAME " round-robin multipath path selector");
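Round-robin adds one knob: repeat_count, the number of I/Os issued on a path before rotating to the next one (see rr_add_path() above). A runnable model of that rotation (struct toy_rr is invented for illustration):

#include <stdio.h>

struct toy_rr {
	const char *paths[3];
	int n;
	int cur;		/* index of the path currently in use */
	int repeat_count;	/* I/Os to issue before switching paths */
	int used;		/* I/Os issued on the current path so far */
};

/* Rotate to the next path every repeat_count I/Os, round-robin style. */
static const char *rr_pick(struct toy_rr *rr)
{
	if (rr->used >= rr->repeat_count) {
		rr->cur = (rr->cur + 1) % rr->n;
		rr->used = 0;
	}
	rr->used++;
	return rr->paths[rr->cur];
}

int main(void)
{
	struct toy_rr rr = { { "sda", "sdb", "sdc" }, 3, 0, 2, 0 };

	for (int io = 0; io < 6; io++)		/* sda,sda,sdb,sdb,sdc,sdc */
		printf("io %d -> %s\n", io, rr_pick(&rr));
	return 0;
}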
H A Ddm-mpath.c11 #include "dm-path-selector.h"
39 struct dm_path path; member in struct:pgpath
43 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
47 * Each has a path selector which controls which path gets used.
88 unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
165 dm_put_device(ti, pgpath->path.dev); list_for_each_entry_safe()
299 struct dm_path *path; __choose_path_in_pg() local
301 path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes); __choose_path_in_pg()
302 if (!path) __choose_path_in_pg()
305 m->current_pgpath = path_to_pgpath(path); __choose_path_in_pg()
336 * Loop through priority groups until we find a valid path. __choose_pgpath()
416 bdev = pgpath->path.dev->bdev; __multipath_map()
441 &pgpath->path, __multipath_map()
493 * An event is triggered whenever a path is taken out of use.
494 * Includes path failure and PG bypass.
511 * <#paths> <#per-path selector args>
512 * [<path> [<arg>]* ]+ ]+
522 {0, 1024, "invalid number of path selector args"}, parse_path_selector()
527 ti->error = "unknown path selector type"; parse_path_selector()
540 ti->error = "path selector constructor failed"; parse_path_selector()
559 /* we need at least a path arg */ parse_path()
570 &p->path.dev); parse_path()
577 q = bdev_get_queue(p->path.dev->bdev); parse_path()
605 bdevname(p->path.dev->bdev, b)); parse_path()
610 dm_put_device(ti, p->path.dev); parse_path()
619 dm_put_device(ti, p->path.dev); parse_path()
625 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error); parse_path()
627 dm_put_device(ti, p->path.dev); parse_path()
685 ti->error = "not enough path parameters"; parse_priority_group()
942 * Take a path out of use.
954 DMWARN("Failing path %s.", pgpath->path.dev->name); fail_path()
956 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); fail_path()
966 pgpath->path.dev->name, m->nr_valid_paths); fail_path()
977 * Reinstate a previously-failed path
991 DMWARN("Reinstate path not supported by path selector %s", reinstate_path()
997 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); reinstate_path()
1012 pgpath->path.dev->name, m->nr_valid_paths); reinstate_path()
1036 if (pgpath->path.dev == dev) action_dev()
1160 * Fail path for now, so we do not ping pong pg_init_done()
1182 * We probably do not want to fail the path for a device pg_init_done()
1225 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), activate_path()
1242 /* Anything else could be a path failure, so should be retried */ noretry_error()
1306 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); multipath_end_io()
1315 * the last path fails we must error any remaining I/O.
1434 DMEMIT("%s %s %u ", p->path.dev->name, multipath_status()
1439 &p->path, type, result + sz, multipath_status()
1460 DMEMIT("%s ", p->path.dev->name); multipath_status()
1463 &p->path, type, result + sz, multipath_status()
1550 *bdev = m->current_pgpath->path.dev->bdev; multipath_prepare_ioctl()
1551 *mode = m->current_pgpath->path.dev->mode; multipath_prepare_ioctl()
1558 /* No path is available */ multipath_prepare_ioctl()
1597 ret = fn(ti, p->path.dev, ti->begin, ti->len, data); multipath_iterate_devices()
1609 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); __pgpath_busy()
1654 * If there is one non-busy active path at least, the path selector multipath_busy()
1670 * No active path in this pg, so this pg won't be used and multipath_busy()
1732 * path of the storage hardware device activation. dm_multipath_init()
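fail_path() and reinstate_path() above are mostly bookkeeping: adjust m->nr_valid_paths, log the transition, and, once the count reaches zero, either queue or error I/O depending on queue_if_no_path. A tiny model of that state machine (struct toy_mpath is a stand-in, not the kernel's struct multipath):

#include <stdio.h>

struct toy_mpath {
	int nr_valid_paths;
	int queue_if_no_path;	/* queue I/O when the last path fails? */
};

static void fail_path(struct toy_mpath *m, const char *name)
{
	m->nr_valid_paths--;
	printf("Failing path %s. (%d left)\n", name, m->nr_valid_paths);
	if (m->nr_valid_paths == 0)
		puts(m->queue_if_no_path ?
		     "queueing I/O until a path returns" :
		     "erroring all remaining I/O");
}

static void reinstate_path(struct toy_mpath *m, const char *name)
{
	m->nr_valid_paths++;
	printf("Reinstating path %s. (%d usable)\n", name, m->nr_valid_paths);
}

int main(void)
{
	struct toy_mpath m = { 2, 1 };

	fail_path(&m, "sda");
	fail_path(&m, "sdb");		/* last path gone: queue, don't error */
	reinstate_path(&m, "sdb");
	return 0;
}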
H A Dmultipath.h28 int path; member in struct:multipath_bh
H A Ddm-uevent.c75 const char *path, dm_build_path_uevent()
107 if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) { dm_build_path_uevent()
175 * dm_path_uevent - called to create a new path event and queue it
177 * @event_type: path event type enum
179 * @path: string containing pathname
184 const char *path, unsigned nr_valid_paths) dm_path_uevent()
197 path, nr_valid_paths); dm_path_uevent()
71 dm_build_path_uevent(struct mapped_device *md, struct dm_target *ti, enum kobject_action action, const char *dm_action, const char *path, unsigned nr_valid_paths) dm_build_path_uevent() argument
183 dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti, const char *path, unsigned nr_valid_paths) dm_path_uevent() argument
H A Dmultipath.c89 struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; multipath_end_request()
124 mp_bh->path = multipath_map(conf); multipath_make_request()
125 if (mp_bh->path < 0) { multipath_make_request()
130 multipath = conf->multipaths + mp_bh->path; multipath_make_request()
190 * Uh oh, we can do nothing if this is our last path, but multipath_error()
195 "multipath: only one IO path left and IO error.\n"); multipath_error()
211 " disabling IO path.\n" multipath_error()
246 int path; multipath_add_disk() local
256 for (path = first; path <= last; path++) multipath_add_disk()
257 if ((p=conf->multipaths+path)->rdev == NULL) { multipath_add_disk()
267 rdev->raid_disk = path; multipath_add_disk()
343 if ((mp_bh->path = multipath_map (conf))<0) { multipathd()
351 " to another IO path\n", multipathd()
356 conf->multipaths[mp_bh->path].rdev->data_offset; multipathd()
357 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; multipathd()
523 MODULE_DESCRIPTION("simple multi-path personality for MD");
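The md multipath personality above is a retry loop: multipath_map() picks a working disk, and on error multipath_error() marks it bad while multipathd() remaps the bio to whatever is left. A runnable sketch of that failover loop under the same assumptions (two toy paths, path 0 failing):

#include <stdio.h>

#define NPATHS 2

static int path_ok[NPATHS] = { 1, 1 };

/* ~ multipath_map(): first working path, or -1 when none is left. */
static int map_path(void)
{
	for (int i = 0; i < NPATHS; i++)
		if (path_ok[i])
			return i;
	return -1;
}

static int submit_io(int path)
{
	return path == 0 ? -1 : 0;	/* pretend path 0 has gone bad */
}

int main(void)
{
	int path = map_path();

	while (path >= 0 && submit_io(path) < 0) {
		printf("error on path %d, trying another IO path\n", path);
		path_ok[path] = 0;	/* ~ multipath_error() fails the disk */
		path = map_path();	/* ~ multipathd() remaps the bio */
	}
	if (path >= 0)
		printf("I/O completed on path %d\n", path);
	else
		printf("no usable path left, failing the I/O\n");
	return 0;
}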
/linux-4.4.14/tools/perf/tests/
H A Dtopology.c13 static int get_temp(char *path) get_temp() argument
17 strcpy(path, TEMPL); get_temp()
19 fd = mkstemp(path); get_temp()
29 static int session_write_header(char *path) session_write_header() argument
33 .path = path, session_write_header()
56 static int check_cpu_topology(char *path, struct cpu_map *map) check_cpu_topology() argument
60 .path = path, check_cpu_topology()
89 char path[PATH_MAX]; test_session_topology() local
93 TEST_ASSERT_VAL("can't get templ file", !get_temp(path)); test_session_topology()
95 pr_debug("templ file: %s\n", path); test_session_topology()
97 if (session_write_header(path)) test_session_topology()
106 if (check_cpu_topology(path, map)) test_session_topology()
113 unlink(path); test_session_topology()
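get_temp() above is the standard POSIX mkstemp() pattern: copy a template ending in XXXXXX, let mkstemp() substitute a unique suffix and return an open descriptor, then unlink the file once the test is done. A self-contained version (the /tmp template is illustrative, not perf's TEMPL):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>

int main(void)
{
	char path[PATH_MAX];
	int fd;

	/* The trailing XXXXXX is replaced with a unique suffix. */
	strcpy(path, "/tmp/perf-test-XXXXXX");
	fd = mkstemp(path);		/* creates and opens the file */
	if (fd < 0) {
		perror("mkstemp");
		return 1;
	}
	printf("templ file: %s\n", path);

	/* ... write a header, read it back, run the checks ... */

	close(fd);
	unlink(path);			/* clean up, as the test does */
	return 0;
}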
H A Dkmod-path.c6 static int test(const char *path, bool alloc_name, bool alloc_ext, test() argument
14 !__kmod_path__parse(&m, path, alloc_name, alloc_ext)); test()
17 path, alloc_name, alloc_ext, m.kmod, m.comp, m.name, m.ext); test()
37 static int test_is_kernel_module(const char *path, int cpumode, bool expect) test_is_kernel_module() argument
40 (!!is_kernel_module(path, cpumode)) == (!!expect)); test_is_kernel_module()
42 path, cpumode, expect ? "true" : "false"); test_is_kernel_module()
46 #define T(path, an, ae, k, c, n, e) \
47 TEST_ASSERT_VAL("failed", !test(path, an, ae, k, c, n, e))
49 #define M(path, c, e) \
50 TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e))
54 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
63 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
72 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
81 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
90 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
99 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
108 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
117 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
126 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
135 /* path alloc_name alloc_ext kmod comp name ext */ test__kmod_path__parse()
H A Dpython-use.c16 if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | %s %s", test__python_use()
H A Dattr.py17 return '\'%s\' - %s' % (self.test.path, self.msg)
23 return '\'%s\'' % self.test.path
120 def __init__(self, path, options):
122 parser.read(path)
124 log.warning("running '%s'" % path)
126 self.path = path
140 self.load_events(path, self.expect)
148 def load_events(self, path, events):
150 parser_event.read(path)
290 -p path # perf binary
/linux-4.4.14/fs/nilfs2/
H A Dbtree.c38 struct nilfs_btree_path *path; nilfs_btree_alloc_path() local
41 path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS); nilfs_btree_alloc_path()
42 if (path == NULL) nilfs_btree_alloc_path()
46 path[level].bp_bh = NULL; nilfs_btree_alloc_path()
47 path[level].bp_sib_bh = NULL; nilfs_btree_alloc_path()
48 path[level].bp_index = 0; nilfs_btree_alloc_path()
49 path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR; nilfs_btree_alloc_path()
50 path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR; nilfs_btree_alloc_path()
51 path[level].bp_op = NULL; nilfs_btree_alloc_path()
55 return path; nilfs_btree_alloc_path()
58 static void nilfs_btree_free_path(struct nilfs_btree_path *path) nilfs_btree_free_path() argument
63 brelse(path[level].bp_bh); nilfs_btree_free_path()
65 kmem_cache_free(nilfs_btree_path_cache, path); nilfs_btree_free_path()
422 nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level) nilfs_btree_get_nonroot_node() argument
424 return (struct nilfs_btree_node *)path[level].bp_bh->b_data; nilfs_btree_get_nonroot_node()
428 nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level) nilfs_btree_get_sib_node() argument
430 return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data; nilfs_btree_get_sib_node()
440 const struct nilfs_btree_path *path, nilfs_btree_get_node()
449 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_get_node()
536 struct nilfs_btree_path *path, nilfs_btree_do_lookup()
553 path[level].bp_bh = NULL; nilfs_btree_do_lookup()
554 path[level].bp_index = index; nilfs_btree_do_lookup()
561 p.node = nilfs_btree_get_node(btree, path, level + 1, nilfs_btree_do_lookup()
567 ret = __nilfs_btree_get_block(btree, ptr, &path[level].bp_bh, nilfs_btree_do_lookup()
572 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_do_lookup()
586 path[level].bp_index = index; nilfs_btree_do_lookup()
598 struct nilfs_btree_path *path, nilfs_btree_do_lookup_last()
612 path[level].bp_bh = NULL; nilfs_btree_do_lookup_last()
613 path[level].bp_index = index; nilfs_btree_do_lookup_last()
617 ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh); nilfs_btree_do_lookup_last()
620 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_do_lookup_last()
625 path[level].bp_index = index; nilfs_btree_do_lookup_last()
637 * nilfs_btree_get_next_key - get next valid key from btree path array
639 * @path: array of nilfs_btree_path struct
647 const struct nilfs_btree_path *path, nilfs_btree_get_next_key()
660 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_get_next_key()
662 index = path[level].bp_index + next_adj; nilfs_btree_get_next_key()
677 struct nilfs_btree_path *path; nilfs_btree_lookup() local
680 path = nilfs_btree_alloc_path(); nilfs_btree_lookup()
681 if (path == NULL) nilfs_btree_lookup()
684 ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level, 0); nilfs_btree_lookup()
686 nilfs_btree_free_path(path); nilfs_btree_lookup()
694 struct nilfs_btree_path *path; nilfs_btree_lookup_contig() local
703 path = nilfs_btree_alloc_path(); nilfs_btree_lookup_contig()
704 if (path == NULL) nilfs_btree_lookup_contig()
707 ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level, 1); nilfs_btree_lookup_contig()
723 node = nilfs_btree_get_node(btree, path, level, &ncmax); nilfs_btree_lookup_contig()
724 index = path[level].bp_index + 1; nilfs_btree_lookup_contig()
746 p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax); nilfs_btree_lookup_contig()
747 p.index = path[level + 1].bp_index + 1; nilfs_btree_lookup_contig()
753 path[level + 1].bp_index = p.index; nilfs_btree_lookup_contig()
755 brelse(path[level].bp_bh); nilfs_btree_lookup_contig()
756 path[level].bp_bh = NULL; nilfs_btree_lookup_contig()
758 ret = __nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh, nilfs_btree_lookup_contig()
762 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_lookup_contig()
765 path[level].bp_index = index; nilfs_btree_lookup_contig()
771 nilfs_btree_free_path(path); nilfs_btree_lookup_contig()
776 struct nilfs_btree_path *path, nilfs_btree_promote_key()
782 nilfs_btree_get_nonroot_node(path, level), nilfs_btree_promote_key()
783 path[level].bp_index, key); nilfs_btree_promote_key()
784 if (!buffer_dirty(path[level].bp_bh)) nilfs_btree_promote_key()
785 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_promote_key()
786 } while ((path[level].bp_index == 0) && nilfs_btree_promote_key()
793 path[level].bp_index, key); nilfs_btree_promote_key()
798 struct nilfs_btree_path *path, nilfs_btree_do_insert()
805 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_do_insert()
807 nilfs_btree_node_insert(node, path[level].bp_index, nilfs_btree_do_insert()
809 if (!buffer_dirty(path[level].bp_bh)) nilfs_btree_do_insert()
810 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_do_insert()
812 if (path[level].bp_index == 0) nilfs_btree_do_insert()
813 nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_do_insert()
818 nilfs_btree_node_insert(node, path[level].bp_index, nilfs_btree_do_insert()
825 struct nilfs_btree_path *path, nilfs_btree_carry_left()
831 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_carry_left()
832 left = nilfs_btree_get_sib_node(path, level); nilfs_btree_carry_left()
839 if (n > path[level].bp_index) { nilfs_btree_carry_left()
847 if (!buffer_dirty(path[level].bp_bh)) nilfs_btree_carry_left()
848 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_carry_left()
849 if (!buffer_dirty(path[level].bp_sib_bh)) nilfs_btree_carry_left()
850 mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btree_carry_left()
852 nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_carry_left()
856 brelse(path[level].bp_bh); nilfs_btree_carry_left()
857 path[level].bp_bh = path[level].bp_sib_bh; nilfs_btree_carry_left()
858 path[level].bp_sib_bh = NULL; nilfs_btree_carry_left()
859 path[level].bp_index += lnchildren; nilfs_btree_carry_left()
860 path[level + 1].bp_index--; nilfs_btree_carry_left()
862 brelse(path[level].bp_sib_bh); nilfs_btree_carry_left()
863 path[level].bp_sib_bh = NULL; nilfs_btree_carry_left()
864 path[level].bp_index -= n; nilfs_btree_carry_left()
867 nilfs_btree_do_insert(btree, path, level, keyp, ptrp); nilfs_btree_carry_left()
871 struct nilfs_btree_path *path, nilfs_btree_carry_right()
877 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_carry_right()
878 right = nilfs_btree_get_sib_node(path, level); nilfs_btree_carry_right()
885 if (n > nchildren - path[level].bp_index) { nilfs_btree_carry_right()
893 if (!buffer_dirty(path[level].bp_bh)) nilfs_btree_carry_right()
894 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_carry_right()
895 if (!buffer_dirty(path[level].bp_sib_bh)) nilfs_btree_carry_right()
896 mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btree_carry_right()
898 path[level + 1].bp_index++; nilfs_btree_carry_right()
899 nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_carry_right()
901 path[level + 1].bp_index--; nilfs_btree_carry_right()
904 brelse(path[level].bp_bh); nilfs_btree_carry_right()
905 path[level].bp_bh = path[level].bp_sib_bh; nilfs_btree_carry_right()
906 path[level].bp_sib_bh = NULL; nilfs_btree_carry_right()
907 path[level].bp_index -= nilfs_btree_node_get_nchildren(node); nilfs_btree_carry_right()
908 path[level + 1].bp_index++; nilfs_btree_carry_right()
910 brelse(path[level].bp_sib_bh); nilfs_btree_carry_right()
911 path[level].bp_sib_bh = NULL; nilfs_btree_carry_right()
914 nilfs_btree_do_insert(btree, path, level, keyp, ptrp); nilfs_btree_carry_right()
918 struct nilfs_btree_path *path, nilfs_btree_split()
924 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_split()
925 right = nilfs_btree_get_sib_node(path, level); nilfs_btree_split()
931 if (n > nchildren - path[level].bp_index) { nilfs_btree_split()
938 if (!buffer_dirty(path[level].bp_bh)) nilfs_btree_split()
939 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_split()
940 if (!buffer_dirty(path[level].bp_sib_bh)) nilfs_btree_split()
941 mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btree_split()
944 path[level].bp_index -= nilfs_btree_node_get_nchildren(node); nilfs_btree_split()
945 nilfs_btree_node_insert(right, path[level].bp_index, nilfs_btree_split()
949 *ptrp = path[level].bp_newreq.bpr_ptr; nilfs_btree_split()
951 brelse(path[level].bp_bh); nilfs_btree_split()
952 path[level].bp_bh = path[level].bp_sib_bh; nilfs_btree_split()
953 path[level].bp_sib_bh = NULL; nilfs_btree_split()
955 nilfs_btree_do_insert(btree, path, level, keyp, ptrp); nilfs_btree_split()
958 *ptrp = path[level].bp_newreq.bpr_ptr; nilfs_btree_split()
960 brelse(path[level].bp_sib_bh); nilfs_btree_split()
961 path[level].bp_sib_bh = NULL; nilfs_btree_split()
964 path[level + 1].bp_index++; nilfs_btree_split()
968 struct nilfs_btree_path *path, nilfs_btree_grow()
975 child = nilfs_btree_get_sib_node(path, level); nilfs_btree_grow()
984 if (!buffer_dirty(path[level].bp_sib_bh)) nilfs_btree_grow()
985 mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btree_grow()
987 path[level].bp_bh = path[level].bp_sib_bh; nilfs_btree_grow()
988 path[level].bp_sib_bh = NULL; nilfs_btree_grow()
990 nilfs_btree_do_insert(btree, path, level, keyp, ptrp); nilfs_btree_grow()
993 *ptrp = path[level].bp_newreq.bpr_ptr; nilfs_btree_grow()
997 const struct nilfs_btree_path *path) nilfs_btree_find_near()
1002 if (path == NULL) nilfs_btree_find_near()
1007 if (path[level].bp_index > 0) { nilfs_btree_find_near()
1008 node = nilfs_btree_get_node(btree, path, level, &ncmax); nilfs_btree_find_near()
1010 path[level].bp_index - 1, nilfs_btree_find_near()
1017 node = nilfs_btree_get_node(btree, path, level, &ncmax); nilfs_btree_find_near()
1018 return nilfs_btree_node_get_ptr(node, path[level].bp_index, nilfs_btree_find_near()
1026 const struct nilfs_btree_path *path, nilfs_btree_find_target_v()
1036 ptr = nilfs_btree_find_near(btree, path); nilfs_btree_find_target_v()
1046 struct nilfs_btree_path *path, nilfs_btree_prepare_insert()
1061 path[level].bp_newreq.bpr_ptr = nilfs_btree_prepare_insert()
1062 nilfs_btree_find_target_v(btree, path, key); nilfs_btree_prepare_insert()
1066 ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat); nilfs_btree_prepare_insert()
1075 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_prepare_insert()
1077 path[level].bp_op = nilfs_btree_do_insert; nilfs_btree_prepare_insert()
1082 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); nilfs_btree_prepare_insert()
1083 pindex = path[level + 1].bp_index; nilfs_btree_prepare_insert()
1094 path[level].bp_sib_bh = bh; nilfs_btree_prepare_insert()
1095 path[level].bp_op = nilfs_btree_carry_left; nilfs_btree_prepare_insert()
1112 path[level].bp_sib_bh = bh; nilfs_btree_prepare_insert()
1113 path[level].bp_op = nilfs_btree_carry_right; nilfs_btree_prepare_insert()
1122 path[level].bp_newreq.bpr_ptr = nilfs_btree_prepare_insert()
1123 path[level - 1].bp_newreq.bpr_ptr + 1; nilfs_btree_prepare_insert()
1125 &path[level].bp_newreq, dat); nilfs_btree_prepare_insert()
1129 path[level].bp_newreq.bpr_ptr, nilfs_btree_prepare_insert()
1138 path[level].bp_sib_bh = bh; nilfs_btree_prepare_insert()
1139 path[level].bp_op = nilfs_btree_split; nilfs_btree_prepare_insert()
1146 path[level].bp_op = nilfs_btree_do_insert; nilfs_btree_prepare_insert()
1152 path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1; nilfs_btree_prepare_insert()
1153 ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat); nilfs_btree_prepare_insert()
1156 ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr, nilfs_btree_prepare_insert()
1163 path[level].bp_sib_bh = bh; nilfs_btree_prepare_insert()
1164 path[level].bp_op = nilfs_btree_grow; nilfs_btree_prepare_insert()
1167 path[level].bp_op = nilfs_btree_do_insert; nilfs_btree_prepare_insert()
1179 nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat); nilfs_btree_prepare_insert()
1182 nilfs_btnode_delete(path[level].bp_sib_bh); nilfs_btree_prepare_insert()
1183 nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat); nilfs_btree_prepare_insert()
1187 nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat); nilfs_btree_prepare_insert()
1195 struct nilfs_btree_path *path, nilfs_btree_commit_insert()
1202 ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr; nilfs_btree_commit_insert()
1210 &path[level - 1].bp_newreq, dat); nilfs_btree_commit_insert()
1211 path[level].bp_op(btree, path, level, &key, &ptr); nilfs_btree_commit_insert()
1220 struct nilfs_btree_path *path; nilfs_btree_insert() local
1224 path = nilfs_btree_alloc_path(); nilfs_btree_insert()
1225 if (path == NULL) nilfs_btree_insert()
1228 ret = nilfs_btree_do_lookup(btree, path, key, NULL, nilfs_btree_insert()
1236 ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr, &stats); nilfs_btree_insert()
1239 nilfs_btree_commit_insert(btree, path, level, key, ptr); nilfs_btree_insert()
1243 nilfs_btree_free_path(path); nilfs_btree_insert()
1248 struct nilfs_btree_path *path, nilfs_btree_do_delete()
1255 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_do_delete()
1257 nilfs_btree_node_delete(node, path[level].bp_index, nilfs_btree_do_delete()
1259 if (!buffer_dirty(path[level].bp_bh)) nilfs_btree_do_delete()
1260 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_do_delete()
1261 if (path[level].bp_index == 0) nilfs_btree_do_delete()
1262 nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_do_delete()
1266 nilfs_btree_node_delete(node, path[level].bp_index, nilfs_btree_do_delete()
1273 struct nilfs_btree_path *path, nilfs_btree_borrow_left()
1279 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); nilfs_btree_borrow_left()
1281 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_borrow_left()
1282 left = nilfs_btree_get_sib_node(path, level); nilfs_btree_borrow_left()
1291 if (!buffer_dirty(path[level].bp_bh)) nilfs_btree_borrow_left()
1292 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_borrow_left()
1293 if (!buffer_dirty(path[level].bp_sib_bh)) nilfs_btree_borrow_left()
1294 mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btree_borrow_left()
1296 nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_borrow_left()
1299 brelse(path[level].bp_sib_bh); nilfs_btree_borrow_left()
1300 path[level].bp_sib_bh = NULL; nilfs_btree_borrow_left()
1301 path[level].bp_index += n; nilfs_btree_borrow_left()
1305 struct nilfs_btree_path *path, nilfs_btree_borrow_right()
1311 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); nilfs_btree_borrow_right()
1313 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_borrow_right()
1314 right = nilfs_btree_get_sib_node(path, level); nilfs_btree_borrow_right()
1323 if (!buffer_dirty(path[level].bp_bh)) nilfs_btree_borrow_right()
1324 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_borrow_right()
1325 if (!buffer_dirty(path[level].bp_sib_bh)) nilfs_btree_borrow_right()
1326 mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btree_borrow_right()
1328 path[level + 1].bp_index++; nilfs_btree_borrow_right()
1329 nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_borrow_right()
1331 path[level + 1].bp_index--; nilfs_btree_borrow_right()
1333 brelse(path[level].bp_sib_bh); nilfs_btree_borrow_right()
1334 path[level].bp_sib_bh = NULL; nilfs_btree_borrow_right()
1338 struct nilfs_btree_path *path, nilfs_btree_concat_left()
1344 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); nilfs_btree_concat_left()
1346 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_concat_left()
1347 left = nilfs_btree_get_sib_node(path, level); nilfs_btree_concat_left()
1354 if (!buffer_dirty(path[level].bp_sib_bh)) nilfs_btree_concat_left()
1355 mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btree_concat_left()
1357 nilfs_btnode_delete(path[level].bp_bh); nilfs_btree_concat_left()
1358 path[level].bp_bh = path[level].bp_sib_bh; nilfs_btree_concat_left()
1359 path[level].bp_sib_bh = NULL; nilfs_btree_concat_left()
1360 path[level].bp_index += nilfs_btree_node_get_nchildren(left); nilfs_btree_concat_left()
1364 struct nilfs_btree_path *path, nilfs_btree_concat_right()
1370 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); nilfs_btree_concat_right()
1372 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_concat_right()
1373 right = nilfs_btree_get_sib_node(path, level); nilfs_btree_concat_right()
1380 if (!buffer_dirty(path[level].bp_bh)) nilfs_btree_concat_right()
1381 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_concat_right()
1383 nilfs_btnode_delete(path[level].bp_sib_bh); nilfs_btree_concat_right()
1384 path[level].bp_sib_bh = NULL; nilfs_btree_concat_right()
1385 path[level + 1].bp_index++; nilfs_btree_concat_right()
1389 struct nilfs_btree_path *path, nilfs_btree_shrink()
1395 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); nilfs_btree_shrink()
1398 child = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_shrink()
1408 nilfs_btnode_delete(path[level].bp_bh); nilfs_btree_shrink()
1409 path[level].bp_bh = NULL; nilfs_btree_shrink()
1413 struct nilfs_btree_path *path, nilfs_btree_nop()
1419 struct nilfs_btree_path *path, nilfs_btree_prepare_delete()
1434 for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index; nilfs_btree_prepare_delete()
1437 node = nilfs_btree_get_nonroot_node(path, level); nilfs_btree_prepare_delete()
1438 path[level].bp_oldreq.bpr_ptr = nilfs_btree_prepare_delete()
1441 &path[level].bp_oldreq, dat); nilfs_btree_prepare_delete()
1446 path[level].bp_op = nilfs_btree_do_delete; nilfs_btree_prepare_delete()
1451 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); nilfs_btree_prepare_delete()
1452 pindex = path[level + 1].bp_index; nilfs_btree_prepare_delete()
1464 path[level].bp_sib_bh = bh; nilfs_btree_prepare_delete()
1465 path[level].bp_op = nilfs_btree_borrow_left; nilfs_btree_prepare_delete()
1469 path[level].bp_sib_bh = bh; nilfs_btree_prepare_delete()
1470 path[level].bp_op = nilfs_btree_concat_left; nilfs_btree_prepare_delete()
1484 path[level].bp_sib_bh = bh; nilfs_btree_prepare_delete()
1485 path[level].bp_op = nilfs_btree_borrow_right; nilfs_btree_prepare_delete()
1489 path[level].bp_sib_bh = bh; nilfs_btree_prepare_delete()
1490 path[level].bp_op = nilfs_btree_concat_right; nilfs_btree_prepare_delete()
1508 path[level].bp_op = nilfs_btree_shrink; nilfs_btree_prepare_delete()
1511 path[level].bp_op = nilfs_btree_nop; nilfs_btree_prepare_delete()
1514 path[level].bp_op = nilfs_btree_do_delete; nilfs_btree_prepare_delete()
1522 path[level].bp_op = nilfs_btree_do_delete; nilfs_btree_prepare_delete()
1527 path[level].bp_oldreq.bpr_ptr = nilfs_btree_prepare_delete()
1531 ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); nilfs_btree_prepare_delete()
1542 nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat); nilfs_btree_prepare_delete()
1545 brelse(path[level].bp_sib_bh); nilfs_btree_prepare_delete()
1546 nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat); nilfs_btree_prepare_delete()
1554 struct nilfs_btree_path *path, nilfs_btree_commit_delete()
1560 nilfs_bmap_commit_end_ptr(btree, &path[level].bp_oldreq, dat); nilfs_btree_commit_delete()
1561 path[level].bp_op(btree, path, level, NULL, NULL); nilfs_btree_commit_delete()
1571 struct nilfs_btree_path *path; nilfs_btree_delete() local
1576 path = nilfs_btree_alloc_path(); nilfs_btree_delete()
1577 if (path == NULL) nilfs_btree_delete()
1580 ret = nilfs_btree_do_lookup(btree, path, key, NULL, nilfs_btree_delete()
1588 ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat); nilfs_btree_delete()
1591 nilfs_btree_commit_delete(btree, path, level, dat); nilfs_btree_delete()
1595 nilfs_btree_free_path(path); nilfs_btree_delete()
1602 struct nilfs_btree_path *path; nilfs_btree_seek_key() local
1606 path = nilfs_btree_alloc_path(); nilfs_btree_seek_key()
1607 if (!path) nilfs_btree_seek_key()
1610 ret = nilfs_btree_do_lookup(btree, path, start, NULL, minlevel, 0); nilfs_btree_seek_key()
1614 ret = nilfs_btree_get_next_key(btree, path, minlevel, keyp); nilfs_btree_seek_key()
1616 nilfs_btree_free_path(path); nilfs_btree_seek_key()
1622 struct nilfs_btree_path *path; nilfs_btree_last_key() local
1625 path = nilfs_btree_alloc_path(); nilfs_btree_last_key()
1626 if (path == NULL) nilfs_btree_last_key()
1629 ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL); nilfs_btree_last_key()
1631 nilfs_btree_free_path(path); nilfs_btree_last_key()
1883 struct nilfs_btree_path *path, nilfs_btree_propagate_p()
1888 !buffer_dirty(path[level].bp_bh)) nilfs_btree_propagate_p()
1889 mark_buffer_dirty(path[level].bp_bh); nilfs_btree_propagate_p()
1895 struct nilfs_btree_path *path, nilfs_btree_prepare_update_v()
1901 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); nilfs_btree_prepare_update_v()
1902 path[level].bp_oldreq.bpr_ptr = nilfs_btree_prepare_update_v()
1903 nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, nilfs_btree_prepare_update_v()
1905 path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1; nilfs_btree_prepare_update_v()
1906 ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req, nilfs_btree_prepare_update_v()
1907 &path[level].bp_newreq.bpr_req); nilfs_btree_prepare_update_v()
1911 if (buffer_nilfs_node(path[level].bp_bh)) { nilfs_btree_prepare_update_v()
1912 path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr; nilfs_btree_prepare_update_v()
1913 path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr; nilfs_btree_prepare_update_v()
1914 path[level].bp_ctxt.bh = path[level].bp_bh; nilfs_btree_prepare_update_v()
1917 &path[level].bp_ctxt); nilfs_btree_prepare_update_v()
1920 &path[level].bp_oldreq.bpr_req, nilfs_btree_prepare_update_v()
1921 &path[level].bp_newreq.bpr_req); nilfs_btree_prepare_update_v()
1930 struct nilfs_btree_path *path, nilfs_btree_commit_update_v()
1936 nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req, nilfs_btree_commit_update_v()
1937 &path[level].bp_newreq.bpr_req, nilfs_btree_commit_update_v()
1940 if (buffer_nilfs_node(path[level].bp_bh)) { nilfs_btree_commit_update_v()
1943 &path[level].bp_ctxt); nilfs_btree_commit_update_v()
1944 path[level].bp_bh = path[level].bp_ctxt.bh; nilfs_btree_commit_update_v()
1946 set_buffer_nilfs_volatile(path[level].bp_bh); nilfs_btree_commit_update_v()
1948 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); nilfs_btree_commit_update_v()
1949 nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, nilfs_btree_commit_update_v()
1950 path[level].bp_newreq.bpr_ptr, ncmax); nilfs_btree_commit_update_v()
1954 struct nilfs_btree_path *path, nilfs_btree_abort_update_v()
1957 nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req, nilfs_btree_abort_update_v()
1958 &path[level].bp_newreq.bpr_req); nilfs_btree_abort_update_v()
1959 if (buffer_nilfs_node(path[level].bp_bh)) nilfs_btree_abort_update_v()
1962 &path[level].bp_ctxt); nilfs_btree_abort_update_v()
1966 struct nilfs_btree_path *path, nilfs_btree_prepare_propagate_v()
1973 if (!buffer_nilfs_volatile(path[level].bp_bh)) { nilfs_btree_prepare_propagate_v()
1974 ret = nilfs_btree_prepare_update_v(btree, path, level, dat); nilfs_btree_prepare_propagate_v()
1979 !buffer_dirty(path[level].bp_bh)) { nilfs_btree_prepare_propagate_v()
1981 WARN_ON(buffer_nilfs_volatile(path[level].bp_bh)); nilfs_btree_prepare_propagate_v()
1982 ret = nilfs_btree_prepare_update_v(btree, path, level, dat); nilfs_btree_prepare_propagate_v()
1994 nilfs_btree_abort_update_v(btree, path, level, dat); nilfs_btree_prepare_propagate_v()
1995 if (!buffer_nilfs_volatile(path[level].bp_bh)) nilfs_btree_prepare_propagate_v()
1996 nilfs_btree_abort_update_v(btree, path, level, dat); nilfs_btree_prepare_propagate_v()
2001 struct nilfs_btree_path *path, nilfs_btree_commit_propagate_v()
2008 if (!buffer_nilfs_volatile(path[minlevel].bp_bh)) nilfs_btree_commit_propagate_v()
2009 nilfs_btree_commit_update_v(btree, path, minlevel, dat); nilfs_btree_commit_propagate_v()
2012 nilfs_btree_commit_update_v(btree, path, level, dat); nilfs_btree_commit_propagate_v()
2016 struct nilfs_btree_path *path, nilfs_btree_propagate_v()
2026 path[level].bp_bh = bh; nilfs_btree_propagate_v()
2027 ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel, nilfs_btree_propagate_v()
2032 if (buffer_nilfs_volatile(path[level].bp_bh)) { nilfs_btree_propagate_v()
2033 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); nilfs_btree_propagate_v()
2035 path[level + 1].bp_index, nilfs_btree_propagate_v()
2042 nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat); nilfs_btree_propagate_v()
2045 brelse(path[level].bp_bh); nilfs_btree_propagate_v()
2046 path[level].bp_bh = NULL; nilfs_btree_propagate_v()
2053 struct nilfs_btree_path *path; nilfs_btree_propagate() local
2060 path = nilfs_btree_alloc_path(); nilfs_btree_propagate()
2061 if (path == NULL) nilfs_btree_propagate()
2073 ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); nilfs_btree_propagate()
2082 nilfs_btree_propagate_v(btree, path, level, bh) : nilfs_btree_propagate()
2083 nilfs_btree_propagate_p(btree, path, level, bh); nilfs_btree_propagate()
2086 nilfs_btree_free_path(path); nilfs_btree_propagate()
2171 struct nilfs_btree_path *path, nilfs_btree_assign_p()
2182 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); nilfs_btree_assign_p()
2183 ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, nilfs_btree_assign_p()
2186 path[level].bp_ctxt.oldkey = ptr; nilfs_btree_assign_p()
2187 path[level].bp_ctxt.newkey = blocknr; nilfs_btree_assign_p()
2188 path[level].bp_ctxt.bh = *bh; nilfs_btree_assign_p()
2191 &path[level].bp_ctxt); nilfs_btree_assign_p()
2196 &path[level].bp_ctxt); nilfs_btree_assign_p()
2197 *bh = path[level].bp_ctxt.bh; nilfs_btree_assign_p()
2200 nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, blocknr, nilfs_btree_assign_p()
2203 key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index); nilfs_btree_assign_p()
2212 struct nilfs_btree_path *path, nilfs_btree_assign_v()
2225 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); nilfs_btree_assign_v()
2226 ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, nilfs_btree_assign_v()
2234 key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index); nilfs_btree_assign_v()
2247 struct nilfs_btree_path *path; nilfs_btree_assign() local
2252 path = nilfs_btree_alloc_path(); nilfs_btree_assign()
2253 if (path == NULL) nilfs_btree_assign()
2265 ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); nilfs_btree_assign()
2272 nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) : nilfs_btree_assign()
2273 nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo); nilfs_btree_assign()
2276 nilfs_btree_free_path(path); nilfs_btree_assign()
2311 struct nilfs_btree_path *path; nilfs_btree_mark() local
2315 path = nilfs_btree_alloc_path(); nilfs_btree_mark()
2316 if (path == NULL) nilfs_btree_mark()
2319 ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1, 0); nilfs_btree_mark()
2337 nilfs_btree_free_path(path); nilfs_btree_mark()
439 nilfs_btree_get_node(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, int level, int *ncmaxp) nilfs_btree_get_node() argument
535 nilfs_btree_do_lookup(const struct nilfs_bmap *btree, struct nilfs_btree_path *path, __u64 key, __u64 *ptrp, int minlevel, int readahead) nilfs_btree_do_lookup() argument
597 nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, struct nilfs_btree_path *path, __u64 *keyp, __u64 *ptrp) nilfs_btree_do_lookup_last() argument
646 nilfs_btree_get_next_key(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, int minlevel, __u64 *nextkey) nilfs_btree_get_next_key() argument
775 nilfs_btree_promote_key(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 key) nilfs_btree_promote_key() argument
797 nilfs_btree_do_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_do_insert() argument
824 nilfs_btree_carry_left(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_carry_left() argument
870 nilfs_btree_carry_right(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_carry_right() argument
917 nilfs_btree_split(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_split() argument
967 nilfs_btree_grow(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_grow() argument
996 nilfs_btree_find_near(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path) nilfs_btree_find_near() argument
1025 nilfs_btree_find_target_v(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, __u64 key) nilfs_btree_find_target_v() argument
1045 nilfs_btree_prepare_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int *levelp, __u64 key, __u64 ptr, struct nilfs_bmap_stats *stats) nilfs_btree_prepare_insert() argument
1194 nilfs_btree_commit_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int maxlevel, __u64 key, __u64 ptr) nilfs_btree_commit_insert() argument
1247 nilfs_btree_do_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_do_delete() argument
1272 nilfs_btree_borrow_left(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_borrow_left() argument
1304 nilfs_btree_borrow_right(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_borrow_right() argument
1337 nilfs_btree_concat_left(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_concat_left() argument
1363 nilfs_btree_concat_right(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_concat_right() argument
1388 nilfs_btree_shrink(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_shrink() argument
1412 nilfs_btree_nop(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) nilfs_btree_nop() argument
1418 nilfs_btree_prepare_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int *levelp, struct nilfs_bmap_stats *stats, struct inode *dat) nilfs_btree_prepare_delete() argument
1553 nilfs_btree_commit_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int maxlevel, struct inode *dat) nilfs_btree_commit_delete() argument
1882 nilfs_btree_propagate_p(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head *bh) nilfs_btree_propagate_p() argument
1894 nilfs_btree_prepare_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) nilfs_btree_prepare_update_v() argument
1929 nilfs_btree_commit_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) nilfs_btree_commit_update_v() argument
1953 nilfs_btree_abort_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) nilfs_btree_abort_update_v() argument
1965 nilfs_btree_prepare_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int minlevel, int *maxlevelp, struct inode *dat) nilfs_btree_prepare_propagate_v() argument
2000 nilfs_btree_commit_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int minlevel, int maxlevel, struct buffer_head *bh, struct inode *dat) nilfs_btree_commit_propagate_v() argument
2015 nilfs_btree_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head *bh) nilfs_btree_propagate_v() argument
2170 nilfs_btree_assign_p(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) nilfs_btree_assign_p() argument
2211 nilfs_btree_assign_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) nilfs_btree_assign_v() argument
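The nilfs_btree_prepare_delete()/nilfs_btree_commit_delete() pair above is a two-phase scheme: prepare walks the path bottom-up and records one rebalancing callback per level in path[level].bp_op (borrow, concat, shrink, nop, or plain delete), and only commit runs them, so a prepare failure leaves the tree untouched. A minimal standalone sketch of that op-dispatch idiom, with illustrative names rather than the nilfs API:

#include <stdio.h>

struct level_state;
typedef void (*level_op)(struct level_state *st, int level);

struct level_state {
	level_op op;	/* chosen during prepare, run during commit */
	int index;
};

static void op_delete(struct level_state *st, int level)
{
	printf("level %d: delete entry %d\n", level, st[level].index);
}

static void op_nop(struct level_state *st, int level)
{
	(void)st; (void)level;
}

/* prepare: decide, per level, which rebalancing action commit must run */
static int prepare(struct level_state *st, int nlevels, int *maxlevel)
{
	int level;

	for (level = 0; level < nlevels; level++) {
		/* real code would inspect sibling occupancy here */
		st[level].op = (level == 0) ? op_delete : op_nop;
	}
	*maxlevel = nlevels - 1;
	return 0;	/* any failure here leaves the tree untouched */
}

/* commit: apply the recorded ops bottom-up; this phase cannot fail */
static void commit(struct level_state *st, int maxlevel)
{
	int level;

	for (level = 0; level <= maxlevel; level++)
		st[level].op(st, level);
}

int main(void)
{
	struct level_state st[3] = { { .index = 2 } };
	int maxlevel;

	if (prepare(st, 3, &maxlevel) == 0)
		commit(st, maxlevel);
	return 0;
}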
/linux-4.4.14/drivers/video/fbdev/mmp/hw/
H A Dmmp_ctrl.c135 struct mmp_path *path = overlay->path; dmafetch_set_fmt() local
136 tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id)); dmafetch_set_fmt()
139 writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id)); dmafetch_set_fmt()
144 struct lcd_regs *regs = path_regs(overlay->path); overlay_set_win()
177 struct mmp_path *path = overlay->path; dmafetch_onoff() local
180 tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id)); dmafetch_onoff()
183 writel(tmp, ctrl_regs(path) + dma_ctrl(0, path->id)); dmafetch_onoff()
187 static void path_enabledisable(struct mmp_path *path, int on) path_enabledisable() argument
190 mutex_lock(&path->access_ok); path_enabledisable()
191 tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path)); path_enabledisable()
196 writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path)); path_enabledisable()
197 mutex_unlock(&path->access_ok); path_enabledisable()
200 static void path_onoff(struct mmp_path *path, int on) path_onoff() argument
202 if (path->status == on) { path_onoff()
203 dev_info(path->dev, "path %s is already %s\n", path_onoff()
204 path->name, stat_name(path->status)); path_onoff()
209 path_enabledisable(path, 1); path_onoff()
211 if (path->panel && path->panel->set_onoff) path_onoff()
212 path->panel->set_onoff(path->panel, 1); path_onoff()
214 if (path->panel && path->panel->set_onoff) path_onoff()
215 path->panel->set_onoff(path->panel, 0); path_onoff()
217 path_enabledisable(path, 0); path_onoff()
219 path->status = on; path_onoff()
226 overlay->path->name, stat_name(overlay->status)); overlay_set_onoff()
231 if (overlay->path->ops.check_status(overlay->path) overlay_set_onoff()
232 != overlay->path->status) overlay_set_onoff()
233 path_onoff(overlay->path, on); overlay_set_onoff()
243 struct lcd_regs *regs = path_regs(overlay->path); overlay_set_addr()
258 static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode) path_set_mode() argument
260 struct lcd_regs *regs = path_regs(path); path_set_mode()
262 link_config = path_to_path_plat(path)->link_config, path_set_mode()
263  dsi_rbswap = path_to_path_plat(path)->dsi_rbswap; path_set_mode()
266 memcpy(&path->mode, mode, sizeof(struct mmp_mode)); path_set_mode()
268 mutex_lock(&path->access_ok); path_set_mode()
271 tmp = readl_relaxed(ctrl_regs(path) + intf_ctrl(path->id)) & 0x1; path_set_mode()
276 writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id)); path_set_mode()
279 tmp = readl_relaxed(ctrl_regs(path) + intf_rbswap_ctrl(path->id)) & path_set_mode()
282 writel_relaxed(tmp, ctrl_regs(path) + intf_rbswap_ctrl(path->id)); path_set_mode()
296 if (path->output_type == PATH_OUT_DSI) path_set_mode()
304 sclk_src = clk_get_rate(path_to_ctrl(path)->clk); path_set_mode()
309 dev_info(path->dev, "%s sclk_src %d sclk_div 0x%x pclk %d\n", path_set_mode()
312 tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path)); path_set_mode()
315 writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path)); path_set_mode()
317 mutex_unlock(&path->access_ok); path_set_mode()
349 static void path_set_default(struct mmp_path *path) path_set_default() argument
351 struct lcd_regs *regs = path_regs(path); path_set_default()
354 path_config = path_to_path_plat(path)->path_config; path_set_default()
357 if (PATH_OUT_PARALLEL == path->output_type) { path_set_default()
359 tmp = readl_relaxed(ctrl_regs(path) + SPU_IOPAD_CONTROL); path_set_default()
362 writel_relaxed(tmp, ctrl_regs(path) + SPU_IOPAD_CONTROL); path_set_default()
365 /* Select path clock source */ path_set_default()
366 tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path)); path_set_default()
369 writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path)); path_set_default()
379 writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id)); path_set_default()
388 * bus arbiter for faster read if not tv path; path_set_default()
392 tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id)); path_set_default()
394 if (PATH_TV == path->id) path_set_default()
396 writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id)); path_set_default()
404 struct mmp_path *path = NULL; path_init() local
424 path = mmp_register_path(path_info); path_init()
425 if (!path) { path_init()
429 path_plat->path = path; path_init()
433 path_set_default(path); path_init()
444 mmp_unregister_path(path_plat->path); path_deinit()
541 /* path init */ mmphw_probe()
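path_enabledisable() above is the classic locked read-modify-write on an MMIO register: fetch, merge one bit, write back, all under the path mutex. A hedged sketch of the idiom (CTRL_ENABLE, reg and lock are illustrative; assumes <linux/io.h> and <linux/mutex.h>):

#define CTRL_ENABLE	0x1

static void ctrl_enabledisable(void __iomem *reg, struct mutex *lock, int on)
{
	u32 tmp;

	mutex_lock(lock);
	tmp = readl_relaxed(reg);	/* fetch current register value */
	if (on)
		tmp |= CTRL_ENABLE;	/* set only the enable bit */
	else
		tmp &= ~CTRL_ENABLE;	/* clear it, preserving other bits */
	writel_relaxed(tmp, reg);	/* write back the merged value */
	mutex_unlock(lock);
}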
/linux-4.4.14/tools/testing/selftests/efivarfs/
H A Dcreate-read.c13 const char *path; main() local
18 fprintf(stderr, "usage: %s <path>\n", argv[0]); main()
22 path = argv[1]; main()
25 fd = open(path, O_RDWR | O_CREAT, 0600); main()
H A Dopen-unlink.c12 static int set_immutable(const char *path, int immutable) set_immutable() argument
19 fd = open(path, O_RDONLY); set_immutable()
43 static int get_immutable(const char *path) get_immutable() argument
50 fd = open(path, O_RDONLY); get_immutable()
69 const char *path; main() local
74 fprintf(stderr, "usage: %s <path>\n", argv[0]); main()
78 path = argv[1]; main()
88 fd = open(path, O_WRONLY | O_CREAT, 0600); main()
102 rc = get_immutable(path); main()
107 rc = set_immutable(path, 0); main()
114 fd = open(path, O_RDONLY); main()
120 if (unlink(path) < 0) { main()
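The set_immutable()/get_immutable() helpers above rely on the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls: efivarfs marks variables immutable, so the test must clear FS_IMMUTABLE_FL before unlink() can succeed. A self-contained userspace sketch along those lines (error handling condensed):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int set_immutable(const char *path, int immutable)
{
	unsigned int flags;
	int fd, rc;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return fd;
	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
	if (rc == 0) {
		if (immutable)
			flags |= FS_IMMUTABLE_FL;
		else
			flags &= ~FS_IMMUTABLE_FL;
		rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
	}
	close(fd);
	return rc;
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	return set_immutable(argv[1], 0) ? 1 : 0;	/* clear the flag */
}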
/linux-4.4.14/security/apparmor/
H A Dpath.c19 #include <linux/path.h>
25 #include "include/path.h"
43 * d_namespace_path - lookup a name associated with a given path
44 * @path: path to lookup (NOT NULL)
45 * @buf: buffer to store path to (NOT NULL)
47  * @name: Returns - pointer for start of path name within @buf (NOT NULL) d_namespace_path()
48 * @flags: flags controlling path lookup
50 * Handle path name lookup.
52  * Returns: %0 on success, else an error code if path lookup fails d_namespace_path()
53 * When no error the path name is returned in @name which points to
56 static int d_namespace_path(struct path *path, char *buf, int buflen, d_namespace_path() argument
63 if (path->mnt->mnt_flags & MNT_INTERNAL) { d_namespace_path()
65 res = dentry_path(path->dentry, buf, buflen); d_namespace_path()
71 if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC && d_namespace_path()
83 struct path root; d_namespace_path()
85 res = __d_path(path, &root, buf, buflen); d_namespace_path()
88 res = d_absolute_path(path, buf, buflen); d_namespace_path()
89 if (!our_mnt(path->mnt)) d_namespace_path()
93 /* handle error conditions - and still allow a partial path to d_namespace_path()
100 res = dentry_path_raw(path->dentry, buf, buflen); d_namespace_path()
106 } else if (!our_mnt(path->mnt)) d_namespace_path()
117 if (d_unlinked(path->dentry) && d_is_positive(path->dentry) && d_namespace_path()
123 /* If the path is not connected to the expected root, d_namespace_path()
127 * specifically directed to connect the path, d_namespace_path()
129 * if in a chroot and doing chroot relative paths and the path d_namespace_path()
137 our_mnt(path->mnt))) { d_namespace_path()
138 /* disconnected path, don't return pathname starting d_namespace_path()
153 * @path: path to get name for (NOT NULL)
154 * @flags: flags controlling path lookup
157 * @name: Returns - contains position of path name in @buffer (NOT NULL)
161 static int get_name_to_buffer(struct path *path, int flags, char *buffer, get_name_to_buffer() argument
165 int error = d_namespace_path(path, buffer, size - adjust, name, flags); get_name_to_buffer()
178 *info = "Failed name lookup - disconnected path"; get_name_to_buffer()
190 * @path: path the file (NOT NULL)
191 * @flags: flags controlling path name generation
193 * @name: Returns - the generated path name if !error (NOT NULL)
194 * @info: Returns - information on why the path lookup failed (MAYBE NULL)
207 int aa_path_name(struct path *path, int flags, char **buffer, const char **name, aa_path_name() argument
222 error = get_name_to_buffer(path, flags, buf, size, &str, info); aa_path_name()
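aa_path_name() above drives get_name_to_buffer() from a retry loop, growing the buffer until the name fits. A hedged sketch of that grow-on-ENAMETOOLONG idiom; lookup_name() and the size cap are illustrative, not the AppArmor API:

static int lookup_name(struct path *path, int flags, char **buffer,
		       const char **name)
{
	char *buf;
	int size = 256;
	int error;

	for (;;) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		error = get_name_to_buffer(path, flags, buf, size, name, NULL);
		if (error != -ENAMETOOLONG)
			break;
		kfree(buf);		/* buffer too small: double and retry */
		size <<= 1;
		if (size > 4 * PATH_MAX)
			return -ENAMETOOLONG;
	}
	if (error)
		kfree(buf);
	else
		*buffer = buf;		/* caller kfree()s on success */
	return error;
}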
H A Dlsm.c35 #include "include/path.h"
146 * @path: path to check permission of (NOT NULL)
152 static int common_perm(int op, struct path *path, u32 mask, common_perm() argument
160 error = aa_path_perm(op, profile, path, 0, mask, cond); common_perm()
166 * common_perm_dir_dentry - common permission wrapper when path is dir, dentry
175 static int common_perm_dir_dentry(int op, struct path *dir, common_perm_dir_dentry()
179 struct path path = { dir->mnt, dentry }; common_perm_dir_dentry() local
181 return common_perm(op, &path, mask, cond); common_perm_dir_dentry()
196 struct path path = { mnt, dentry }; common_perm_mnt_dentry() local
201 return common_perm(op, &path, mask, &cond); common_perm_mnt_dentry()
213 static int common_perm_rm(int op, struct path *dir, common_perm_rm()
238 static int common_perm_create(int op, struct path *dir, struct dentry *dentry, common_perm_create()
249 static int apparmor_path_unlink(struct path *dir, struct dentry *dentry) apparmor_path_unlink()
254 static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry, apparmor_path_mkdir()
261 static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry) apparmor_path_rmdir()
266 static int apparmor_path_mknod(struct path *dir, struct dentry *dentry, apparmor_path_mknod()
272 static int apparmor_path_truncate(struct path *path) apparmor_path_truncate() argument
274 struct path_cond cond = { d_backing_inode(path->dentry)->i_uid, apparmor_path_truncate()
275 d_backing_inode(path->dentry)->i_mode apparmor_path_truncate()
278 if (!path->mnt || !mediated_filesystem(path->dentry)) apparmor_path_truncate()
281 return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE, apparmor_path_truncate()
285 static int apparmor_path_symlink(struct path *dir, struct dentry *dentry, apparmor_path_symlink()
292 static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir, apparmor_path_link()
307 static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry, apparmor_path_rename()
308 struct path *new_dir, struct dentry *new_dentry) apparmor_path_rename()
318 struct path old_path = { old_dir->mnt, old_dentry }; apparmor_path_rename()
319 struct path new_path = { new_dir->mnt, new_dentry }; apparmor_path_rename()
337 static int apparmor_path_chmod(struct path *path, umode_t mode) apparmor_path_chmod() argument
339 if (!mediated_filesystem(path->dentry)) apparmor_path_chmod()
342 return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD); apparmor_path_chmod()
345 static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid) apparmor_path_chown() argument
347 struct path_cond cond = { d_backing_inode(path->dentry)->i_uid, apparmor_path_chown()
348 d_backing_inode(path->dentry)->i_mode apparmor_path_chown()
351 if (!mediated_filesystem(path->dentry)) apparmor_path_chown()
354 return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond); apparmor_path_chown()
357 static int apparmor_inode_getattr(const struct path *path) apparmor_inode_getattr() argument
359 if (!mediated_filesystem(path->dentry)) apparmor_inode_getattr()
362 return common_perm_mnt_dentry(OP_GETATTR, path->mnt, path->dentry, apparmor_inode_getattr()
/linux-4.4.14/security/apparmor/include/
H A Dpath.h4 * This file contains AppArmor basic path manipulation function definitions.
20 PATH_IS_DIR = 0x1, /* path is a directory */
22 PATH_CHROOT_REL = 0x8, /* do path lookup relative to chroot */
29 int aa_path_name(struct path *path, int flags, char **buffer,
/linux-4.4.14/security/tomoyo/
H A Dtomoyo.c141 static int tomoyo_inode_getattr(const struct path *path) tomoyo_inode_getattr() argument
143 return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, path, NULL); tomoyo_inode_getattr()
149 * @path: Pointer to "struct path".
153 static int tomoyo_path_truncate(struct path *path) tomoyo_path_truncate() argument
155 return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL); tomoyo_path_truncate()
161 * @parent: Pointer to "struct path".
166 static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry) tomoyo_path_unlink()
168 struct path path = { parent->mnt, dentry }; tomoyo_path_unlink() local
169 return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL); tomoyo_path_unlink()
175 * @parent: Pointer to "struct path".
181 static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry, tomoyo_path_mkdir()
184 struct path path = { parent->mnt, dentry }; tomoyo_path_mkdir() local
185 return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path, tomoyo_path_mkdir()
192 * @parent: Pointer to "struct path".
197 static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry) tomoyo_path_rmdir()
199 struct path path = { parent->mnt, dentry }; tomoyo_path_rmdir() local
200 return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL); tomoyo_path_rmdir()
206 * @parent: Pointer to "struct path".
212 static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry, tomoyo_path_symlink()
215 struct path path = { parent->mnt, dentry }; tomoyo_path_symlink() local
216 return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name); tomoyo_path_symlink()
222 * @parent: Pointer to "struct path".
229 static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry, tomoyo_path_mknod()
232 struct path path = { parent->mnt, dentry }; tomoyo_path_mknod() local
246 return tomoyo_mkdev_perm(type, &path, perm, dev); tomoyo_path_mknod()
256 return tomoyo_path_number_perm(type, &path, perm); tomoyo_path_mknod()
263 * @new_dir: Pointer to "struct path".
268 static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir, tomoyo_path_link()
271 struct path path1 = { new_dir->mnt, old_dentry }; tomoyo_path_link()
272 struct path path2 = { new_dir->mnt, new_dentry }; tomoyo_path_link()
279 * @old_parent: Pointer to "struct path".
281 * @new_parent: Pointer to "struct path".
286 static int tomoyo_path_rename(struct path *old_parent, tomoyo_path_rename()
288 struct path *new_parent, tomoyo_path_rename()
291 struct path path1 = { old_parent->mnt, old_dentry }; tomoyo_path_rename()
292 struct path path2 = { new_parent->mnt, new_dentry }; tomoyo_path_rename()
349 * @path: Pointer to "struct path".
354 static int tomoyo_path_chmod(struct path *path, umode_t mode) tomoyo_path_chmod() argument
356 return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path, tomoyo_path_chmod()
363 * @path: Pointer to "struct path".
369 static int tomoyo_path_chown(struct path *path, kuid_t uid, kgid_t gid) tomoyo_path_chown() argument
373 error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, tomoyo_path_chown()
376 error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, tomoyo_path_chown()
384 * @path: Pointer to "struct path".
388 static int tomoyo_path_chroot(struct path *path) tomoyo_path_chroot() argument
390 return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL); tomoyo_path_chroot()
397 * @path: Pointer to "struct path".
404 static int tomoyo_sb_mount(const char *dev_name, struct path *path, tomoyo_sb_mount() argument
407 return tomoyo_mount_permission(dev_name, path, type, flags, data); tomoyo_sb_mount()
420 struct path path = { mnt, mnt->mnt_root }; tomoyo_sb_umount() local
421 return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL); tomoyo_sb_umount()
427 * @old_path: Pointer to "struct path".
428 * @new_path: Pointer to "struct path".
432 static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path) tomoyo_sb_pivotroot()
H A Drealpath.c82 * tomoyo_get_absolute_path - Get the path of a dentry but ignores chroot'ed root.
84 * @path: Pointer to "struct path".
92 static char *tomoyo_get_absolute_path(const struct path *path, char * const buffer, tomoyo_get_absolute_path() argument
98 pos = d_absolute_path(path, buffer, buflen - 1); tomoyo_get_absolute_path()
100 struct inode *inode = d_backing_inode(path->dentry); tomoyo_get_absolute_path()
111 * tomoyo_get_dentry_path - Get the path of a dentry.
139 * tomoyo_get_local_path - Get the path of a dentry.
213 * @path: Pointer to "struct path".
219 static char *tomoyo_get_socket_name(const struct path *path, char * const buffer, tomoyo_get_socket_name() argument
222 struct inode *inode = d_backing_inode(path->dentry); tomoyo_get_socket_name()
238 * @path: Pointer to "struct path".
240 * Returns the realpath of the given @path on success, NULL otherwise.
250 char *tomoyo_realpath_from_path(const struct path *path) tomoyo_realpath_from_path() argument
255 struct dentry *dentry = path->dentry; tomoyo_realpath_from_path()
272 pos = tomoyo_get_socket_name(path, buf, buf_len - 1); tomoyo_realpath_from_path()
285 if (!path->mnt || tomoyo_realpath_from_path()
287 pos = tomoyo_get_local_path(path->dentry, buf, tomoyo_realpath_from_path()
291 pos = tomoyo_get_absolute_path(path, buf, buf_len - 1); tomoyo_realpath_from_path()
297 pos = tomoyo_get_local_path(path->dentry, buf, tomoyo_realpath_from_path()
321 struct path path; tomoyo_realpath_nofollow() local
323 if (pathname && kern_path(pathname, 0, &path) == 0) { tomoyo_realpath_nofollow()
324 char *buf = tomoyo_realpath_from_path(&path); tomoyo_realpath_nofollow()
325 path_put(&path); tomoyo_realpath_nofollow()
H A Dmount.c38 * tomoyo_check_mount_acl - Check permission for path path path number operation.
66 * @dir: Pointer to "struct path".
76 struct path *dir, const char *type, tomoyo_mount_acl()
80 struct path path; tomoyo_mount_acl() local
132 if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) { tomoyo_mount_acl()
136 obj.path1 = path; tomoyo_mount_acl()
137 requested_dev_name = tomoyo_realpath_from_path(&path); tomoyo_mount_acl()
180 * @path: Pointer to "struct path".
187 int tomoyo_mount_permission(const char *dev_name, struct path *path, tomoyo_mount_permission() argument
233 error = tomoyo_mount_acl(&r, dev_name, path, type, flags); tomoyo_mount_permission()
H A Dfile.c144 * @path: Pointer to "struct path".
148 static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, const struct path *path) tomoyo_get_realpath() argument
150 buf->name = tomoyo_realpath_from_path(path); tomoyo_get_realpath()
159 * tomoyo_audit_path_log - Audit path request log.
168 [r->param.path.operation], tomoyo_audit_path_log()
169 r->param.path.filename->name); tomoyo_audit_path_log()
173 * tomoyo_audit_path2_log - Audit path/path request log.
188 * tomoyo_audit_mkdev_log - Audit path/number/number/number request log.
205 * tomoyo_audit_path_number_log - Audit path/number request log.
239 * tomoyo_check_path_acl - Check permission for path operation.
255 if (acl->perm & (1 << r->param.path.operation)) { tomoyo_check_path_acl()
256 r->param.path.matched_path = tomoyo_check_path_acl()
257 tomoyo_compare_name_union(r->param.path.filename, tomoyo_check_path_acl()
259 return r->param.path.matched_path != NULL; tomoyo_check_path_acl()
265 * tomoyo_check_path_number_acl - Check permission for path number operation.
285 * tomoyo_check_path2_acl - Check permission for path path operation.
304 * tomoyo_check_mkdev_acl - Check permission for path number number number operation.
548 * tomoyo_path_permission - Check permission for single path operation.
568 r->param.path.filename = filename; tomoyo_path_permission()
569 r->param.path.operation = operation; tomoyo_path_permission()
598 r->param.path.filename = filename; tomoyo_execute_permission()
599 r->param.path.operation = TOMOYO_TYPE_EXECUTE; tomoyo_execute_permission()
685 * @path: Pointer to "struct path".
690 int tomoyo_path_number_perm(const u8 type, struct path *path, tomoyo_path_number_perm() argument
695 .path1 = *path, tomoyo_path_number_perm()
702 == TOMOYO_CONFIG_DISABLED || !path->dentry) tomoyo_path_number_perm()
705 if (!tomoyo_get_realpath(&buf, path)) tomoyo_path_number_perm()
730 * @path: Pointer to "struct path".
736 struct path *path, const int flag) tomoyo_check_open_permission()
743 .path1 = *path, tomoyo_check_open_permission()
753 if (!tomoyo_get_realpath(&buf, path)) { tomoyo_check_open_permission()
779 * @path: Pointer to "struct path".
785 int tomoyo_path_perm(const u8 operation, const struct path *path, const char *target) tomoyo_path_perm() argument
789 .path1 = *path, tomoyo_path_perm()
804 if (!tomoyo_get_realpath(&buf, path)) tomoyo_path_perm()
835 * @path: Pointer to "struct path".
841 int tomoyo_mkdev_perm(const u8 operation, struct path *path, tomoyo_mkdev_perm() argument
846 .path1 = *path, tomoyo_mkdev_perm()
857 if (tomoyo_get_realpath(&buf, path)) { tomoyo_mkdev_perm()
880 * @path1: Pointer to "struct path".
881 * @path2: Pointer to "struct path".
885 int tomoyo_path2_perm(const u8 operation, struct path *path1, tomoyo_path2_perm()
886 struct path *path2) tomoyo_path2_perm()
735 tomoyo_check_open_permission(struct tomoyo_domain_info *domain, struct path *path, const int flag) tomoyo_check_open_permission() argument
H A Dload_policy.c38 struct path path; tomoyo_policy_loader_exists() local
41 if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) { tomoyo_policy_loader_exists()
46 path_put(&path); tomoyo_policy_loader_exists()
/linux-4.4.14/fs/
H A Dfs_struct.c4 #include <linux/path.h>
13 void set_fs_root(struct fs_struct *fs, const struct path *path) set_fs_root() argument
15 struct path old_root; set_fs_root()
17 path_get(path); set_fs_root()
21 fs->root = *path; set_fs_root()
32 void set_fs_pwd(struct fs_struct *fs, const struct path *path) set_fs_pwd() argument
34 struct path old_pwd; set_fs_pwd()
36 path_get(path); set_fs_pwd()
40 fs->pwd = *path; set_fs_pwd()
48 static inline int replace_path(struct path *p, const struct path *old, const struct path *new) replace_path()
56 void chroot_fs_refs(const struct path *old_root, const struct path *new_root) chroot_fs_refs()
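set_fs_root()/set_fs_pwd() above follow a standard refcount-swap idiom: pin the new path first, publish it under fs->lock, and drop the old reference only after unlocking. A simplified sketch (the real code also bumps a seqcount around the swap):

/* Sketch of the swap idiom in set_fs_pwd(); locking details simplified. */
void set_pwd(struct fs_struct *fs, const struct path *path)
{
	struct path old_pwd;

	path_get(path);			/* pin the new path first */
	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;		/* publish while holding the lock */
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);	/* drop old ref outside the lock */
}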
H A Dfhandle.c15 static long do_sys_name_to_handle(struct path *path, do_sys_name_to_handle() argument
28 if (!path->dentry->d_sb->s_export_op || do_sys_name_to_handle()
29 !path->dentry->d_sb->s_export_op->fh_to_dentry) do_sys_name_to_handle()
47 retval = exportfs_encode_fh(path->dentry, do_sys_name_to_handle()
70 if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id, do_sys_name_to_handle()
96 struct path path; SYSCALL_DEFINE5() local
106 err = user_path_at(dfd, name, lookup_flags, &path); SYSCALL_DEFINE5()
108 err = do_sys_name_to_handle(&path, handle, mnt_id); SYSCALL_DEFINE5()
109 path_put(&path); SYSCALL_DEFINE5()
139 struct path *path) do_handle_to_path()
144 path->mnt = get_vfsmount_from_fd(mountdirfd); do_handle_to_path()
145 if (IS_ERR(path->mnt)) { do_handle_to_path()
146 retval = PTR_ERR(path->mnt); do_handle_to_path()
151 path->dentry = exportfs_decode_fh(path->mnt, do_handle_to_path()
155 if (IS_ERR(path->dentry)) { do_handle_to_path()
156 retval = PTR_ERR(path->dentry); do_handle_to_path()
161 mntput(path->mnt); do_handle_to_path()
167 struct path *path) handle_to_path()
206 retval = do_handle_to_path(mountdirfd, handle, path); handle_to_path()
218 struct path path; do_handle_open() local
222 retval = handle_to_path(mountdirfd, ufh, &path); do_handle_open()
228 path_put(&path); do_handle_open()
231 file = file_open_root(path.dentry, path.mnt, "", open_flag, 0); do_handle_open()
240 path_put(&path); do_handle_open()
138 do_handle_to_path(int mountdirfd, struct file_handle *handle, struct path *path) do_handle_to_path() argument
166 handle_to_path(int mountdirfd, struct file_handle __user *ufh, struct path *path) handle_to_path() argument
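do_sys_name_to_handle()/do_handle_to_path() above implement the name_to_handle_at(2)/open_by_handle_at(2) pair. A userspace view, with an illustrative path and mount point (open_by_handle_at() needs CAP_DAC_READ_SEARCH; a real program would map mnt_id to a mount via /proc/self/mountinfo):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct file_handle *fh;
	int mount_id, mount_fd, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	if (name_to_handle_at(AT_FDCWD, "/mnt/data/file", fh, &mount_id, 0)) {
		perror("name_to_handle_at");
		return 1;
	}

	/* the mount point for mount_id is assumed known here */
	mount_fd = open("/mnt/data", O_RDONLY | O_DIRECTORY);
	if (mount_fd < 0)
		return 1;

	fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
	if (fd < 0) {
		perror("open_by_handle_at");
		return 1;
	}
	close(fd);
	return 0;
}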
H A Dnamei.c95 * inside the path - always follow.
187 /* The empty path is special. */ getname_flags()
470 * path_get - get a reference to a path
471 * @path: path to get the reference to
473 * Given a path increment the reference count to the dentry and the vfsmount.
475 void path_get(const struct path *path) path_get() argument
477 mntget(path->mnt); path_get()
478 dget(path->dentry); path_get()
483 * path_put - put a reference to a path
484 * @path: path to put the reference to
486  * Given a path, decrement the reference count of the dentry and the vfsmount. path_put()
488 void path_put(const struct path *path) path_put() argument
490 dput(path->dentry); path_put()
491 mntput(path->mnt); path_put()
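path_get()/path_put() above are the struct path refcounting primitives; any successful lookup must eventually be balanced by path_put(). A hedged in-kernel sketch using the exported kern_path():

/* Sketch: resolve a pathname to a struct path, inspect it, release it.
 * kern_path() takes a reference that path_put() must drop. */
static int inspect_path(const char *name)
{
	struct path path;
	int err;

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	pr_info("inode %lu on %s\n",
		d_backing_inode(path.dentry)->i_ino,
		path.mnt->mnt_sb->s_type->name);

	path_put(&path);	/* balance the reference from kern_path() */
	return 0;
}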
497 struct path path; member in struct:nameidata
499 struct path root;
500 struct inode *inode; /* path.dentry.d_inode */
507 struct path link;
564 * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
565  * @path: path to verify path_connected()
570 static bool path_connected(const struct path *path) path_connected() argument
572 struct vfsmount *mnt = path->mnt; path_connected()
578 return is_subdir(path->dentry, mnt->mnt_root); path_connected()
608 path_put(&nd->path); terminate_walk()
626 struct path *path, unsigned seq) legitimize_path()
628 int res = __legitimize_mnt(path->mnt, nd->m_seq); legitimize_path()
631 path->mnt = NULL; legitimize_path()
632 path->dentry = NULL; legitimize_path()
635 if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) { legitimize_path()
636 path->dentry = NULL; legitimize_path()
639 return !read_seqcount_retry(&path->dentry->d_seq, seq); legitimize_path()
658 * Documentation/filesystems/path-lookup.txt). In situations when we can't
664 * to restart the path walk from the beginning in ref-walk mode.
670 * @dentry: child of nd->path.dentry or NULL
674 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
675 * for ref-walk mode. @dentry must be a path found by a do_lookup call on
682 struct dentry *parent = nd->path.dentry; unlazy_walk()
689 if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq))) unlazy_walk()
736 nd->path.mnt = NULL; unlazy_walk()
738 nd->path.dentry = NULL; unlazy_walk()
747 static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq) unlazy_link()
753 nd->path.mnt = NULL; unlazy_link()
754 nd->path.dentry = NULL; unlazy_link()
771 * complete_walk - successful completion of path walk
774 * If we had been in RCU mode, drop out of it and legitimize nd->path.
776 * the path walk or the filesystem doesn't ask for it. Return 0 on
778 * need to drop nd->path.
782 struct dentry *dentry = nd->path.dentry; complete_walk()
825 static void path_put_conditional(struct path *path, struct nameidata *nd) path_put_conditional() argument
827 dput(path->dentry); path_put_conditional()
828 if (path->mnt != nd->path.mnt) path_put_conditional()
829 mntput(path->mnt); path_put_conditional()
832 static inline void path_to_nameidata(const struct path *path, path_to_nameidata() argument
836 dput(nd->path.dentry); path_to_nameidata()
837 if (nd->path.mnt != path->mnt) path_to_nameidata()
838 mntput(nd->path.mnt); path_to_nameidata()
840 nd->path.mnt = path->mnt; path_to_nameidata()
841 nd->path.dentry = path->dentry; path_to_nameidata()
845 * Helper to directly jump to a known parsed path from ->follow_link,
846 * caller must have taken a reference to path beforehand.
848 void nd_jump_link(struct path *path) nd_jump_link() argument
851 path_put(&nd->path); nd_jump_link()
853 nd->path = *path; nd_jump_link()
854 nd->inode = nd->path.dentry->d_inode; nd_jump_link()
878 * processes from failing races against path names that may change out
962 static int may_linkat(struct path *link) may_linkat()
1022 nd->path = nd->root; get_link()
1023 d = nd->path.dentry; get_link()
1031 path_put(&nd->path); get_link()
1032 nd->path = nd->root; get_link()
1034 nd->inode = nd->path.dentry->d_inode; get_link()
1046 * follow_up - Find the mountpoint of path's vfsmount
1048 * Given a path, find the mountpoint of its source file system.
1049 * Replace @path with the path of the mountpoint in the parent mount.
1055 int follow_up(struct path *path) follow_up() argument
1057 struct mount *mnt = real_mount(path->mnt); follow_up()
1070 dput(path->dentry); follow_up()
1071 path->dentry = mountpoint; follow_up()
1072 mntput(path->mnt); follow_up()
1073 path->mnt = &parent->mnt; follow_up()
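follow_up() above steps to the mountpoint in the parent mount and returns 0 once the namespace's root mount is reached, so a bare loop climbs the whole mount tree. A one-line sketch:

/* Sketch: repeatedly hop to the mountpoint in the parent mount;
 * follow_up() returns 0 at the namespace's root mount. */
static void climb_to_root_mount(struct path *path)
{
	while (follow_up(path))
		;
}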
1080 * - return -EISDIR to tell follow_managed() to stop and return the path we
1083 static int follow_automount(struct path *path, struct nameidata *nd, follow_automount() argument
1089 if (!path->dentry->d_op || !path->dentry->d_op->d_automount) follow_automount()
1105 path->dentry->d_inode) follow_automount()
1112 mnt = path->dentry->d_op->d_automount(path); follow_automount()
1120 * the path being looked up; if it wasn't then the remainder of follow_automount()
1121 * the path is inaccessible and we should say so. follow_automount()
1132 /* lock_mount() may release path->mnt on error */ follow_automount()
1133 mntget(path->mnt); follow_automount()
1136 err = finish_automount(mnt, path); follow_automount()
1143 path_put(path); follow_automount()
1144 path->mnt = mnt; follow_automount()
1145 path->dentry = dget(mnt->mnt_root); follow_automount()
1163 static int follow_managed(struct path *path, struct nameidata *nd) follow_managed() argument
1165 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */ follow_managed()
1173 while (managed = ACCESS_ONCE(path->dentry->d_flags), follow_managed()
1179 BUG_ON(!path->dentry->d_op); follow_managed()
1180 BUG_ON(!path->dentry->d_op->d_manage); follow_managed()
1181 ret = path->dentry->d_op->d_manage(path->dentry, false); follow_managed()
1188 struct vfsmount *mounted = lookup_mnt(path); follow_managed()
1190 dput(path->dentry); follow_managed()
1192 mntput(path->mnt); follow_managed()
1193 path->mnt = mounted; follow_managed()
1194 path->dentry = dget(mounted->mnt_root); follow_managed()
1207 ret = follow_automount(path, nd, &need_mntput); follow_managed()
1213 /* We didn't change the current path point */ follow_managed()
1217 if (need_mntput && path->mnt == mnt) follow_managed()
1218 mntput(path->mnt); follow_managed()
1224 path_put_conditional(path, nd); follow_managed()
1228 int follow_down_one(struct path *path) follow_down_one() argument
1232 mounted = lookup_mnt(path); follow_down_one()
1234 dput(path->dentry); follow_down_one()
1235 mntput(path->mnt); follow_down_one()
1236 path->mnt = mounted; follow_down_one()
1237 path->dentry = dget(mounted->mnt_root); follow_down_one()
1254 static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, __follow_mount_rcu() argument
1263 switch (managed_dentry_rcu(path->dentry)) { __follow_mount_rcu()
1273 if (!d_mountpoint(path->dentry)) __follow_mount_rcu()
1274 return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); __follow_mount_rcu()
1276 mounted = __lookup_mnt(path->mnt, path->dentry); __follow_mount_rcu()
1279 path->mnt = &mounted->mnt; __follow_mount_rcu()
1280 path->dentry = mounted->mnt.mnt_root; __follow_mount_rcu()
1282 *seqp = read_seqcount_begin(&path->dentry->d_seq); __follow_mount_rcu()
1288 *inode = path->dentry->d_inode; __follow_mount_rcu()
1291 !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); __follow_mount_rcu()
1301 if (path_equal(&nd->path, &nd->root)) follow_dotdot_rcu()
1303 if (nd->path.dentry != nd->path.mnt->mnt_root) { follow_dotdot_rcu()
1304 struct dentry *old = nd->path.dentry; follow_dotdot_rcu()
1312 nd->path.dentry = parent; follow_dotdot_rcu()
1314 if (unlikely(!path_connected(&nd->path))) follow_dotdot_rcu()
1318 struct mount *mnt = real_mount(nd->path.mnt); follow_dotdot_rcu()
1325 if (&mparent->mnt == nd->path.mnt) follow_dotdot_rcu()
1328 nd->path.dentry = mountpoint; follow_dotdot_rcu()
1329 nd->path.mnt = &mparent->mnt; follow_dotdot_rcu()
1334 while (unlikely(d_mountpoint(nd->path.dentry))) { follow_dotdot_rcu()
1336 mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry); follow_dotdot_rcu()
1341 nd->path.mnt = &mounted->mnt; follow_dotdot_rcu()
1342 nd->path.dentry = mounted->mnt.mnt_root; follow_dotdot_rcu()
1343 inode = nd->path.dentry->d_inode; follow_dotdot_rcu()
1344 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); follow_dotdot_rcu()
1355 int follow_down(struct path *path) follow_down() argument
1360 while (managed = ACCESS_ONCE(path->dentry->d_flags), follow_down()
1373 BUG_ON(!path->dentry->d_op); follow_down()
1374 BUG_ON(!path->dentry->d_op->d_manage); follow_down()
1375 ret = path->dentry->d_op->d_manage( follow_down()
1376 path->dentry, false); follow_down()
1383 struct vfsmount *mounted = lookup_mnt(path); follow_down()
1386 dput(path->dentry); follow_down()
1387 mntput(path->mnt); follow_down()
1388 path->mnt = mounted; follow_down()
1389 path->dentry = dget(mounted->mnt_root); follow_down()
1403 static void follow_mount(struct path *path) follow_mount() argument
1405 while (d_mountpoint(path->dentry)) { follow_mount()
1406 struct vfsmount *mounted = lookup_mnt(path); follow_mount()
1409 dput(path->dentry); follow_mount()
1410 mntput(path->mnt); follow_mount()
1411 path->mnt = mounted; follow_mount()
1412 path->dentry = dget(mounted->mnt_root); follow_mount()
1422 struct dentry *old = nd->path.dentry; follow_dotdot()
1424 if (nd->path.dentry == nd->root.dentry && follow_dotdot()
1425 nd->path.mnt == nd->root.mnt) { follow_dotdot()
1428 if (nd->path.dentry != nd->path.mnt->mnt_root) { follow_dotdot()
1430 nd->path.dentry = dget_parent(nd->path.dentry); follow_dotdot()
1432 if (unlikely(!path_connected(&nd->path))) follow_dotdot()
1436 if (!follow_up(&nd->path)) follow_dotdot()
1439 follow_mount(&nd->path); follow_dotdot()
1440 nd->inode = nd->path.dentry->d_inode; follow_dotdot()
1525  * small and for now I'd prefer to have the fast path as straight as possible. link_path_walk()
1529 struct path *path, struct inode **inode, lookup_fast()
1532 struct vfsmount *mnt = nd->path.mnt; lookup_fast()
1533 struct dentry *dentry, *parent = nd->path.dentry; lookup_fast()
1584 path->mnt = mnt; lookup_fast()
1585 path->dentry = dentry; lookup_fast()
1586 if (likely(__follow_mount_rcu(nd, path, inode, seqp))) lookup_fast()
1614 path->mnt = mnt; lookup_fast()
1615 path->dentry = dentry; lookup_fast()
1616 err = follow_managed(path, nd); lookup_fast()
1618 *inode = d_backing_inode(path->dentry); lookup_fast()
1626 static int lookup_slow(struct nameidata *nd, struct path *path) lookup_slow() argument
1630 parent = nd->path.dentry; lookup_slow()
1638 path->mnt = nd->path.mnt; lookup_slow()
1639 path->dentry = dentry; lookup_slow()
1640 return follow_managed(path, nd); lookup_slow()
1666 static int pick_link(struct nameidata *nd, struct path *link, pick_link()
1676 if (link->mnt == nd->path.mnt) pick_link()
1706 static inline int should_follow_link(struct nameidata *nd, struct path *link, should_follow_link()
1726 struct path path; walk_component() local
1741 err = lookup_fast(nd, &path, &inode, &seq); walk_component()
1746 err = lookup_slow(nd, &path); walk_component()
1752 if (d_is_negative(path.dentry)) walk_component()
1754 inode = d_backing_inode(path.dentry); walk_component()
1759 err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq); walk_component()
1762 path_to_nameidata(&path, nd); walk_component()
1768 path_to_nameidata(&path, nd); walk_component()
1830 * Calculate the length and hash of the path component, and
1869 * We know there's a real path component here of at least
1905 /* At this point we know we have a real path component. */ link_path_walk()
1928 struct dentry *parent = nd->path.dentry; link_path_walk()
1986 if (unlikely(!d_can_lookup(nd->path.dentry))) { link_path_walk()
2014 nd->path = nd->root; path_init()
2018 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); path_init()
2022 path_get(&nd->path); path_init()
2039 nd->path = nd->root; path_init()
2049 nd->path = fs->pwd; path_init()
2050 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); path_init()
2053 get_fs_pwd(current->fs, &nd->path); path_init()
2056 /* Caller must check execute permissions on the starting path component */ path_init()
2072 nd->path = f.file->f_path; path_init()
2075 nd->inode = nd->path.dentry->d_inode; path_init()
2076 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); path_init()
2078 path_get(&nd->path); path_init()
2079 nd->inode = nd->path.dentry->d_inode; path_init()
2085 nd->inode = nd->path.dentry->d_inode; path_init()
2088 if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq))) path_init()
2123 static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path) path_lookupat() argument
2142 if (!d_can_lookup(nd->path.dentry)) path_lookupat()
2145 *path = nd->path; path_lookupat()
2146 nd->path.mnt = NULL; path_lookupat()
2147 nd->path.dentry = NULL; path_lookupat()
2154 struct path *path, struct path *root) filename_lookup()
2165 retval = path_lookupat(&nd, flags | LOOKUP_RCU, path); filename_lookup()
2167 retval = path_lookupat(&nd, flags, path); filename_lookup()
2169 retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path); filename_lookup()
2172 audit_inode(name, path->dentry, flags & LOOKUP_PARENT); filename_lookup()
2180 struct path *parent) path_parentat()
2190 *parent = nd->path; path_parentat()
2191 nd->path.mnt = NULL; path_parentat()
2192 nd->path.dentry = NULL; path_parentat()
2199 unsigned int flags, struct path *parent, filename_parentat()
2226 struct dentry *kern_path_locked(const char *name, struct path *path) kern_path_locked() argument
2233 filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path, kern_path_locked()
2238 path_put(path); kern_path_locked()
2242 mutex_lock_nested(&path->dentry->d_inode->i_mutex, I_MUTEX_PARENT); kern_path_locked()
2243 d = __lookup_hash(&last, path->dentry, 0); kern_path_locked()
2245 mutex_unlock(&path->dentry->d_inode->i_mutex); kern_path_locked()
2246 path_put(path); kern_path_locked()
2252 int kern_path(const char *name, unsigned int flags, struct path *path) kern_path() argument
2255 flags, path, NULL); kern_path()
2260 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
2265 * @path: pointer to struct path to fill
2269 struct path *path) vfs_path_lookup()
2271 struct path root = {.mnt = mnt, .dentry = dentry}; vfs_path_lookup()
2274 flags , path, &root); vfs_path_lookup()
2330 struct path *path, int *empty) user_path_at_empty()
2333 flags, path, NULL); user_path_at_empty()
2341 * path-walking is complete.
2344 user_path_parent(int dfd, const char __user *path, user_path_parent() argument
2345 struct path *parent, user_path_parent()
2351 return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL, user_path_parent()
2358 * @path: pointer to container for result
2361 * need to resolve the path without doing any revalidation.
2366 * cases where it won't are if nd->last refers to a symlink or the path is
2371 * lookup found a negative dentry. The nd->path reference will also be
2374  * 0: if we successfully resolved nd->path and found it not to be a mountpoint_last()
2375 * symlink that needs to be followed. "path" will also be populated.
2376 * The nd->path reference will also be put.
2379 * that needs to be followed. "path" will be populated with the path
2380 * to the link, and nd->path will *not* be put.
2383 mountpoint_last(struct nameidata *nd, struct path *path) mountpoint_last() argument
2387 struct dentry *dir = nd->path.dentry; mountpoint_last()
2401 dentry = dget(nd->path.dentry); mountpoint_last()
2411 * path doesn't actually point to a mounted dentry. mountpoint_last()
2433 path->dentry = dentry; mountpoint_last()
2434 path->mnt = nd->path.mnt; mountpoint_last()
2435 error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW, mountpoint_last()
2439 mntget(path->mnt); mountpoint_last()
2440 follow_mount(path); mountpoint_last()
2445 * path_mountpoint - look up a path to be umounted
2448 * @path: pointer to container for result
2451 * Returns 0 and "path" will be valid on success; Returns error otherwise.
2454 path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path) path_mountpoint() argument
2461 (err = mountpoint_last(nd, path)) > 0) { path_mountpoint()
2473 filename_mountpoint(int dfd, struct filename *name, struct path *path, filename_mountpoint() argument
2481 error = path_mountpoint(&nd, flags | LOOKUP_RCU, path); filename_mountpoint()
2483 error = path_mountpoint(&nd, flags, path); filename_mountpoint()
2485 error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path); filename_mountpoint()
2487 audit_inode(name, path->dentry, 0); filename_mountpoint()
2494 * user_path_mountpoint_at - lookup a path from userland in order to umount it
2498 * @path: pointer to container to hold result
2500 * A umount is a special case for path walking. We're not actually interested
2505 * Returns 0 and populates "path" on success.
2509 struct path *path) user_path_mountpoint_at()
2511 return filename_mountpoint(dfd, getname(name), path, flags); user_path_mountpoint_at()
2515 kern_path_mountpoint(int dfd, const char *name, struct path *path, kern_path_mountpoint() argument
2518 return filename_mountpoint(dfd, getname_kernel(name), path, flags); kern_path_mountpoint()
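user_path_mountpoint_at() above exists for umount(2), which must resolve the final component without revalidating or following it since the target mount may be unreachable. The userspace side, with an illustrative target:

/* Detach-unmount a mount point; MNT_DETACH lazily unmounts even if busy. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (umount2("/mnt/usb", MNT_DETACH)) {	/* "/mnt/usb" is illustrative */
		perror("umount2");
		return 1;
	}
	return 0;
}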
2671 static int may_open(struct path *path, int acc_mode, int flag) may_open() argument
2673 struct dentry *dentry = path->dentry; may_open()
2693 if (path->mnt->mnt_flags & MNT_NODEV) may_open()
2725 struct path *path = &filp->f_path; handle_truncate() local
2726 struct inode *inode = path->dentry->d_inode; handle_truncate()
2735 error = security_path_truncate(path); handle_truncate()
2737 error = do_truncate(path->dentry, 0, handle_truncate()
2752 static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode) may_o_create()
2773 * caller will need to perform the open themselves. @path will have been
2779 struct path *path, struct file *file, atomic_open()
2784 struct inode *dir = nd->path.dentry->d_inode; atomic_open()
2838 error = may_o_create(&nd->path, dentry, mode); atomic_open()
2851 file->f_path.mnt = nd->path.mnt; atomic_open()
2915 path->dentry = dentry; atomic_open()
2916 path->mnt = nd->path.mnt; atomic_open()
2929 * and creations will have been performed and the dentry returned in @path will
2938 static int lookup_open(struct nameidata *nd, struct path *path, lookup_open() argument
2943 struct dentry *dir = nd->path.dentry; lookup_open()
2959 return atomic_open(nd, dentry, path, file, op, got_write, lookup_open()
2988 error = security_path_mknod(&nd->path, dentry, mode, 0); lookup_open()
2997 path->dentry = dentry; lookup_open()
2998 path->mnt = nd->path.mnt; lookup_open()
3013 struct dentry *dir = nd->path.dentry; do_last()
3020 struct path save_parent = { .dentry = NULL, .mnt = NULL }; do_last()
3021 struct path path; do_last() local
3039 error = lookup_fast(nd, &path, &inode, &seq); do_last()
3066 error = mnt_want_write(nd->path.mnt); do_last()
3076 error = lookup_open(nd, &path, file, op, got_write, opened); do_last()
3096 path_to_nameidata(&path, nd); do_last()
3103 if (d_is_positive(path.dentry)) do_last()
3104 audit_inode(nd->name, path.dentry, 0); do_last()
3112 mnt_drop_write(nd->path.mnt); do_last()
3117 path_to_nameidata(&path, nd); do_last()
3121 error = follow_managed(&path, nd); do_last()
3127 if (unlikely(d_is_negative(path.dentry))) { do_last()
3128 path_to_nameidata(&path, nd); do_last()
3131 inode = d_backing_inode(path.dentry); do_last()
3135 error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW, do_last()
3140 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) { do_last()
3141 path_to_nameidata(&path, nd); do_last()
3143 save_parent.dentry = nd->path.dentry; do_last()
3144 save_parent.mnt = mntget(path.mnt); do_last()
3145 nd->path.dentry = path.dentry; do_last()
3157 audit_inode(nd->name, nd->path.dentry, 0); do_last()
3158 if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) { do_last()
3163 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) do_last()
3166 if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry)) do_last()
3168 if (!d_is_reg(nd->path.dentry)) do_last()
3172 error = mnt_want_write(nd->path.mnt); do_last()
3178 error = may_open(&nd->path, acc_mode, open_flag); do_last()
3183 error = vfs_open(&nd->path, file, current_cred()); do_last()
3210 mnt_drop_write(nd->path.mnt); do_last()
3224 path_put(&nd->path); do_last()
3225 nd->path = save_parent; do_last()
3230 mnt_drop_write(nd->path.mnt); do_last()
3244 struct path path; do_tmpfile() local
3245 int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path); do_tmpfile()
3248 error = mnt_want_write(path.mnt); do_tmpfile()
3251 dir = path.dentry->d_inode; do_tmpfile()
3260 child = d_alloc(path.dentry, &name); do_tmpfile()
3265 dput(path.dentry); do_tmpfile()
3266 path.dentry = child; do_tmpfile()
3272 error = may_open(&path, MAY_OPEN, op->open_flag); do_tmpfile()
3275 file->f_path.mnt = path.mnt; do_tmpfile()
3289 mnt_drop_write(path.mnt); do_tmpfile()
3291 path_put(&path); do_tmpfile()
3393 struct path *path, unsigned int lookup_flags) filename_create()
3408 name = filename_parentat(dfd, name, lookup_flags, path, &last, &type); filename_create()
3420 err2 = mnt_want_write(path->mnt); filename_create()
3425 mutex_lock_nested(&path->dentry->d_inode->i_mutex, I_MUTEX_PARENT); filename_create()
3426 dentry = __lookup_hash(&last, path->dentry, lookup_flags); filename_create()
3454 mutex_unlock(&path->dentry->d_inode->i_mutex); filename_create()
3456 mnt_drop_write(path->mnt); filename_create()
3458 path_put(path); filename_create()
3464 struct path *path, unsigned int lookup_flags) kern_path_create()
3467 path, lookup_flags); kern_path_create()
3471 void done_path_create(struct path *path, struct dentry *dentry) done_path_create() argument
3474 mutex_unlock(&path->dentry->d_inode->i_mutex); done_path_create()
3475 mnt_drop_write(path->mnt); done_path_create()
3476 path_put(path); done_path_create()
3481 struct path *path, unsigned int lookup_flags) user_path_create()
3483 return filename_create(dfd, getname(pathname), path, lookup_flags); user_path_create()
3536 struct path path; SYSCALL_DEFINE4() local
3544 dentry = user_path_create(dfd, filename, &path, lookup_flags); SYSCALL_DEFINE4()
3548 if (!IS_POSIXACL(path.dentry->d_inode)) SYSCALL_DEFINE4()
3550 error = security_path_mknod(&path, dentry, mode, dev); SYSCALL_DEFINE4()
3555 error = vfs_create(path.dentry->d_inode,dentry,mode,true); SYSCALL_DEFINE4()
3558 error = vfs_mknod(path.dentry->d_inode,dentry,mode, SYSCALL_DEFINE4()
3562 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0); SYSCALL_DEFINE4()
3566 done_path_create(&path, dentry); SYSCALL_DEFINE4()
3608 struct path path; SYSCALL_DEFINE3() local
3613 dentry = user_path_create(dfd, pathname, &path, lookup_flags); SYSCALL_DEFINE3()
3617 if (!IS_POSIXACL(path.dentry->d_inode)) SYSCALL_DEFINE3()
3619 error = security_path_mkdir(&path, dentry, mode); SYSCALL_DEFINE3()
3621 error = vfs_mkdir(path.dentry->d_inode, dentry, mode); SYSCALL_DEFINE3()
3622 done_path_create(&path, dentry); SYSCALL_DEFINE3()
3704 struct path path; do_rmdir() local
3710 &path, &last, &type, lookup_flags); do_rmdir()
3726 error = mnt_want_write(path.mnt); do_rmdir()
3730 mutex_lock_nested(&path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); do_rmdir()
3731 dentry = __lookup_hash(&last, path.dentry, lookup_flags); do_rmdir()
3739 error = security_path_rmdir(&path, dentry); do_rmdir()
3742 error = vfs_rmdir(path.dentry->d_inode, dentry); do_rmdir()
3746 mutex_unlock(&path.dentry->d_inode->i_mutex); do_rmdir()
3747 mnt_drop_write(path.mnt); do_rmdir()
3749 path_put(&path); do_rmdir()
3832 struct path path; do_unlinkat() local
3840 &path, &last, &type, lookup_flags); do_unlinkat()
3848 error = mnt_want_write(path.mnt); do_unlinkat()
3852 mutex_lock_nested(&path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); do_unlinkat()
3853 dentry = __lookup_hash(&last, path.dentry, lookup_flags); do_unlinkat()
3863 error = security_path_unlink(&path, dentry); do_unlinkat()
3866 error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode); do_unlinkat()
3870 mutex_unlock(&path.dentry->d_inode->i_mutex); do_unlinkat()
3879 mnt_drop_write(path.mnt); do_unlinkat()
3881 path_put(&path); do_unlinkat()
3943 struct path path; SYSCALL_DEFINE3() local
3950 dentry = user_path_create(newdfd, newname, &path, lookup_flags); SYSCALL_DEFINE3()
3955 error = security_path_symlink(&path, dentry, from->name); SYSCALL_DEFINE3()
3957 error = vfs_symlink(path.dentry->d_inode, dentry, from->name); SYSCALL_DEFINE3()
3958 done_path_create(&path, dentry); SYSCALL_DEFINE3()
4059 struct path old_path, new_path; SYSCALL_DEFINE5()
4317 struct path old_path, new_path; SYSCALL_DEFINE5()
625 legitimize_path(struct nameidata *nd, struct path *path, unsigned seq) legitimize_path() argument
1528 lookup_fast(struct nameidata *nd, struct path *path, struct inode **inode, unsigned *seqp) lookup_fast() argument
2153 filename_lookup(int dfd, struct filename *name, unsigned flags, struct path *path, struct path *root) filename_lookup() argument
2267 vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, const char *name, unsigned int flags, struct path *path) vfs_path_lookup() argument
2329 user_path_at_empty(int dfd, const char __user *name, unsigned flags, struct path *path, int *empty) user_path_at_empty() argument
2508 user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags, struct path *path) user_path_mountpoint_at() argument
2778 atomic_open(struct nameidata *nd, struct dentry *dentry, struct path *path, struct file *file, const struct open_flags *op, bool got_write, bool need_lookup, int *opened) atomic_open() argument
3392 filename_create(int dfd, struct filename *name, struct path *path, unsigned int lookup_flags) filename_create() argument
3463 kern_path_create(int dfd, const char *pathname, struct path *path, unsigned int lookup_flags) kern_path_create() argument
3480 user_path_create(int dfd, const char __user *pathname, struct path *path, unsigned int lookup_flags) user_path_create() argument
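[Editor's note] The mknod/mkdir/symlink syscalls above all follow one create-side pattern: user_path_create() resolves the parent (taking mnt_want_write() and the parent i_mutex), the vfs_* operation runs, and done_path_create() undoes all of it. A minimal sketch of that pairing, assuming a hypothetical helper example_create() (not a kernel function), error handling trimmed:

/* Sketch only: mirrors the SYSCALL_DEFINE4(mknod...) pattern above. */
static long example_create(int dfd, const char __user *pathname, umode_t mode)
{
	struct path path;
	struct dentry *dentry;
	long error;

	/* Resolves the parent and takes write access + i_mutex for us. */
	dentry = user_path_create(dfd, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = security_path_mknod(&path, dentry, mode, 0);
	if (!error)
		error = vfs_create(path.dentry->d_inode, dentry, mode, true);

	/* Drops i_mutex, write access and the path reference. */
	done_path_create(&path, dentry);
	return error;
}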
H A Ddcookies.c6 * Persistent cookie-path mappings. These are used by
27 #include <linux/path.h>
36 struct path path; member in struct:dcookie_struct
55 return (unsigned long)dcs->path.dentry; dcookie_value()
93 static struct dcookie_struct *alloc_dcookie(struct path *path) alloc_dcookie() argument
101 d = path->dentry; alloc_dcookie()
106 dcs->path = *path; alloc_dcookie()
107 path_get(path); alloc_dcookie()
116 int get_dcookie(struct path *path, unsigned long *cookie) get_dcookie() argument
128 if (path->dentry->d_flags & DCACHE_COOKIE) { get_dcookie()
129 dcs = find_dcookie((unsigned long)path->dentry); get_dcookie()
131 dcs = alloc_dcookie(path); get_dcookie()
147 * to retrieve the path.
154 char * path; SYSCALL_DEFINE3() local
158 /* we could leak path information to users SYSCALL_DEFINE3()
180 path = d_path(&dcs->path, kbuf, PAGE_SIZE); SYSCALL_DEFINE3()
184 if (IS_ERR(path)) { SYSCALL_DEFINE3()
185 err = PTR_ERR(path); SYSCALL_DEFINE3()
191 pathlen = kbuf + PAGE_SIZE - path; SYSCALL_DEFINE3()
194 if (copy_to_user(buf, path, pathlen)) SYSCALL_DEFINE3()
273 struct dentry *d = dcs->path.dentry; free_dcookie()
279 path_put(&dcs->path); free_dcookie()
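[Editor's note] A sketch (not kernel code) of how a profiling client consumes the dcookie API above; the cookie is derived from the dentry pointer and stays valid while the dcookie_struct pins the path via path_get():

static int example_record_sample(struct file *file, unsigned long *cookie)
{
	/* file->f_path is the struct path get_dcookie() pins and maps. */
	return get_dcookie(&file->f_path, cookie);
}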
H A Dopen.c68 long vfs_truncate(struct path *path, loff_t length) vfs_truncate() argument
73 inode = path->dentry->d_inode; vfs_truncate()
81 error = mnt_want_write(path->mnt); vfs_truncate()
107 error = security_path_truncate(path); vfs_truncate()
109 error = do_truncate(path->dentry, length, 0, NULL); vfs_truncate()
114 mnt_drop_write(path->mnt); vfs_truncate()
123 struct path path; do_sys_truncate() local
130 error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); do_sys_truncate()
132 error = vfs_truncate(&path, length); do_sys_truncate()
133 path_put(&path); do_sys_truncate()
142 SYSCALL_DEFINE2(truncate, const char __user *, path, long, length) SYSCALL_DEFINE2()
144 return do_sys_truncate(path, length); SYSCALL_DEFINE2()
148 COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length) COMPAT_SYSCALL_DEFINE2()
150 return do_sys_truncate(path, length); COMPAT_SYSCALL_DEFINE2()
215 SYSCALL_DEFINE2(truncate64, const char __user *, path, loff_t, length) SYSCALL_DEFINE2()
217 return do_sys_truncate(path, length); SYSCALL_DEFINE2()
341 struct path path; SYSCALL_DEFINE3() local
368 res = user_path_at(dfd, filename, lookup_flags, &path); SYSCALL_DEFINE3()
372 inode = d_backing_inode(path.dentry); SYSCALL_DEFINE3()
380 if (path_noexec(&path)) SYSCALL_DEFINE3()
398 if (__mnt_is_readonly(path.mnt)) SYSCALL_DEFINE3()
402 path_put(&path); SYSCALL_DEFINE3()
420 struct path path; SYSCALL_DEFINE1() local
424 error = user_path_at(AT_FDCWD, filename, lookup_flags, &path); SYSCALL_DEFINE1()
428 error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); SYSCALL_DEFINE1()
432 set_fs_pwd(current->fs, &path); SYSCALL_DEFINE1()
435 path_put(&path); SYSCALL_DEFINE1()
471 struct path path; SYSCALL_DEFINE1() local
475 error = user_path_at(AT_FDCWD, filename, lookup_flags, &path); SYSCALL_DEFINE1()
479 error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); SYSCALL_DEFINE1()
486 error = security_path_chroot(&path); SYSCALL_DEFINE1()
490 set_fs_root(current->fs, &path); SYSCALL_DEFINE1()
493 path_put(&path); SYSCALL_DEFINE1()
502 static int chmod_common(struct path *path, umode_t mode) chmod_common() argument
504 struct inode *inode = path->dentry->d_inode; chmod_common()
509 error = mnt_want_write(path->mnt); chmod_common()
514 error = security_path_chmod(path, mode); chmod_common()
519 error = notify_change(path->dentry, &newattrs, &delegated_inode); chmod_common()
527 mnt_drop_write(path->mnt); chmod_common()
546 struct path path; SYSCALL_DEFINE3() local
550 error = user_path_at(dfd, filename, lookup_flags, &path); SYSCALL_DEFINE3()
552 error = chmod_common(&path, mode); SYSCALL_DEFINE3()
553 path_put(&path); SYSCALL_DEFINE3()
567 static int chown_common(struct path *path, uid_t user, gid_t group) chown_common() argument
569 struct inode *inode = path->dentry->d_inode; chown_common()
597 error = security_path_chown(path, uid, gid); chown_common()
599 error = notify_change(path->dentry, &newattrs, &delegated_inode); chown_common()
612 struct path path; SYSCALL_DEFINE5() local
623 error = user_path_at(dfd, filename, lookup_flags, &path); SYSCALL_DEFINE5()
626 error = mnt_want_write(path.mnt); SYSCALL_DEFINE5()
629 error = chown_common(&path, user, group); SYSCALL_DEFINE5()
630 mnt_drop_write(path.mnt); SYSCALL_DEFINE5()
632 path_put(&path); SYSCALL_DEFINE5()
835 * vfs_open - open the file at the given path
836 * @path: path to open
840 int vfs_open(const struct path *path, struct file *file, vfs_open() argument
843 struct inode *inode = vfs_select_inode(path->dentry, file->f_flags); vfs_open()
848 file->f_path = *path; vfs_open()
852 struct file *dentry_open(const struct path *path, int flags, dentry_open() argument
861 BUG_ON(!path->mnt); dentry_open()
866 error = vfs_open(path, f, cred); dentry_open()
954 * @name: struct filename containing path to open
972 * @filename: path to open
H A Dinternal.h15 struct path;
51 extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *);
53 const char *, unsigned int, struct path *);
61 extern struct vfsmount *lookup_mnt(struct path *);
62 extern int finish_automount(struct vfsmount *, struct path *);
76 extern void chroot_fs_refs(const struct path *, const struct path *);
110 extern int vfs_open(const struct path *, struct file *, const struct cred *);
H A Dstat.c42 * @path: file to get attributes from
52 int vfs_getattr_nosec(struct path *path, struct kstat *stat) vfs_getattr_nosec() argument
54 struct inode *inode = d_backing_inode(path->dentry); vfs_getattr_nosec()
57 return inode->i_op->getattr(path->mnt, path->dentry, stat); vfs_getattr_nosec()
65 int vfs_getattr(struct path *path, struct kstat *stat) vfs_getattr() argument
69 retval = security_inode_getattr(path); vfs_getattr()
72 return vfs_getattr_nosec(path, stat); vfs_getattr()
93 struct path path; vfs_fstatat() local
106 error = user_path_at(dfd, filename, lookup_flags, &path); vfs_fstatat()
110 error = vfs_getattr(&path, stat); vfs_fstatat()
111 path_put(&path); vfs_fstatat()
318 struct path path; SYSCALL_DEFINE4() local
327 error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty); SYSCALL_DEFINE4()
329 struct inode *inode = d_backing_inode(path.dentry); SYSCALL_DEFINE4()
333 error = security_inode_readlink(path.dentry); SYSCALL_DEFINE4()
335 touch_atime(&path); SYSCALL_DEFINE4()
336 error = inode->i_op->readlink(path.dentry, SYSCALL_DEFINE4()
340 path_put(&path); SYSCALL_DEFINE4()
349 SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf, SYSCALL_DEFINE3()
352 return sys_readlinkat(AT_FDCWD, path, buf, bufsiz); SYSCALL_DEFINE3()
H A Dstatfs.c66 int vfs_statfs(struct path *path, struct kstatfs *buf) vfs_statfs() argument
70 error = statfs_by_dentry(path->dentry, buf); vfs_statfs()
72 buf->f_flags = calculate_f_flags(path->mnt); vfs_statfs()
79 struct path path; user_statfs() local
83 error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); user_statfs()
85 error = vfs_statfs(&path, st); user_statfs()
86 path_put(&path); user_statfs()
H A Dutimes.c51 static int utimes_common(struct path *path, struct timespec *times) utimes_common() argument
55 struct inode *inode = path->dentry->d_inode; utimes_common()
58 error = mnt_want_write(path->mnt); utimes_common()
107 error = notify_change(path->dentry, &newattrs, &delegated_inode); utimes_common()
116 mnt_drop_write(path->mnt); utimes_common()
124 * @filename: path name or NULL
163 struct path path; do_utimes() local
169 error = user_path_at(dfd, filename, lookup_flags, &path); do_utimes()
173 error = utimes_common(&path, times); do_utimes()
174 path_put(&path); do_utimes()
194 /* Nothing to do, we must not even check the path. */ SYSCALL_DEFINE4()
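[Editor's note] open.c, stat.c, statfs.c and utimes.c above all repeat one lookup discipline: user_path_at() fills a struct path, the operation uses it, path_put() drops it. A sketch with a hypothetical helper example_stat_size() (not kernel code):

static long example_stat_size(const char __user *filename, loff_t *size)
{
	struct path path;
	struct kstat stat;
	int error;

	error = user_path_at(AT_FDCWD, filename, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	error = vfs_getattr(&path, &stat);
	if (!error)
		*size = stat.size;

	path_put(&path);	/* every successful lookup pairs with a put */
	return error;
}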
H A Danon_inodes.c75 struct path path; anon_inode_getfile() local
92 path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this); anon_inode_getfile()
93 if (!path.dentry) anon_inode_getfile()
96 path.mnt = mntget(anon_inode_mnt); anon_inode_getfile()
103 d_instantiate(path.dentry, anon_inode_inode); anon_inode_getfile()
105 file = alloc_file(&path, OPEN_FMODE(flags), fops); anon_inode_getfile()
116 path_put(&path); anon_inode_getfile()
H A Dnamespace.c663 * lookup_mnt - Return the first child mount mounted at path
678 struct vfsmount *lookup_mnt(struct path *path) lookup_mnt() argument
687 child_mnt = __lookup_mnt(path->mnt, path->dentry); lookup_mnt()
826 static void detach_mnt(struct mount *mnt, struct path *old_path) detach_mnt()
1156 struct vfsmount *mnt_clone_internal(struct path *path) mnt_clone_internal() argument
1159 p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE); mnt_clone_internal()
1547 * During unlink, rmdir, and d_drop it is possible to lose the path

1597 struct path path; SYSCALL_DEFINE2() local
1611 retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path); SYSCALL_DEFINE2()
1614 mnt = real_mount(path.mnt); SYSCALL_DEFINE2()
1616 if (path.dentry != path.mnt->mnt_root) SYSCALL_DEFINE2()
1629 dput(path.dentry); SYSCALL_DEFINE2()
1741 struct vfsmount *collect_mounts(struct path *path) collect_mounts() argument
1745 if (!check_mnt(real_mount(path->mnt))) collect_mounts()
1748 tree = copy_tree(real_mount(path->mnt), path->dentry, collect_mounts()
1766 * clone_private_mount - create a private clone of a path
1768 * This creates a new vfsmount, which will be the clone of @path. The new will
1774 struct vfsmount *clone_private_mount(struct path *path) clone_private_mount() argument
1776 struct mount *old_mnt = real_mount(path->mnt); clone_private_mount()
1783 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); clone_private_mount()
1900 struct path *parent_path) attach_recursive_mnt()
1951 static struct mountpoint *lock_mount(struct path *path) lock_mount() argument
1954 struct dentry *dentry = path->dentry; lock_mount()
1962 mnt = lookup_mnt(path); lock_mount()
1975 mutex_unlock(&path->dentry->d_inode->i_mutex); lock_mount()
1976 path_put(path); lock_mount()
1977 path->mnt = mnt; lock_mount()
1978 dentry = path->dentry = dget(mnt->mnt_root); lock_mount()
2022 static int do_change_type(struct path *path, int flag) do_change_type() argument
2025 struct mount *mnt = real_mount(path->mnt); do_change_type()
2030 if (path->dentry != path->mnt->mnt_root) do_change_type()
2070 static int do_loopback(struct path *path, const char *old_name, do_loopback() argument
2073 struct path old_path; do_loopback()
2087 mp = lock_mount(path); do_loopback()
2093 parent = real_mount(path->mnt); do_loopback()
2155 static int do_remount(struct path *path, int flags, int mnt_flags, do_remount() argument
2159 struct super_block *sb = path->mnt->mnt_sb; do_remount()
2160 struct mount *mnt = real_mount(path->mnt); do_remount()
2165 if (path->dentry != path->mnt->mnt_root) do_remount()
2207 err = change_mount_flags(path->mnt, flags); do_remount()
2233 static int do_move_mount(struct path *path, const char *old_name) do_move_mount() argument
2235 struct path old_path, parent_path; do_move_mount()
2246 mp = lock_mount(path); do_move_mount()
2252 p = real_mount(path->mnt); do_move_mount()
2268 if (d_is_dir(path->dentry) != do_move_mount()
2287 err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path); do_move_mount()
2329 static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags) do_add_mount() argument
2337 mp = lock_mount(path); do_add_mount()
2341 parent = real_mount(path->mnt); do_add_mount()
2354 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && do_add_mount()
2355 path->mnt->mnt_root == path->dentry) do_add_mount()
2376 static int do_new_mount(struct path *path, const char *fstype, int flags, do_new_mount() argument
2418 err = do_add_mount(real_mount(mnt), path, mnt_flags); do_new_mount()
2424 int finish_automount(struct vfsmount *m, struct path *path) finish_automount() argument
2433 if (m->mnt_sb == path->mnt->mnt_sb && finish_automount()
2434 m->mnt_root == path->dentry) { finish_automount()
2439 err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); finish_automount()
2659 struct path path; do_mount() local
2672 retval = user_path(dir_name, &path); do_mount()
2676 retval = security_sb_mount(dev_name, &path, do_mount()
2708 mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK; do_mount()
2716 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, do_mount()
2719 retval = do_loopback(&path, dev_name, flags & MS_REC); do_mount()
2721 retval = do_change_type(&path, flags); do_mount()
2723 retval = do_move_mount(&path, dev_name); do_mount()
2725 retval = do_new_mount(&path, type_page, flags, mnt_flags, do_mount()
2728 path_put(&path); do_mount()
2867 struct path path; mount_subtree() local
2875 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); mount_subtree()
2883 s = path.mnt->mnt_sb; mount_subtree()
2885 mntput(path.mnt); mount_subtree()
2889 return path.dentry; mount_subtree()
2928 * Return true if path is reachable from root
2933 const struct path *root) is_path_reachable()
2942 int path_is_under(struct path *path1, struct path *path2) path_is_under()
2980 struct path new, old, parent_path, root_parent, root; SYSCALL_DEFINE2()
3079 struct path root; init_mount_tree()
3187 struct path ns_root; current_chrooted()
3188 struct path fs_root; current_chrooted()
3311 struct path root; mntns_install()
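[Editor's note] Of the namespace.c entry points above, clone_private_mount() is the documented external one. A sketch of consuming it under the stated semantics (a private clone that does not propagate back), with mntput() assumed as the release side:

static int example_private_clone(struct path *path)
{
	struct vfsmount *mnt = clone_private_mount(path);

	if (IS_ERR(mnt))
		return PTR_ERR(mnt);
	/* ... use mnt->mnt_root privately; changes do not propagate ... */
	mntput(mnt);
	return 0;
}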
H A Dnsfs.c47 void *ns_get_path(struct path *path, struct task_struct *task, ns_get_path() argument
73 path->mnt = mnt; ns_get_path()
74 path->dentry = dentry; ns_get_path()
/linux-4.4.14/security/keys/
H A Dbig_key.c60 struct path *path = (struct path *)&prep->payload.data[big_key_path]; big_key_preparse() local
98 *path = file->f_path; big_key_preparse()
99 path_get(path); big_key_preparse()
124 struct path *path = (struct path *)&prep->payload.data[big_key_path]; big_key_free_preparse() local
125 path_put(path); big_key_free_preparse()
137 struct path *path = (struct path *)&key->payload.data[big_key_path]; big_key_revoke() local
143 vfs_truncate(path, 0); big_key_revoke()
154 struct path *path = (struct path *)&key->payload.data[big_key_path]; big_key_destroy() local
155 path_put(path); big_key_destroy()
156 path->mnt = NULL; big_key_destroy()
157 path->dentry = NULL; big_key_destroy()
192 struct path *path = (struct path *)&key->payload.data[big_key_path]; big_key_read() local
196 file = dentry_open(path, O_RDONLY, current_cred()); big_key_read()
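[Editor's note] A sketch of reading a big_key payload back through the stored struct path, following big_key_read() above; the big_key_path payload index comes from the listing, and the 4.4-era kernel_read(file, offset, buf, count) signature is assumed:

static int example_read_back(struct key *key, char *buf, size_t len)
{
	struct path *path = (struct path *)&key->payload.data[big_key_path];
	struct file *file;
	int ret;

	file = dentry_open(path, O_RDONLY, current_cred());
	if (IS_ERR(file))
		return PTR_ERR(file);
	ret = kernel_read(file, 0, buf, len);	/* shmem-backed payload */
	fput(file);
	return ret;
}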
/linux-4.4.14/include/net/iucv/
H A Diucv.h42 * path until an iucv_path_resume is issued.
87 * pathid: 16 bit path identification
89 * flags: properties of the path: IPRMDATA, IPQUSCE, IPPRTY
91 * private: private information of the handler associated with the path
92 * list: list_head for the iucv_handler path list.
135 * type 0x01 has been received. The base code allocates a path
136 * structure and "asks" the handler if this path belongs to the
137 * handler. To accept the path the path_pending function needs
147 * type 0x02 has been received for a path that has been established
155 * its end of the communication path. The path still exists and
157 * shuts down the other end of the path as well.
163 * the path. Delivery of messages is stopped until iucv_path_resume
170 * the path.
218 * Allocate a new path structure for use with iucv_connect.
221 * path structure.
225 struct iucv_path *path; iucv_path_alloc() local
227 path = kzalloc(sizeof(struct iucv_path), gfp); iucv_path_alloc()
228 if (path) { iucv_path_alloc()
229 path->msglim = msglim; iucv_path_alloc()
230 path->flags = flags; iucv_path_alloc()
232 return path; iucv_path_alloc()
237 * @path: address of iucv path structure
239 * Frees a path structure.
241 static inline void iucv_path_free(struct iucv_path *path) iucv_path_free() argument
243 kfree(path); iucv_path_free()
248 * @path: address of iucv path structure
251 * @private: private data passed to interrupt handlers for this path
254 * external interrupt and now wishes to complete the IUCV communication path.
258 int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
263 * @path: address of iucv path structure
268 * @private: private data passed to interrupt handlers for this path
270 * This function establishes an IUCV path. Although the connect may complete
271 * successfully, you are not able to use the path until you receive an IUCV
276 int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
282 * @path: address of iucv path structure
285 * This function temporarily suspends incoming messages on an IUCV path.
286 * You can later reactivate the path by invoking the iucv_resume function.
290 int iucv_path_quiesce(struct iucv_path *path, u8 *userdata);
294 * @path: address of iucv path structure
297 * This function resumes incoming messages on an IUCV path that has
302 int iucv_path_resume(struct iucv_path *path, u8 *userdata);
306 * @path: address of iucv path structure
309 * This function terminates an IUCV path.
313 int iucv_path_sever(struct iucv_path *path, u8 *userdata);
317 * @path: address of iucv path structure
325 int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
330 * @path: address of iucv path structure
345 int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
350 * @path: address of iucv path structure
365 int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
371 * @path: address of iucv path structure
380 int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg);
384 * @path: address of iucv path structure
397 int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
402 * @path: address of iucv path structure
417 int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
422 * @path: address of iucv path structure
437 int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
442 * @path: address of iucv path structure
459 int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
464 int (*message_receive)(struct iucv_path *path, struct iucv_message *msg,
466 int (*__message_receive)(struct iucv_path *path,
469 int (*message_reply)(struct iucv_path *path, struct iucv_message *msg,
471 int (*message_reject)(struct iucv_path *path, struct iucv_message *msg);
472 int (*message_send)(struct iucv_path *path, struct iucv_message *msg,
474 int (*__message_send)(struct iucv_path *path, struct iucv_message *msg,
476 int (*message_send2way)(struct iucv_path *path,
479 int (*message_purge)(struct iucv_path *path, struct iucv_message *msg,
481 int (*path_accept)(struct iucv_path *path, struct iucv_handler *handler,
483 int (*path_connect)(struct iucv_path *path,
486 int (*path_quiesce)(struct iucv_path *path, u8 userdata[16]);
487 int (*path_resume)(struct iucv_path *path, u8 userdata[16]);
488 int (*path_sever)(struct iucv_path *path, u8 userdata[16]);
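[Editor's note] A sketch of the path lifecycle the kernel-doc above describes: allocate, connect, then sever and free. The six-argument iucv_path_connect() form (userid, system, userdata, private) and NULL userdata are assumptions from the 4.4-era API; msglim/flags values are placeholders:

static int example_iucv_connect(struct iucv_handler *handler,
				u8 *userid, void *private)
{
	struct iucv_path *path;
	int rc;

	path = iucv_path_alloc(10 /* msglim */, IUCV_IPRMDATA, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	rc = iucv_path_connect(path, handler, userid, NULL, NULL, private);
	if (rc) {
		iucv_path_free(path);	/* connect failed: just free */
		return rc;
	}
	/* ... exchange messages; on shutdown: */
	iucv_path_sever(path, NULL);
	iucv_path_free(path);
	return 0;
}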
/linux-4.4.14/include/video/
H A Dmmp_disp.h79 /* parameters used by path/overlay */
106 /* path related para: mode */
156 /* overlay describes a z-order indexed slot in each path. */
160 struct mmp_path *path; member in struct:mmp_overlay
188 /* path name used to connect to the proper path configured */
202 int (*check_status)(struct mmp_path *path);
203 struct mmp_overlay *(*get_overlay)(struct mmp_path *path,
205 int (*get_modelist)(struct mmp_path *path,
209 void (*set_mode)(struct mmp_path *path, struct mmp_mode *mode);
210 void (*set_onoff)(struct mmp_path *path, int status);
214 /* path output types */
221 /* path is main part of mmp-disp */
251 static inline void mmp_path_set_mode(struct mmp_path *path, mmp_path_set_mode() argument
254 if (path) mmp_path_set_mode()
255 path->ops.set_mode(path, mode); mmp_path_set_mode()
257 static inline void mmp_path_set_onoff(struct mmp_path *path, int status) mmp_path_set_onoff() argument
259 if (path) mmp_path_set_onoff()
260 path->ops.set_onoff(path, status); mmp_path_set_onoff()
262 static inline int mmp_path_get_modelist(struct mmp_path *path, mmp_path_get_modelist() argument
265 if (path) mmp_path_get_modelist()
266 return path->ops.get_modelist(path, modelist); mmp_path_get_modelist()
270 struct mmp_path *path, int overlay_id) mmp_path_get_overlay()
272 if (path) mmp_path_get_overlay()
273 return path->ops.get_overlay(path, overlay_id); mmp_path_get_overlay()
303 * driver data is set from each detailed ctrl driver for path usage
313 void (*set_mode)(struct mmp_path *path, struct mmp_mode *mode);
314 void (*set_onoff)(struct mmp_path *path, int status);
321 extern void mmp_unregister_path(struct mmp_path *path);
269 mmp_path_get_overlay( struct mmp_path *path, int overlay_id) mmp_path_get_overlay() argument
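[Editor's note] A sketch of driving a display path through the NULL-safe inline wrappers above; the mode comes from the caller, and status == 1 meaning "on" is an assumption about set_onoff():

static void example_enable_panel(struct mmp_path *path, struct mmp_mode *mode)
{
	struct mmp_overlay *ovly;

	mmp_path_set_mode(path, mode);		/* no-op if path is NULL */
	ovly = mmp_path_get_overlay(path, 0);	/* z-order slot 0 */
	if (ovly) {
		/* ... program the overlay window and base address ... */
	}
	mmp_path_set_onoff(path, 1);		/* assumed: 1 = on */
}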
/linux-4.4.14/arch/sh/drivers/pci/
H A Dfixups-cayman.c13 5V slots get into the CPU via a different path from the IRQ lines pcibios_map_platform_irq()
15 interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling' pcibios_map_platform_irq()
34 } path[4]; pcibios_map_platform_irq() local
39 slot = path[i].slot = PCI_SLOT(dev->devfn); pcibios_map_platform_irq()
40 pin = path[i].pin = pci_swizzle_interrupt_pin(dev, pin); pcibios_map_platform_irq()
43 if (i > 3) panic("PCI path to root bus too long!\n"); pcibios_map_platform_irq()
57 slot = path[i].slot; pcibios_map_platform_irq()
58 pin = path[i].pin; pcibios_map_platform_irq()
65 slot = path[i].slot; pcibios_map_platform_irq()
66 pin = path[i].pin; pcibios_map_platform_irq()
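[Editor's note] A sketch of the walk the Cayman fixup performs: record a (slot, pin) pair at each hop while climbing toward the root bus, swizzling the pin across each bridge. The hop struct and helper are illustrative, not kernel code:

struct hop { u8 slot, pin; };

static int example_walk_to_root(struct pci_dev *dev, u8 pin,
				struct hop *path, int max_hops)
{
	int i = 0;

	while (dev->bus->parent && i < max_hops) {
		path[i].slot = PCI_SLOT(dev->devfn);
		path[i].pin = pci_swizzle_interrupt_pin(dev, pin);
		pin = path[i].pin;
		dev = dev->bus->self;	/* hop to the upstream bridge */
		i++;
	}
	return i;	/* number of hops recorded */
}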
/linux-4.4.14/fs/btrfs/tests/
H A Dqgroup-tests.c41 struct btrfs_path *path; insert_normal_tree_ref() local
53 path = btrfs_alloc_path(); insert_normal_tree_ref()
54 if (!path) { insert_normal_tree_ref()
55 test_msg("Couldn't allocate path\n"); insert_normal_tree_ref()
59 path->leave_spinning = 1; insert_normal_tree_ref()
60 ret = btrfs_insert_empty_item(&trans, root, path, &ins, size); insert_normal_tree_ref()
63 btrfs_free_path(path); insert_normal_tree_ref()
67 leaf = path->nodes[0]; insert_normal_tree_ref()
68 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); insert_normal_tree_ref()
83 btrfs_free_path(path); insert_normal_tree_ref()
92 struct btrfs_path *path; add_tree_ref() local
103 path = btrfs_alloc_path(); add_tree_ref()
104 if (!path) { add_tree_ref()
105 test_msg("Couldn't allocate path\n"); add_tree_ref()
109 path->leave_spinning = 1; add_tree_ref()
110 ret = btrfs_search_slot(&trans, root, &key, path, 0, 1); add_tree_ref()
113 btrfs_free_path(path); add_tree_ref()
117 item = btrfs_item_ptr(path->nodes[0], path->slots[0], add_tree_ref()
119 refs = btrfs_extent_refs(path->nodes[0], item); add_tree_ref()
120 btrfs_set_extent_refs(path->nodes[0], item, refs + 1); add_tree_ref()
121 btrfs_release_path(path); add_tree_ref()
132 ret = btrfs_insert_empty_item(&trans, root, path, &key, 0); add_tree_ref()
135 btrfs_free_path(path); add_tree_ref()
144 struct btrfs_path *path; remove_extent_item() local
153 path = btrfs_alloc_path(); remove_extent_item()
154 if (!path) { remove_extent_item()
155 test_msg("Couldn't allocate path\n"); remove_extent_item()
158 path->leave_spinning = 1; remove_extent_item()
160 ret = btrfs_search_slot(&trans, root, &key, path, -1, 1); remove_extent_item()
163 btrfs_free_path(path); remove_extent_item()
166 btrfs_del_item(&trans, root, path); remove_extent_item()
167 btrfs_free_path(path); remove_extent_item()
176 struct btrfs_path *path; remove_extent_ref() local
187 path = btrfs_alloc_path(); remove_extent_ref()
188 if (!path) { remove_extent_ref()
189 test_msg("Couldn't allocate path\n"); remove_extent_ref()
193 path->leave_spinning = 1; remove_extent_ref()
194 ret = btrfs_search_slot(&trans, root, &key, path, 0, 1); remove_extent_ref()
197 btrfs_free_path(path); remove_extent_ref()
201 item = btrfs_item_ptr(path->nodes[0], path->slots[0], remove_extent_ref()
203 refs = btrfs_extent_refs(path->nodes[0], item); remove_extent_ref()
204 btrfs_set_extent_refs(path->nodes[0], item, refs - 1); remove_extent_ref()
205 btrfs_release_path(path); remove_extent_ref()
216 ret = btrfs_search_slot(&trans, root, &key, path, -1, 1); remove_extent_ref()
219 btrfs_free_path(path); remove_extent_ref()
222 btrfs_del_item(&trans, root, path); remove_extent_ref()
223 btrfs_free_path(path); remove_extent_ref()
H A Dextent-buffer-tests.c27 struct btrfs_path *path; test_btrfs_split_item() local
49 path = btrfs_alloc_path(); test_btrfs_split_item()
50 if (!path) { test_btrfs_split_item()
51 test_msg("Could not allocate path\n"); test_btrfs_split_item()
56 path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096); test_btrfs_split_item()
62 path->slots[0] = 0; test_btrfs_split_item()
68 setup_items_for_insert(root, path, &key, &value_len, value_len, test_btrfs_split_item()
81 ret = btrfs_split_item(NULL, root, path, &key, 17); test_btrfs_split_item()
142 ret = btrfs_split_item(NULL, root, path, &key, 4); test_btrfs_split_item()
220 btrfs_free_path(path); test_btrfs_split_item()
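[Editor's note] Both btrfs test files above follow one path discipline: btrfs_alloc_path(), search, btrfs_release_path() to drop held leaf references, btrfs_free_path() at the end. A sketch with key setup elided:

static int example_lookup(struct btrfs_root *root, struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* NULL trans + ins_len/cow == 0 makes this a read-only search. */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		/* found: path->nodes[0] / path->slots[0] address the item */
		btrfs_release_path(path);
	}
	btrfs_free_path(path);
	return ret;
}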
/linux-4.4.14/tools/testing/selftests/memfd/
H A Dfuse_mnt.c26 static int memfd_getattr(const char *path, struct stat *st) memfd_getattr() argument
30 if (!strcmp(path, "/")) { memfd_getattr()
33 } else if (!strcmp(path, memfd_path)) { memfd_getattr()
44 static int memfd_readdir(const char *path, memfd_readdir() argument
50 if (strcmp(path, "/")) memfd_readdir()
60 static int memfd_open(const char *path, struct fuse_file_info *fi) memfd_open() argument
62 if (strcmp(path, memfd_path)) memfd_open()
74 static int memfd_read(const char *path, memfd_read() argument
82 if (strcmp(path, memfd_path) != 0) memfd_read()
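[Editor's note] A sketch of how the memfd selftest handlers above would be wired up, assuming the usual libfuse fuse_operations/fuse_main entry point (the real file defines FUSE_USE_VERSION and includes <fuse.h> above the excerpt shown):

static const struct fuse_operations memfd_ops = {
	.getattr = memfd_getattr,
	.readdir = memfd_readdir,
	.open    = memfd_open,
	.read    = memfd_read,
};

int main(int argc, char *argv[])
{
	return fuse_main(argc, argv, &memfd_ops, NULL);
}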
/linux-4.4.14/fs/ext4/
H A Dextents.c141 struct ext4_ext_path *path) ext4_ext_get_access()
143 if (path->p_bh) { ext4_ext_get_access()
144 /* path points to block */ ext4_ext_get_access()
145 BUFFER_TRACE(path->p_bh, "get_write_access"); ext4_ext_get_access()
146 return ext4_journal_get_write_access(handle, path->p_bh); ext4_ext_get_access()
148 /* path points to leaf/index in inode body */ ext4_ext_get_access()
160 struct inode *inode, struct ext4_ext_path *path) __ext4_ext_dirty()
165 if (path->p_bh) { __ext4_ext_dirty()
166 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh)); __ext4_ext_dirty()
167 /* path points to block */ __ext4_ext_dirty()
169 inode, path->p_bh); __ext4_ext_dirty()
171 /* path points to leaf/index in inode body */ __ext4_ext_dirty()
178 struct ext4_ext_path *path, ext4_ext_find_goal()
181 if (path) { ext4_ext_find_goal()
182 int depth = path->p_depth; ext4_ext_find_goal()
202 ex = path[depth].p_ext; ext4_ext_find_goal()
215 if (path[depth].p_bh) ext4_ext_find_goal()
216 return path[depth].p_bh->b_blocknr; ext4_ext_find_goal()
228 struct ext4_ext_path *path, ext4_ext_new_meta_block()
233 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); ext4_ext_new_meta_block()
298 struct ext4_ext_path *path = *ppath; ext4_force_split_extent_at() local
299 int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext); ext4_force_split_extent_at()
569 struct ext4_ext_path *path = NULL; ext4_ext_precache() local
579 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), ext4_ext_precache()
581 if (path == NULL) { ext4_ext_precache()
589 path[0].p_hdr = ext_inode_hdr(inode); ext4_ext_precache()
590 ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0); ext4_ext_precache()
593 path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr); ext4_ext_precache()
600 path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) { ext4_ext_precache()
601 brelse(path[i].p_bh); ext4_ext_precache()
602 path[i].p_bh = NULL; ext4_ext_precache()
607 ext4_idx_pblock(path[i].p_idx++), ext4_ext_precache()
615 path[i].p_bh = bh; ext4_ext_precache()
616 path[i].p_hdr = ext_block_hdr(bh); ext4_ext_precache()
617 path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr); ext4_ext_precache()
622 ext4_ext_drop_refs(path); ext4_ext_precache()
623 kfree(path); ext4_ext_precache()
628 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) ext4_ext_show_path() argument
630 int k, l = path->p_depth; ext4_ext_show_path()
632 ext_debug("path:"); ext4_ext_show_path()
633 for (k = 0; k <= l; k++, path++) { ext4_ext_show_path()
634 if (path->p_idx) { ext4_ext_show_path()
635 ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), ext4_ext_show_path()
636 ext4_idx_pblock(path->p_idx)); ext4_ext_show_path()
637 } else if (path->p_ext) { ext4_ext_show_path()
639 le32_to_cpu(path->p_ext->ee_block), ext4_ext_show_path()
640 ext4_ext_is_unwritten(path->p_ext), ext4_ext_show_path()
641 ext4_ext_get_actual_len(path->p_ext), ext4_ext_show_path()
642 ext4_ext_pblock(path->p_ext)); ext4_ext_show_path()
649 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) ext4_ext_show_leaf() argument
656 if (!path) ext4_ext_show_leaf()
659 eh = path[depth].p_hdr; ext4_ext_show_leaf()
672 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, ext4_ext_show_move() argument
680 idx = path[level].p_idx; ext4_ext_show_move()
681 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { ext4_ext_show_move()
692 ex = path[depth].p_ext; ext4_ext_show_move()
693 while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { ext4_ext_show_move()
705 #define ext4_ext_show_path(inode, path)
706 #define ext4_ext_show_leaf(inode, path)
707 #define ext4_ext_show_move(inode, path, newblock, level)
710 void ext4_ext_drop_refs(struct ext4_ext_path *path) ext4_ext_drop_refs() argument
714 if (!path) ext4_ext_drop_refs()
716 depth = path->p_depth; ext4_ext_drop_refs()
717 for (i = 0; i <= depth; i++, path++) ext4_ext_drop_refs()
718 if (path->p_bh) { ext4_ext_drop_refs()
719 brelse(path->p_bh); ext4_ext_drop_refs()
720 path->p_bh = NULL; ext4_ext_drop_refs()
731 struct ext4_ext_path *path, ext4_lblk_t block) ext4_ext_binsearch_idx()
733 struct ext4_extent_header *eh = path->p_hdr; ext4_ext_binsearch_idx()
752 path->p_idx = l - 1; ext4_ext_binsearch_idx()
753 ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), ext4_ext_binsearch_idx()
754 ext4_idx_pblock(path->p_idx)); ext4_ext_binsearch_idx()
778 BUG_ON(chix != path->p_idx); ext4_ext_binsearch_idx()
791 struct ext4_ext_path *path, ext4_lblk_t block) ext4_ext_binsearch()
793 struct ext4_extent_header *eh = path->p_hdr; ext4_ext_binsearch()
820 path->p_ext = l - 1; ext4_ext_binsearch()
822 le32_to_cpu(path->p_ext->ee_block), ext4_ext_binsearch()
823 ext4_ext_pblock(path->p_ext), ext4_ext_binsearch()
824 ext4_ext_is_unwritten(path->p_ext), ext4_ext_binsearch()
825 ext4_ext_get_actual_len(path->p_ext)); ext4_ext_binsearch()
840 BUG_ON(chex != path->p_ext); ext4_ext_binsearch()
865 struct ext4_ext_path *path = orig_path ? *orig_path : NULL; ext4_find_extent() local
872 if (path) { ext4_find_extent()
873 ext4_ext_drop_refs(path); ext4_find_extent()
874 if (depth > path[0].p_maxdepth) { ext4_find_extent()
875 kfree(path); ext4_find_extent()
876 *orig_path = path = NULL; ext4_find_extent()
879 if (!path) { ext4_find_extent()
881 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), ext4_find_extent()
883 if (unlikely(!path)) ext4_find_extent()
885 path[0].p_maxdepth = depth + 1; ext4_find_extent()
887 path[0].p_hdr = eh; ext4_find_extent()
888 path[0].p_bh = NULL; ext4_find_extent()
896 ext4_ext_binsearch_idx(inode, path + ppos, block); ext4_find_extent()
897 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); ext4_find_extent()
898 path[ppos].p_depth = i; ext4_find_extent()
899 path[ppos].p_ext = NULL; ext4_find_extent()
901 bh = read_extent_tree_block(inode, path[ppos].p_block, --i, ext4_find_extent()
917 path[ppos].p_bh = bh; ext4_find_extent()
918 path[ppos].p_hdr = eh; ext4_find_extent()
921 path[ppos].p_depth = i; ext4_find_extent()
922 path[ppos].p_ext = NULL; ext4_find_extent()
923 path[ppos].p_idx = NULL; ext4_find_extent()
926 ext4_ext_binsearch(inode, path + ppos, block); ext4_find_extent()
928 if (path[ppos].p_ext) ext4_find_extent()
929 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); ext4_find_extent()
931 ext4_ext_show_path(inode, path); ext4_find_extent()
933 return path; ext4_find_extent()
936 ext4_ext_drop_refs(path); ext4_find_extent()
937 kfree(path); ext4_find_extent()
1016 * inserts new subtree into the path, using free index entry
1026 struct ext4_ext_path *path, ext4_ext_split()
1044 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { ext4_ext_split()
1048 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { ext4_ext_split()
1049 border = path[depth].p_ext[1].ee_block; ext4_ext_split()
1079 newblock = ext4_ext_new_meta_block(handle, inode, path, ext4_ext_split()
1110 /* move remainder of path[depth] to the new leaf */ ext4_ext_split()
1111 if (unlikely(path[depth].p_hdr->eh_entries != ext4_ext_split()
1112 path[depth].p_hdr->eh_max)) { ext4_ext_split()
1114 path[depth].p_hdr->eh_entries, ext4_ext_split()
1115 path[depth].p_hdr->eh_max); ext4_ext_split()
1120 m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; ext4_ext_split()
1121 ext4_ext_show_move(inode, path, newblock, depth); ext4_ext_split()
1125 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); ext4_ext_split()
1141 err = ext4_ext_get_access(handle, inode, path + depth); ext4_ext_split()
1144 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); ext4_ext_split()
1145 err = ext4_ext_dirty(handle, inode, path + depth); ext4_ext_split()
1189 /* move remainder of path[i] to the new index block */ ext4_ext_split()
1190 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != ext4_ext_split()
1191 EXT_LAST_INDEX(path[i].p_hdr))) { ext4_ext_split()
1194 le32_to_cpu(path[i].p_ext->ee_block)); ext4_ext_split()
1199 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; ext4_ext_split()
1200 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, ext4_ext_split()
1201 EXT_MAX_INDEX(path[i].p_hdr)); ext4_ext_split()
1202 ext4_ext_show_move(inode, path, newblock, i); ext4_ext_split()
1204 memmove(++fidx, path[i].p_idx, ext4_ext_split()
1220 err = ext4_ext_get_access(handle, inode, path + i); ext4_ext_split()
1223 le16_add_cpu(&path[i].p_hdr->eh_entries, -m); ext4_ext_split()
1224 err = ext4_ext_dirty(handle, inode, path + i); ext4_ext_split()
1233 err = ext4_ext_insert_index(handle, inode, path + at, ext4_ext_split()
1353 struct ext4_ext_path *path = *ppath; ext4_ext_create_new_leaf() local
1361 curp = path + depth; ext4_ext_create_new_leaf()
1372 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); ext4_ext_create_new_leaf()
1376 /* refill path */ ext4_ext_create_new_leaf()
1377 path = ext4_find_extent(inode, ext4_ext_create_new_leaf()
1380 if (IS_ERR(path)) ext4_ext_create_new_leaf()
1381 err = PTR_ERR(path); ext4_ext_create_new_leaf()
1388 /* refill path */ ext4_ext_create_new_leaf()
1389 path = ext4_find_extent(inode, ext4_ext_create_new_leaf()
1392 if (IS_ERR(path)) { ext4_ext_create_new_leaf()
1393 err = PTR_ERR(path); ext4_ext_create_new_leaf()
1402 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { ext4_ext_create_new_leaf()
1420 struct ext4_ext_path *path, ext4_ext_search_left()
1427 if (unlikely(path == NULL)) { ext4_ext_search_left()
1428 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); ext4_ext_search_left()
1431 depth = path->p_depth; ext4_ext_search_left()
1434 if (depth == 0 && path->p_ext == NULL) ext4_ext_search_left()
1437 /* usually extent in the path covers blocks smaller ext4_ext_search_left()
1441 ex = path[depth].p_ext; ext4_ext_search_left()
1444 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { ext4_ext_search_left()
1451 ix = path[depth].p_idx; ext4_ext_search_left()
1452 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { ext4_ext_search_left()
1456 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? ext4_ext_search_left()
1457 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, ext4_ext_search_left()
1485 struct ext4_ext_path *path, ext4_ext_search_right()
1497 if (unlikely(path == NULL)) { ext4_ext_search_right()
1498 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); ext4_ext_search_right()
1501 depth = path->p_depth; ext4_ext_search_right()
1504 if (depth == 0 && path->p_ext == NULL) ext4_ext_search_right()
1507 /* usually extent in the path covers blocks smaller ext4_ext_search_right()
1511 ex = path[depth].p_ext; ext4_ext_search_right()
1514 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { ext4_ext_search_right()
1516 "first_extent(path[%d].p_hdr) != ex", ext4_ext_search_right()
1521 ix = path[depth].p_idx; ext4_ext_search_right()
1522 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { ext4_ext_search_right()
1539 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { ext4_ext_search_right()
1547 ix = path[depth].p_idx; ext4_ext_search_right()
1548 if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) ext4_ext_search_right()
1561 while (++depth < path->p_depth) { ext4_ext_search_right()
1564 path->p_depth - depth, 0); ext4_ext_search_right()
1573 bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); ext4_ext_search_right()
1595 ext4_ext_next_allocated_block(struct ext4_ext_path *path) ext4_ext_next_allocated_block() argument
1599 BUG_ON(path == NULL); ext4_ext_next_allocated_block()
1600 depth = path->p_depth; ext4_ext_next_allocated_block()
1602 if (depth == 0 && path->p_ext == NULL) ext4_ext_next_allocated_block()
1606 if (depth == path->p_depth) { ext4_ext_next_allocated_block()
1608 if (path[depth].p_ext && ext4_ext_next_allocated_block()
1609 path[depth].p_ext != ext4_ext_next_allocated_block()
1610 EXT_LAST_EXTENT(path[depth].p_hdr)) ext4_ext_next_allocated_block()
1611 return le32_to_cpu(path[depth].p_ext[1].ee_block); ext4_ext_next_allocated_block()
1614 if (path[depth].p_idx != ext4_ext_next_allocated_block()
1615 EXT_LAST_INDEX(path[depth].p_hdr)) ext4_ext_next_allocated_block()
1616 return le32_to_cpu(path[depth].p_idx[1].ei_block); ext4_ext_next_allocated_block()
1628 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) ext4_ext_next_leaf_block() argument
1632 BUG_ON(path == NULL); ext4_ext_next_leaf_block()
1633 depth = path->p_depth; ext4_ext_next_leaf_block()
1643 if (path[depth].p_idx != ext4_ext_next_leaf_block()
1644 EXT_LAST_INDEX(path[depth].p_hdr)) ext4_ext_next_leaf_block()
1646 le32_to_cpu(path[depth].p_idx[1].ei_block); ext4_ext_next_leaf_block()
1660 struct ext4_ext_path *path) ext4_ext_correct_indexes()
1668 eh = path[depth].p_hdr; ext4_ext_correct_indexes()
1669 ex = path[depth].p_ext; ext4_ext_correct_indexes()
1691 border = path[depth].p_ext->ee_block; ext4_ext_correct_indexes()
1692 err = ext4_ext_get_access(handle, inode, path + k); ext4_ext_correct_indexes()
1695 path[k].p_idx->ei_block = border; ext4_ext_correct_indexes()
1696 err = ext4_ext_dirty(handle, inode, path + k); ext4_ext_correct_indexes()
1702 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) ext4_ext_correct_indexes()
1704 err = ext4_ext_get_access(handle, inode, path + k); ext4_ext_correct_indexes()
1707 path[k].p_idx->ei_block = border; ext4_ext_correct_indexes()
1708 err = ext4_ext_dirty(handle, inode, path + k); ext4_ext_correct_indexes()
1762 struct ext4_ext_path *path, ext4_ext_try_to_merge_right()
1770 BUG_ON(path[depth].p_hdr == NULL); ext4_ext_try_to_merge_right()
1771 eh = path[depth].p_hdr; ext4_ext_try_to_merge_right()
1804 struct ext4_ext_path *path) ext4_ext_try_to_merge_up()
1810 if ((path[0].p_depth != 1) || ext4_ext_try_to_merge_up()
1811 (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || ext4_ext_try_to_merge_up()
1812 (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) ext4_ext_try_to_merge_up()
1826 blk = ext4_idx_pblock(path[0].p_idx); ext4_ext_try_to_merge_up()
1827 s = le16_to_cpu(path[1].p_hdr->eh_entries) * ext4_ext_try_to_merge_up()
1831 path[1].p_maxdepth = path[0].p_maxdepth; ext4_ext_try_to_merge_up()
1832 memcpy(path[0].p_hdr, path[1].p_hdr, s); ext4_ext_try_to_merge_up()
1833 path[0].p_depth = 0; ext4_ext_try_to_merge_up()
1834 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + ext4_ext_try_to_merge_up()
1835 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); ext4_ext_try_to_merge_up()
1836 path[0].p_hdr->eh_max = cpu_to_le16(max_root); ext4_ext_try_to_merge_up()
1838 brelse(path[1].p_bh); ext4_ext_try_to_merge_up()
1849 struct ext4_ext_path *path, ext4_ext_try_to_merge()
1856 BUG_ON(path[depth].p_hdr == NULL); ext4_ext_try_to_merge()
1857 eh = path[depth].p_hdr; ext4_ext_try_to_merge()
1860 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); ext4_ext_try_to_merge()
1863 (void) ext4_ext_try_to_merge_right(inode, path, ex); ext4_ext_try_to_merge()
1865 ext4_ext_try_to_merge_up(handle, inode, path); ext4_ext_try_to_merge()
1879 struct ext4_ext_path *path) ext4_ext_check_overlap()
1888 if (!path[depth].p_ext) ext4_ext_check_overlap()
1890 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); ext4_ext_check_overlap()
1893 * get the next allocated block if the extent in the path ext4_ext_check_overlap()
1897 b2 = ext4_ext_next_allocated_block(path); ext4_ext_check_overlap()
1929 struct ext4_ext_path *path = *ppath; ext4_ext_insert_extent() local
1945 ex = path[depth].p_ext; ext4_ext_insert_extent()
1946 eh = path[depth].p_hdr; ext4_ext_insert_extent()
1947 if (unlikely(path[depth].p_hdr == NULL)) { ext4_ext_insert_extent()
1948 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); ext4_ext_insert_extent()
1985 path + depth); ext4_ext_insert_extent()
1993 eh = path[depth].p_hdr; ext4_ext_insert_extent()
2011 path + depth); ext4_ext_insert_extent()
2022 eh = path[depth].p_hdr; ext4_ext_insert_extent()
2029 eh = path[depth].p_hdr; ext4_ext_insert_extent()
2037 next = ext4_ext_next_leaf_block(path); ext4_ext_insert_extent()
2044 BUG_ON(npath->p_depth != path->p_depth); ext4_ext_insert_extent()
2049 path = npath; ext4_ext_insert_extent()
2067 eh = path[depth].p_hdr; ext4_ext_insert_extent()
2070 nearex = path[depth].p_ext; ext4_ext_insert_extent()
2072 err = ext4_ext_get_access(handle, inode, path + depth); ext4_ext_insert_extent()
2122 path[depth].p_ext = nearex; ext4_ext_insert_extent()
2130 ext4_ext_try_to_merge(handle, inode, path, nearex); ext4_ext_insert_extent()
2134 err = ext4_ext_correct_indexes(handle, inode, path); ext4_ext_insert_extent()
2138 err = ext4_ext_dirty(handle, inode, path + path->p_depth); ext4_ext_insert_extent()
2150 struct ext4_ext_path *path = NULL; ext4_fill_fiemap_extents() local
2164 path = ext4_find_extent(inode, block, &path, 0); ext4_fill_fiemap_extents()
2165 if (IS_ERR(path)) { ext4_fill_fiemap_extents()
2167 err = PTR_ERR(path); ext4_fill_fiemap_extents()
2168 path = NULL; ext4_fill_fiemap_extents()
2173 if (unlikely(path[depth].p_hdr == NULL)) { ext4_fill_fiemap_extents()
2175 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); ext4_fill_fiemap_extents()
2179 ex = path[depth].p_ext; ext4_fill_fiemap_extents()
2180 next = ext4_ext_next_allocated_block(path); ext4_fill_fiemap_extents()
2290 ext4_ext_drop_refs(path); ext4_fill_fiemap_extents()
2291 kfree(path); ext4_fill_fiemap_extents()
2301 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, ext4_ext_put_gap_in_cache() argument
2310 ex = path[depth].p_ext; ext4_ext_put_gap_in_cache()
2329 next = ext4_ext_next_allocated_block(path); ext4_ext_put_gap_in_cache()
2356 struct ext4_ext_path *path, int depth) ext4_ext_rm_idx()
2363 path = path + depth; ext4_ext_rm_idx()
2364 leaf = ext4_idx_pblock(path->p_idx); ext4_ext_rm_idx()
2365 if (unlikely(path->p_hdr->eh_entries == 0)) { ext4_ext_rm_idx()
2366 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); ext4_ext_rm_idx()
2369 err = ext4_ext_get_access(handle, inode, path); ext4_ext_rm_idx()
2373 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { ext4_ext_rm_idx()
2374 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; ext4_ext_rm_idx()
2376 memmove(path->p_idx, path->p_idx + 1, len); ext4_ext_rm_idx()
2379 le16_add_cpu(&path->p_hdr->eh_entries, -1); ext4_ext_rm_idx()
2380 err = ext4_ext_dirty(handle, inode, path); ext4_ext_rm_idx()
2390 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) ext4_ext_rm_idx()
2392 path--; ext4_ext_rm_idx()
2393 err = ext4_ext_get_access(handle, inode, path); ext4_ext_rm_idx()
2396 path->p_idx->ei_block = (path+1)->p_idx->ei_block; ext4_ext_rm_idx()
2397 err = ext4_ext_dirty(handle, inode, path); ext4_ext_rm_idx()
2408 * When passing the actual path, the caller should calculate credits
2412 struct ext4_ext_path *path) ext4_ext_calc_credits_for_single_extent()
2414 if (path) { ext4_ext_calc_credits_for_single_extent()
2419 if (le16_to_cpu(path[depth].p_hdr->eh_entries) ext4_ext_calc_credits_for_single_extent()
2420 < le16_to_cpu(path[depth].p_hdr->eh_max)) { ext4_ext_calc_credits_for_single_extent()
2583 * @path: The path to the leaf
2593 struct ext4_ext_path *path, ext4_ext_rm_leaf()
2611 if (!path[depth].p_hdr) ext4_ext_rm_leaf()
2612 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); ext4_ext_rm_leaf()
2613 eh = path[depth].p_hdr; ext4_ext_rm_leaf()
2614 if (unlikely(path[depth].p_hdr == NULL)) { ext4_ext_rm_leaf()
2615 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); ext4_ext_rm_leaf()
2619 ex = path[depth].p_ext; ext4_ext_rm_leaf()
2638 path[depth].p_ext = ex; ext4_ext_rm_leaf()
2696 err = ext4_ext_get_access(handle, inode, path + depth); ext4_ext_rm_leaf()
2737 err = ext4_ext_dirty(handle, inode, path + depth); ext4_ext_rm_leaf()
2749 err = ext4_ext_correct_indexes(handle, inode, path); ext4_ext_rm_leaf()
2771 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) ext4_ext_rm_leaf()
2772 err = ext4_ext_rm_idx(handle, inode, path, depth); ext4_ext_rm_leaf()
2783 ext4_ext_more_to_rm(struct ext4_ext_path *path) ext4_ext_more_to_rm() argument
2785 BUG_ON(path->p_idx == NULL); ext4_ext_more_to_rm()
2787 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) ext4_ext_more_to_rm()
2794 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) ext4_ext_more_to_rm()
2804 struct ext4_ext_path *path = NULL; ext4_ext_remove_space() local
2832 path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); ext4_ext_remove_space()
2833 if (IS_ERR(path)) { ext4_ext_remove_space()
2835 return PTR_ERR(path); ext4_ext_remove_space()
2839 ex = path[depth].p_ext; ext4_ext_remove_space()
2843 "path[%d].p_hdr == NULL", ext4_ext_remove_space()
2878 err = ext4_force_split_extent_at(handle, inode, &path, ext4_ext_remove_space()
2893 err = ext4_ext_search_right(inode, path, &lblk, &pblk, ext4_ext_remove_space()
2907 if (path) { ext4_ext_remove_space()
2910 path[k].p_block = ext4_ext_remove_space()
2911 le16_to_cpu(path[k].p_hdr->eh_entries)+1; ext4_ext_remove_space()
2913 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), ext4_ext_remove_space()
2915 if (path == NULL) { ext4_ext_remove_space()
2919 path[0].p_maxdepth = path[0].p_depth = depth; ext4_ext_remove_space()
2920 path[0].p_hdr = ext_inode_hdr(inode); ext4_ext_remove_space()
2923 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { ext4_ext_remove_space()
2933 err = ext4_ext_rm_leaf(handle, inode, path, ext4_ext_remove_space()
2937 brelse(path[i].p_bh); ext4_ext_remove_space()
2938 path[i].p_bh = NULL; ext4_ext_remove_space()
2944 if (!path[i].p_hdr) { ext4_ext_remove_space()
2946 path[i].p_hdr = ext_block_hdr(path[i].p_bh); ext4_ext_remove_space()
2949 if (!path[i].p_idx) { ext4_ext_remove_space()
2951 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); ext4_ext_remove_space()
2952 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; ext4_ext_remove_space()
2954 path[i].p_hdr, ext4_ext_remove_space()
2955 le16_to_cpu(path[i].p_hdr->eh_entries)); ext4_ext_remove_space()
2958 path[i].p_idx--; ext4_ext_remove_space()
2962 i, EXT_FIRST_INDEX(path[i].p_hdr), ext4_ext_remove_space()
2963 path[i].p_idx); ext4_ext_remove_space()
2964 if (ext4_ext_more_to_rm(path + i)) { ext4_ext_remove_space()
2968 i + 1, ext4_idx_pblock(path[i].p_idx)); ext4_ext_remove_space()
2969 memset(path + i + 1, 0, sizeof(*path)); ext4_ext_remove_space()
2971 ext4_idx_pblock(path[i].p_idx), depth - i - 1, ext4_ext_remove_space()
2985 path[i + 1].p_bh = bh; ext4_ext_remove_space()
2989 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); ext4_ext_remove_space()
2993 if (path[i].p_hdr->eh_entries == 0 && i > 0) { ext4_ext_remove_space()
2997 err = ext4_ext_rm_idx(handle, inode, path, i); ext4_ext_remove_space()
3000 brelse(path[i].p_bh); ext4_ext_remove_space()
3001 path[i].p_bh = NULL; ext4_ext_remove_space()
3008 partial_cluster, path->p_hdr->eh_entries); ext4_ext_remove_space()
3025 if (path->p_hdr->eh_entries == 0) { ext4_ext_remove_space()
3030 err = ext4_ext_get_access(handle, inode, path); ext4_ext_remove_space()
3035 err = ext4_ext_dirty(handle, inode, path); ext4_ext_remove_space()
3039 ext4_ext_drop_refs(path); ext4_ext_remove_space()
3040 kfree(path); ext4_ext_remove_space()
3041 path = NULL; ext4_ext_remove_space()
3142 * @path: the path to the extent
3165 struct ext4_ext_path *path = *ppath; ext4_split_extent_at() local
3179 ext4_ext_show_leaf(inode, path); ext4_split_extent_at()
3182 ex = path[depth].p_ext; ext4_split_extent_at()
3193 err = ext4_ext_get_access(handle, inode, path + depth); ext4_split_extent_at()
3209 ext4_ext_try_to_merge(handle, inode, path, ex); ext4_split_extent_at()
3211 err = ext4_ext_dirty(handle, inode, path + path->p_depth); ext4_split_extent_at()
3222 * path may lead to new leaf, not to original leaf any more ext4_split_extent_at()
3225 err = ext4_ext_dirty(handle, inode, path + depth); ext4_split_extent_at()
3267 ext4_ext_try_to_merge(handle, inode, path, ex); ext4_split_extent_at()
3268 err = ext4_ext_dirty(handle, inode, path + path->p_depth); ext4_split_extent_at()
3280 ext4_ext_show_leaf(inode, path); ext4_split_extent_at()
3285 ext4_ext_dirty(handle, inode, path + path->p_depth); ext4_split_extent_at()
3307 struct ext4_ext_path *path = *ppath; ext4_split_extent() local
3317 ex = path[depth].p_ext; ext4_split_extent()
3338 * Update path is required because previous ext4_split_extent_at() may ext4_split_extent()
3341 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); ext4_split_extent()
3342 if (IS_ERR(path)) ext4_split_extent()
3343 return PTR_ERR(path); ext4_split_extent()
3345 ex = path[depth].p_ext; ext4_split_extent()
3367 ext4_ext_show_leaf(inode, path); ext4_split_extent()
3383 * - The extent pointed to by 'path' is unwritten.
3384 * - The extent pointed to by 'path' contains a superset
3398 struct ext4_ext_path *path = *ppath; ext4_ext_convert_to_initialized() local
3421 eh = path[depth].p_hdr; ext4_ext_convert_to_initialized()
3422 ex = path[depth].p_ext; ext4_ext_convert_to_initialized()
3475 err = ext4_ext_get_access(handle, inode, path + depth); ext4_ext_convert_to_initialized()
3521 err = ext4_ext_get_access(handle, inode, path + depth); ext4_ext_convert_to_initialized()
3543 ext4_ext_dirty(handle, inode, path + depth); ext4_ext_convert_to_initialized()
3545 /* Update path to point to the right extent */ ext4_ext_convert_to_initialized()
3546 path[depth].p_ext = abut_ex; ext4_ext_convert_to_initialized()
3574 err = ext4_ext_get_access(handle, inode, path + depth); ext4_ext_convert_to_initialized()
3578 ext4_ext_try_to_merge(handle, inode, path, ex); ext4_ext_convert_to_initialized()
3579 err = ext4_ext_dirty(handle, inode, path + path->p_depth); ext4_ext_convert_to_initialized()
3666 struct ext4_ext_path *path = *ppath; ext4_split_convert_extents() local
3686 ex = path[depth].p_ext; ext4_split_convert_extents()
3708 struct ext4_ext_path *path = *ppath; ext4_convert_unwritten_extents_endio() local
3716 ex = path[depth].p_ext; ext4_convert_unwritten_extents_endio()
3741 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); ext4_convert_unwritten_extents_endio()
3742 if (IS_ERR(path)) ext4_convert_unwritten_extents_endio()
3743 return PTR_ERR(path); ext4_convert_unwritten_extents_endio()
3745 ex = path[depth].p_ext; ext4_convert_unwritten_extents_endio()
3748 err = ext4_ext_get_access(handle, inode, path + depth); ext4_convert_unwritten_extents_endio()
3757 ext4_ext_try_to_merge(handle, inode, path, ex); ext4_convert_unwritten_extents_endio()
3760 err = ext4_ext_dirty(handle, inode, path + path->p_depth); ext4_convert_unwritten_extents_endio()
3762 ext4_ext_show_leaf(inode, path); ext4_convert_unwritten_extents_endio()
3779 struct ext4_ext_path *path, check_eofblocks_fl()
3790 eh = path[depth].p_hdr; check_eofblocks_fl()
3820 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) check_eofblocks_fl()
3941 struct ext4_ext_path *path = *ppath; convert_initialized_extent() local
3956 ex = path[depth].p_ext; convert_initialized_extent()
3969 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); convert_initialized_extent()
3970 if (IS_ERR(path)) convert_initialized_extent()
3971 return PTR_ERR(path); convert_initialized_extent()
3973 ex = path[depth].p_ext; convert_initialized_extent()
3981 err = ext4_ext_get_access(handle, inode, path + depth); convert_initialized_extent()
3990 ext4_ext_try_to_merge(handle, inode, path, ex); convert_initialized_extent()
3993 err = ext4_ext_dirty(handle, inode, path + path->p_depth); convert_initialized_extent()
3996 ext4_ext_show_leaf(inode, path); convert_initialized_extent()
3999 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); convert_initialized_extent()
4015 struct ext4_ext_path *path = *ppath; ext4_ext_handle_unwritten_extents() local
4024 ext4_ext_show_leaf(inode, path); ext4_ext_handle_unwritten_extents()
4060 path, map->m_len); ext4_ext_handle_unwritten_extents()
4122 * and quota reservation done in the delayed write path. ext4_ext_handle_unwritten_extents()
4139 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ext4_ext_handle_unwritten_extents()
4147 ext4_ext_show_leaf(inode, path); ext4_ext_handle_unwritten_extents()
4198 struct ext4_ext_path *path) get_implied_cluster_alloc()
4244 ext4_lblk_t next = ext4_ext_next_allocated_block(path); get_implied_cluster_alloc()
4278 struct ext4_ext_path *path = NULL; ext4_ext_map_blocks() local
4296 path = ext4_find_extent(inode, map->m_lblk, NULL, 0); ext4_ext_map_blocks()
4297 if (IS_ERR(path)) { ext4_ext_map_blocks()
4298 err = PTR_ERR(path); ext4_ext_map_blocks()
4299 path = NULL; ext4_ext_map_blocks()
4310 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { ext4_ext_map_blocks()
4314 path[depth].p_block); ext4_ext_map_blocks()
4319 ex = path[depth].p_ext; ext4_ext_map_blocks()
4349 handle, inode, map, &path, ext4_ext_map_blocks()
4356 handle, inode, map, &path, flags, ext4_ext_map_blocks()
4375 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); ext4_ext_map_blocks()
4390 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { ext4_ext_map_blocks()
4399 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); ext4_ext_map_blocks()
4404 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); ext4_ext_map_blocks()
4411 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { ext4_ext_map_blocks()
4433 err = ext4_ext_check_overlap(sbi, inode, &newex, path); ext4_ext_map_blocks()
4441 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); ext4_ext_map_blocks()
4499 path, ar.len); ext4_ext_map_blocks()
4501 err = ext4_ext_insert_extent(handle, inode, &path, ext4_ext_map_blocks()
4617 ext4_ext_show_leaf(inode, path); ext4_ext_map_blocks()
4622 ext4_ext_drop_refs(path); ext4_ext_map_blocks()
4623 kfree(path); ext4_ext_map_blocks()
5225 * Function to access the path buffer for marking it dirty.
5227 * to update path.
5231 struct ext4_ext_path *path) ext4_access_path()
5252 err = ext4_ext_get_access(handle, inode, path); ext4_access_path()
5258 * Shift the extents of a path structure lying between path[depth].p_ext
5259 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5263 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, ext4_ext_shift_path_extents() argument
5270 depth = path->p_depth; ext4_ext_shift_path_extents()
5273 if (depth == path->p_depth) { ext4_ext_shift_path_extents()
5274 ex_start = path[depth].p_ext; ext4_ext_shift_path_extents()
5278 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); ext4_ext_shift_path_extents()
5280 err = ext4_access_path(handle, inode, path + depth); ext4_ext_shift_path_extents()
5284 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) ext4_ext_shift_path_extents()
5293 EXT_FIRST_EXTENT(path[depth].p_hdr)) ext4_ext_shift_path_extents()
5296 path, ex_start - 1)) ext4_ext_shift_path_extents()
5302 ext4_ext_try_to_merge_right(inode, path, ext4_ext_shift_path_extents()
5307 err = ext4_ext_dirty(handle, inode, path + depth); ext4_ext_shift_path_extents()
5316 err = ext4_access_path(handle, inode, path + depth); ext4_ext_shift_path_extents()
5321 le32_add_cpu(&path[depth].p_idx->ei_block, -shift); ext4_ext_shift_path_extents()
5323 le32_add_cpu(&path[depth].p_idx->ei_block, shift); ext4_ext_shift_path_extents()
5324 err = ext4_ext_dirty(handle, inode, path + depth); ext4_ext_shift_path_extents()
5329 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) ext4_ext_shift_path_extents()
5351 struct ext4_ext_path *path; ext4_ext_shift_extents() local
5356 /* Let path point to the last extent */ ext4_ext_shift_extents()
5357 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); ext4_ext_shift_extents()
5358 if (IS_ERR(path)) ext4_ext_shift_extents()
5359 return PTR_ERR(path); ext4_ext_shift_extents()
5361 depth = path->p_depth; ext4_ext_shift_extents()
5362 extent = path[depth].p_ext; ext4_ext_shift_extents()
5374 path = ext4_find_extent(inode, start - 1, &path, 0); ext4_ext_shift_extents()
5375 if (IS_ERR(path)) ext4_ext_shift_extents()
5376 return PTR_ERR(path); ext4_ext_shift_extents()
5377 depth = path->p_depth; ext4_ext_shift_extents()
5378 extent = path[depth].p_ext; ext4_ext_shift_extents()
5390 ext4_ext_drop_refs(path); ext4_ext_shift_extents()
5391 kfree(path); ext4_ext_shift_extents()
5408 path = ext4_find_extent(inode, *iterator, &path, 0); ext4_ext_shift_extents()
5409 if (IS_ERR(path)) ext4_ext_shift_extents()
5410 return PTR_ERR(path); ext4_ext_shift_extents()
5411 depth = path->p_depth; ext4_ext_shift_extents()
5412 extent = path[depth].p_ext; ext4_ext_shift_extents()
5421 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { ext4_ext_shift_extents()
5422 path[depth].p_ext++; ext4_ext_shift_extents()
5424 *iterator = ext4_ext_next_allocated_block(path); ext4_ext_shift_extents()
5430 extent = EXT_LAST_EXTENT(path[depth].p_hdr); ext4_ext_shift_extents()
5434 extent = EXT_FIRST_EXTENT(path[depth].p_hdr); ext4_ext_shift_extents()
5437 /* Update path extent in case we need to stop */ ext4_ext_shift_extents()
5440 path[depth].p_ext = extent; ext4_ext_shift_extents()
5442 ret = ext4_ext_shift_path_extents(path, shift, inode, ext4_ext_shift_extents()
5448 ext4_ext_drop_refs(path); ext4_ext_shift_extents()
5449 kfree(path); ext4_ext_shift_extents()
5606 struct ext4_ext_path *path; ext4_insert_range() local
5699 path = ext4_find_extent(inode, offset_lblk, NULL, 0); ext4_insert_range()
5700 if (IS_ERR(path)) { ext4_insert_range()
5706 extent = path[depth].p_ext; ext4_insert_range()
5720 ret = ext4_split_extent_at(handle, inode, &path, ext4_insert_range()
5727 ext4_ext_drop_refs(path); ext4_insert_range()
5728 kfree(path); ext4_insert_range()
5880 * path must be revalidated. */ ext4_swap_extents()
5906 * path must be revalidated. */ ext4_swap_extents()
140 ext4_ext_get_access(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) ext4_ext_get_access() argument
159 __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle, struct inode *inode, struct ext4_ext_path *path) __ext4_ext_dirty() argument
177 ext4_ext_find_goal(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) ext4_ext_find_goal() argument
227 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex, int *err, unsigned int flags) ext4_ext_new_meta_block() argument
730 ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) ext4_ext_binsearch_idx() argument
790 ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) ext4_ext_binsearch() argument
1024 ext4_ext_split(handle_t *handle, struct inode *inode, unsigned int flags, struct ext4_ext_path *path, struct ext4_extent *newext, int at) ext4_ext_split() argument
1419 ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t *logical, ext4_fsblk_t *phys) ext4_ext_search_left() argument
1484 ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t *logical, ext4_fsblk_t *phys, struct ext4_extent **ret_ex) ext4_ext_search_right() argument
1659 ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) ext4_ext_correct_indexes() argument
1761 ext4_ext_try_to_merge_right(struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex) ext4_ext_try_to_merge_right() argument
1802 ext4_ext_try_to_merge_up(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) ext4_ext_try_to_merge_up() argument
1847 ext4_ext_try_to_merge(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex) ext4_ext_try_to_merge() argument
1876 ext4_ext_check_overlap(struct ext4_sb_info *sbi, struct inode *inode, struct ext4_extent *newext, struct ext4_ext_path *path) ext4_ext_check_overlap() argument
2355 ext4_ext_rm_idx(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, int depth) ext4_ext_rm_idx() argument
2411 ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, struct ext4_ext_path *path) ext4_ext_calc_credits_for_single_extent() argument
2592 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, long long *partial_cluster, ext4_lblk_t start, ext4_lblk_t end) ext4_ext_rm_leaf() argument
3777 check_eofblocks_fl(handle_t *handle, struct inode *inode, ext4_lblk_t lblk, struct ext4_ext_path *path, unsigned int len) check_eofblocks_fl() argument
4195 get_implied_cluster_alloc(struct super_block *sb, struct ext4_map_blocks *map, struct ext4_extent *ex, struct ext4_ext_path *path) get_implied_cluster_alloc() argument
5230 ext4_access_path(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) ext4_access_path() argument
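
The hits above show the recurring struct ext4_ext_path lifecycle: ext4_find_extent() returns the path array (or refills *ppath), callers index it by tree depth to reach the leaf extent, and every exit drops the buffer-head references with ext4_ext_drop_refs() before kfree(). A minimal sketch of that pattern, assuming fs/ext4 internal context (these helpers are not exported, and sketch_lookup_extent is a hypothetical name):

	/* Sketch only: mirrors the lookup/release pattern in the hits above. */
	static int sketch_lookup_extent(struct inode *inode, ext4_lblk_t lblk)
	{
		struct ext4_ext_path *path;
		struct ext4_extent *ex;

		path = ext4_find_extent(inode, lblk, NULL, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);	/* nothing to release on error */

		ex = path[ext_depth(inode)].p_ext;	/* leaf extent, may be NULL */
		if (ex)
			pr_debug("lblk %u near extent at %u\n",
				 lblk, le32_to_cpu(ex->ee_block));

		ext4_ext_drop_refs(path);	/* drop the bh references... */
		kfree(path);			/* ...then free the array itself */
		return 0;
	}
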
H A Dmove_extent.c24 * get_ext_path - Find an extent path for designated logical block number.
27 * @lblock: logical block number to find an extent path
28 * @path: pointer to an extent path pointer (for output)
37 struct ext4_ext_path *path; get_ext_path() local
39 path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE); get_ext_path()
40 if (IS_ERR(path)) get_ext_path()
41 return PTR_ERR(path); get_ext_path()
42 if (path[ext_depth(inode)].p_ext == NULL) { get_ext_path()
43 ext4_ext_drop_refs(path); get_ext_path()
44 kfree(path); get_ext_path()
48 *ppath = path; get_ext_path()
101 struct ext4_ext_path *path = NULL; mext_check_coverage() local
106 *err = get_ext_path(inode, from, &path); mext_check_coverage()
109 ext = path[ext_depth(inode)].p_ext; mext_check_coverage()
113 ext4_ext_drop_refs(path); mext_check_coverage()
117 ext4_ext_drop_refs(path); mext_check_coverage()
118 kfree(path); mext_check_coverage()
563 struct ext4_ext_path *path = NULL; ext4_move_extents() local
626 ret = get_ext_path(orig_inode, o_start, &path); ext4_move_extents()
629 ex = path[path->p_depth].p_ext; ext4_move_extents()
630 next_blk = ext4_ext_next_allocated_block(path); ext4_move_extents()
694 ext4_ext_drop_refs(path); ext4_move_extents()
695 kfree(path); ext4_move_extents()
/linux-4.4.14/fs/autofs4/
H A Ddev-ioctl.c93 * Copy parameter control struct, including a possible path allocated
123 * Check sanity of parameter control fields and if a path is present
138 err = invalid_str(param->path, param->size - sizeof(*param)); validate_dev_ioctl()
141 "path string terminator missing for cmd(0x%08x)", validate_dev_ioctl()
146 err = check_name(param->path); validate_dev_ioctl()
148 AUTOFS_WARN("invalid path supplied for cmd(0x%08x)", validate_dev_ioctl()
195 struct path *res, find_autofs_mount()
196 int test(struct path *path, void *data), find_autofs_mount()
199 struct path path; find_autofs_mount() local
200 int err = kern_path_mountpoint(AT_FDCWD, pathname, &path, 0); find_autofs_mount()
204 while (path.dentry == path.mnt->mnt_root) { find_autofs_mount()
205 if (path.dentry->d_sb->s_magic == AUTOFS_SUPER_MAGIC) { find_autofs_mount()
206 if (test(&path, data)) { find_autofs_mount()
207 path_get(&path); find_autofs_mount()
208 *res = path; find_autofs_mount()
213 if (!follow_up(&path)) find_autofs_mount()
216 path_put(&path); find_autofs_mount()
220 static int test_by_dev(struct path *path, void *p) test_by_dev() argument
222 return path->dentry->d_sb->s_dev == *(dev_t *)p; test_by_dev()
225 static int test_by_type(struct path *path, void *p) test_by_type() argument
227 struct autofs_info *ino = autofs4_dentry_ino(path->dentry); test_by_type()
233 * to the given path and device number (aka. new_encode_dev(sb->s_dev)).
242 struct path path; autofs_dev_ioctl_open_mountpoint() local
244 err = find_autofs_mount(name, &path, test_by_dev, &devid); autofs_dev_ioctl_open_mountpoint()
253 filp = dentry_open(&path, O_RDONLY, current_cred()); autofs_dev_ioctl_open_mountpoint()
254 path_put(&path); autofs_dev_ioctl_open_mountpoint()
275 const char *path; autofs_dev_ioctl_openmount() local
279 /* param->path has already been checked */ autofs_dev_ioctl_openmount()
285 path = param->path; autofs_dev_ioctl_openmount()
289 fd = autofs_dev_ioctl_open_mountpoint(path, devid); autofs_dev_ioctl_openmount()
437 struct path path; autofs_dev_ioctl_requester() local
450 err = find_autofs_mount(param->path, &path, test_by_dev, &devid); autofs_dev_ioctl_requester()
454 ino = autofs4_dentry_ino(path.dentry); autofs_dev_ioctl_requester()
457 autofs4_expire_wait(path.dentry, 0); autofs_dev_ioctl_requester()
463 path_put(&path); autofs_dev_ioctl_requester()
497 * Check if the given path is a mountpoint.
501 * the path is considered a mountpoint if it is itself a
504 * path is a mount point and the super magic of the covering
508 * lookup the path and check if it is the root of a mount.
511 * located path is the root of a mount we return 1 along with
521 struct path path; autofs_dev_ioctl_ismountpoint() local
532 name = param->path; autofs_dev_ioctl_ismountpoint()
541 name, &path, LOOKUP_FOLLOW); autofs_dev_ioctl_ismountpoint()
543 err = find_autofs_mount(name, &path, autofs_dev_ioctl_ismountpoint()
547 devid = new_encode_dev(path.dentry->d_sb->s_dev); autofs_dev_ioctl_ismountpoint()
549 if (path.mnt->mnt_root == path.dentry) { autofs_dev_ioctl_ismountpoint()
551 magic = path.dentry->d_sb->s_magic; autofs_dev_ioctl_ismountpoint()
556 err = find_autofs_mount(name, &path, test_by_dev, &dev); autofs_dev_ioctl_ismountpoint()
562 err = have_submounts(path.dentry); autofs_dev_ioctl_ismountpoint()
564 if (follow_down_one(&path)) autofs_dev_ioctl_ismountpoint()
565 magic = path.dentry->d_sb->s_magic; autofs_dev_ioctl_ismountpoint()
570 path_put(&path); autofs_dev_ioctl_ismountpoint()
194 find_autofs_mount(const char *pathname, struct path *res, int test(struct path *path, void *data), void *data) find_autofs_mount() argument
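
find_autofs_mount() above resolves the string with kern_path_mountpoint() and then walks up the mount stack with follow_up() until the caller-supplied test() matches, taking a reference via path_get() only on success. A hedged sketch of a caller, reusing the test_by_dev() predicate from the listing (kernel context assumed; sketch_lookup_by_dev is a hypothetical name):

	/* Sketch only: caller side of find_autofs_mount(), as in the ioctls above. */
	static int sketch_lookup_by_dev(const char *name, dev_t devid)
	{
		struct path path;
		int err;

		err = find_autofs_mount(name, &path, test_by_dev, &devid);
		if (err)
			return err;	/* no reference held on failure */

		/* ... use path.mnt / path.dentry ... */
		path_put(&path);	/* balance the path_get() taken on match */
		return 0;
	}
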
/linux-4.4.14/fs/coda/
H A Dpioctl.c52 struct path path; coda_pioctl() local
68 error = user_path(data.path, &path); coda_pioctl()
70 error = user_lpath(data.path, &path); coda_pioctl()
75 target_inode = d_inode(path.dentry); coda_pioctl()
88 path_put(&path); coda_pioctl()
/linux-4.4.14/drivers/gpu/drm/sti/
H A Dsti_compositor.h57 * @clk_pix_main: pixel clock for main path
58 * @clk_pix_aux: pixel clock for aux path
59 * @rst_main: reset control of the main path
60 * @rst_aux: reset control of the aux path
63 * @vtg_main: vtg for main data path
64 * @vtg_aux: vtg for auxiliary data path
/linux-4.4.14/fs/notify/fanotify/
H A Dfanotify.h2 #include <linux/path.h>
16 * We hold ref to this path so it may be dereferenced at any point
19 struct path path; member in struct:fanotify_event_info
50 struct path *path);
H A Dfanotify.c24 old->path.mnt == new->path.mnt && should_merge()
25 old->path.dentry == new->path.dentry) should_merge()
107 struct path *path = data; fanotify_should_send_event() local
118 if (!d_is_reg(path->dentry) && fanotify_should_send_event()
119 !d_can_lookup(path->dentry)) fanotify_should_send_event()
142 if (d_is_dir(path->dentry) && fanotify_should_send_event()
154 struct path *path) fanotify_alloc_event()
177 if (path) { fanotify_alloc_event()
178 event->path = *path; fanotify_alloc_event()
179 path_get(&event->path); fanotify_alloc_event()
181 event->path.mnt = NULL; fanotify_alloc_event()
182 event->path.dentry = NULL; fanotify_alloc_event()
254 path_put(&event->path); fanotify_free_event()
153 fanotify_alloc_event(struct inode *inode, u32 mask, struct path *path) fanotify_alloc_event() argument
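
fanotify_alloc_event() above pins the reported path with path_get() so the event can be dereferenced long after the VFS operation returns, and fanotify_free_event() drops it with path_put(). The same embed-a-reference pattern in generic form (my_event is a hypothetical stand-in for fanotify_event_info; kernel context assumed):

	/* Sketch only: hold a struct path inside a long-lived object. */
	#include <linux/path.h>
	#include <linux/slab.h>

	struct my_event {
		struct path path;	/* pinned mnt + dentry pair */
	};

	static struct my_event *my_event_alloc(const struct path *path)
	{
		struct my_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

		if (!ev)
			return NULL;
		ev->path = *path;
		path_get(&ev->path);	/* take our own references */
		return ev;
	}

	static void my_event_free(struct my_event *ev)
	{
		path_put(&ev->path);	/* drop them before freeing */
		kfree(ev);
	}
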
/linux-4.4.14/kernel/
H A Daudit_fsnotify.c33 * but dev, ino, and path are about the child
38 char *path; /* insertion path */ member in struct:audit_fsnotify_mark
52 kfree(audit_mark->path); audit_fsnotify_mark_free()
66 return mark->path; audit_mark_path()
86 struct path path; audit_alloc_mark() local
94 dentry = kern_path_locked(pathname, &path); audit_alloc_mark()
97 inode = path.dentry->d_inode; audit_alloc_mark()
108 audit_mark->path = pathname; audit_alloc_mark()
119 path_put(&path); audit_alloc_mark()
137 audit_log_format(ab, " path="); audit_mark_log_rule_change()
138 audit_log_untrustedstring(ab, audit_mark->path); audit_mark_log_rule_change()
183 inode = ((struct path *)data)->dentry->d_inode; audit_mark_handle_event()
194 if (audit_compare_dname_path(dname, audit_mark->path, AUDIT_NAME_FULL)) audit_mark_handle_event()
H A Daudit_watch.c50 char *path; /* insertion path */ member in struct:audit_watch
121 kfree(watch->path); audit_put_watch()
136 return watch->path; audit_watch_path()
147 static struct audit_parent *audit_init_parent(struct path *path) audit_init_parent() argument
149 struct inode *inode = d_backing_inode(path->dentry); audit_init_parent()
171 static struct audit_watch *audit_init_watch(char *path) audit_init_watch() argument
181 watch->path = path; audit_init_watch()
189 int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op) audit_to_watch() argument
196 if (path[0] != '/' || path[len-1] == '/' || audit_to_watch()
202 watch = audit_init_watch(path); audit_to_watch()
215 char *path; audit_dupe_watch() local
218 path = kstrdup(old->path, GFP_KERNEL); audit_dupe_watch()
219 if (unlikely(!path)) audit_dupe_watch()
222 new = audit_init_watch(path); audit_dupe_watch()
224 kfree(path); audit_dupe_watch()
248 audit_log_format(ab, " path="); audit_watch_log_rule_change()
249 audit_log_untrustedstring(ab, w->path); audit_watch_log_rule_change()
269 if (audit_compare_dname_path(dname, owatch->path, audit_update_watch()
361 /* Get path information necessary for adding watches. */ audit_get_nd()
362 static int audit_get_nd(struct audit_watch *watch, struct path *parent) audit_get_nd()
364 struct dentry *d = kern_path_locked(watch->path, parent); audit_get_nd()
388 if (strcmp(watch->path, w->path)) audit_add_to_parent()
418 struct path parent_path; audit_add_watch()
486 inode = d_backing_inode(((struct path *)data)->dentry); audit_watch_handle_event()
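
audit_to_watch() above refuses watch paths that are not absolute or that end in '/'. A runnable userspace restatement of just that string check (valid_watch_path is a hypothetical name; the kernel function also validates the rule fields and operator):

	/* Sketch only: the path shape check from audit_to_watch(). */
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool valid_watch_path(const char *path)
	{
		size_t len = strlen(path);

		return len > 0 && path[0] == '/' && path[len - 1] != '/';
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       valid_watch_path("/etc/passwd"),	/* 1 */
		       valid_watch_path("etc/passwd"),	/* 0: not absolute */
		       valid_watch_path("/etc/"));	/* 0: trailing slash */
		return 0;
	}
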
/linux-4.4.14/drivers/infiniband/ulp/ipoib/
H A Dipoib_main.c81 struct ipoib_path path; member in struct:ipoib_path_iter
488 struct ipoib_path *path; __path_find() local
492 path = rb_entry(n, struct ipoib_path, rb_node); __path_find()
494 ret = memcmp(gid, path->pathrec.dgid.raw, __path_find()
502 return path; __path_find()
508 static int __path_add(struct net_device *dev, struct ipoib_path *path) __path_add() argument
520 ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw, __path_add()
530 rb_link_node(&path->rb_node, pn, n); __path_add()
531 rb_insert_color(&path->rb_node, &priv->path_tree); __path_add()
533 list_add_tail(&path->list, &priv->path_list); __path_add()
538 static void path_free(struct net_device *dev, struct ipoib_path *path) path_free() argument
542 while ((skb = __skb_dequeue(&path->queue))) path_free()
547 /* remove all neigh connected to this path */ path_free()
548 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); path_free()
550 if (path->ah) path_free()
551 ipoib_put_ah(path->ah); path_free()
553 kfree(path); path_free()
567 memset(iter->path.pathrec.dgid.raw, 0, 16); ipoib_path_iter_init()
581 struct ipoib_path *path; ipoib_path_iter_next() local
589 path = rb_entry(n, struct ipoib_path, rb_node); ipoib_path_iter_next()
591 if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, ipoib_path_iter_next()
593 iter->path = *path; ipoib_path_iter_next()
607 struct ipoib_path *path) ipoib_path_iter_read()
609 *path = iter->path; ipoib_path_iter_read()
617 struct ipoib_path *path, *tp; ipoib_mark_paths_invalid() local
621 list_for_each_entry_safe(path, tp, &priv->path_list, list) { ipoib_mark_paths_invalid()
622 ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n", ipoib_mark_paths_invalid()
623 be16_to_cpu(path->pathrec.dlid), ipoib_mark_paths_invalid()
624 path->pathrec.dgid.raw); ipoib_mark_paths_invalid()
625 path->valid = 0; ipoib_mark_paths_invalid()
634 struct ipoib_path *path, *tp; ipoib_flush_paths() local
643 list_for_each_entry(path, &remove_list, list) ipoib_flush_paths()
644 rb_erase(&path->rb_node, &priv->path_tree); ipoib_flush_paths()
646 list_for_each_entry_safe(path, tp, &remove_list, list) { ipoib_flush_paths()
647 if (path->query) ipoib_flush_paths()
648 ib_sa_cancel_query(path->query_id, path->query); ipoib_flush_paths()
651 wait_for_completion(&path->done); ipoib_flush_paths()
652 path_free(dev, path); ipoib_flush_paths()
665 struct ipoib_path *path = path_ptr; path_rec_completion() local
666 struct net_device *dev = path->dev; path_rec_completion()
680 status, path->pathrec.dgid.raw); path_rec_completion()
694 path->pathrec = *pathrec; path_rec_completion()
696 old_ah = path->ah; path_rec_completion()
697 path->ah = ah; path_rec_completion()
702 while ((skb = __skb_dequeue(&path->queue))) path_rec_completion()
705 list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) { path_rec_completion()
712 * the original value of path->ah (ie path_rec_completion()
717 kref_get(&path->ah->ref); path_rec_completion()
718 neigh->ah = path->ah; path_rec_completion()
723 path, path_rec_completion()
734 path->valid = 1; path_rec_completion()
737 path->query = NULL; path_rec_completion()
738 complete(&path->done); path_rec_completion()
743 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); path_rec_completion()
759 struct ipoib_path *path; path_rec_create() local
764 path = kzalloc(sizeof *path, GFP_ATOMIC); path_rec_create()
765 if (!path) path_rec_create()
768 path->dev = dev; path_rec_create()
770 skb_queue_head_init(&path->queue); path_rec_create()
772 INIT_LIST_HEAD(&path->neigh_list); path_rec_create()
774 memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid)); path_rec_create()
775 path->pathrec.sgid = priv->local_gid; path_rec_create()
776 path->pathrec.pkey = cpu_to_be16(priv->pkey); path_rec_create()
777 path->pathrec.numb_path = 1; path_rec_create()
778 path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class; path_rec_create()
780 return path; path_rec_create()
784 struct ipoib_path *path) path_rec_start()
788 ipoib_dbg(priv, "Start path record lookup for %pI6\n", path_rec_start()
789 path->pathrec.dgid.raw); path_rec_start()
791 init_completion(&path->done); path_rec_start()
793 path->query_id = path_rec_start()
795 &path->pathrec, path_rec_start()
803 path, &path->query); path_rec_start()
804 if (path->query_id < 0) { path_rec_start()
805 ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id); path_rec_start()
806 path->query = NULL; path_rec_start()
807 complete(&path->done); path_rec_start()
808 return path->query_id; path_rec_start()
818 struct ipoib_path *path; neigh_add_path() local
831 path = __path_find(dev, daddr + 4); neigh_add_path()
832 if (!path) { neigh_add_path()
833 path = path_rec_create(dev, daddr + 4); neigh_add_path()
834 if (!path) neigh_add_path()
837 __path_add(dev, path); neigh_add_path()
840 list_add_tail(&neigh->list, &path->neigh_list); neigh_add_path()
842 if (path->ah) { neigh_add_path()
843 kref_get(&path->ah->ref); neigh_add_path()
844 neigh->ah = path->ah; neigh_add_path()
848 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh)); neigh_add_path()
862 ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr)); neigh_add_path()
869 if (!path->query && path_rec_start(dev, path)) neigh_add_path()
895 struct ipoib_path *path; unicast_arp_send() local
900 path = __path_find(dev, cb->hwaddr + 4); unicast_arp_send()
901 if (!path || !path->valid) { unicast_arp_send()
904 if (!path) { unicast_arp_send()
905 path = path_rec_create(dev, cb->hwaddr + 4); unicast_arp_send()
908 if (path) { unicast_arp_send()
909 if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { unicast_arp_send()
910 __skb_queue_tail(&path->queue, skb); unicast_arp_send()
916 if (!path->query && path_rec_start(dev, path)) { unicast_arp_send()
919 path_free(dev, path); unicast_arp_send()
922 __path_add(dev, path); unicast_arp_send()
932 if (path->ah) { unicast_arp_send()
934 be16_to_cpu(path->pathrec.dlid)); unicast_arp_send()
937 ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr)); unicast_arp_send()
939 } else if ((path->query || !path_rec_start(dev, path)) && unicast_arp_send()
940 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { unicast_arp_send()
941 __skb_queue_tail(&path->queue, skb); unicast_arp_send()
996 /* unicast ARP and RARP should always perform a path find */ ipoib_start_xmit()
1198 /* remove from path/mc list */ __ipoib_reap_neigh()
1430 /* remove all neigh connected to a given path or mcast */ ipoib_del_neighs_by_gid()
1492 /* remove from path/mc list */ ipoib_flush_neighs()
606 ipoib_path_iter_read(struct ipoib_path_iter *iter, struct ipoib_path *path) ipoib_path_iter_read() argument
783 path_rec_start(struct net_device *dev, struct ipoib_path *path) path_rec_start() argument
H A Dipoib_fs.c200 struct ipoib_path path; ipoib_path_seq_show() local
206 ipoib_path_iter_read(iter, &path); ipoib_path_seq_show()
208 format_gid(&path.pathrec.dgid, gid_buf); ipoib_path_seq_show()
213 gid_buf, path.pathrec.dlid ? "yes" : "no"); ipoib_path_seq_show()
215 if (path.pathrec.dlid) { ipoib_path_seq_show()
216 rate = ib_rate_to_mbps(path.pathrec.rate); ipoib_path_seq_show()
222 be16_to_cpu(path.pathrec.dlid), ipoib_path_seq_show()
223 path.pathrec.sl, ipoib_path_seq_show()
277 ipoib_warn(priv, "failed to create path debug file\n"); ipoib_create_debug_files()
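
__path_find() and __path_add() above are the stock rb-tree idiom keyed by a 16-byte GID: memcmp() decides the branch on lookup, and insertion remembers the parent link for rb_link_node()/rb_insert_color(). The lookup half, condensed (demo_node is a hypothetical type; kernel context assumed):

	/* Sketch only: memcmp-keyed rb-tree walk as in __path_find() above. */
	#include <linux/rbtree.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct demo_node {
		struct rb_node rb_node;
		u8 key[16];		/* e.g. an InfiniBand GID */
	};

	static struct demo_node *demo_find(struct rb_root *root, const u8 *key)
	{
		struct rb_node *n = root->rb_node;

		while (n) {
			struct demo_node *d = rb_entry(n, struct demo_node, rb_node);
			int ret = memcmp(key, d->key, sizeof(d->key));

			if (ret < 0)
				n = n->rb_left;
			else if (ret > 0)
				n = n->rb_right;
			else
				return d;	/* exact match */
		}
		return NULL;
	}
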
/linux-4.4.14/tools/lib/api/fs/
H A Dfs.c72 char path[PATH_MAX]; member in struct:fs
123 fs->path, type) == 2) { fs__read_mounts()
153 strcpy(fs->path, *ptr); fs__check_mounts()
191 strncpy(fs->path, override_path, sizeof(fs->path)); fs__env_override()
198 return fs->path; fs__get_mountpoint()
201 return fs->path; fs__get_mountpoint()
204 return fs->path; fs__get_mountpoint()
214 return (const char *)fs->path; fs__mountpoint()
237 return (const char *)fs->path; fs__mount()
244 return fs__check_mounts(fs) ? fs->path : NULL; fs__mount()
305 char path[PATH_MAX]; sysfs__read_ull() local
311 snprintf(path, sizeof(path), "%s/%s", sysfs, entry); sysfs__read_ull()
313 return filename__read_ull(path, value); sysfs__read_ull()
318 char path[PATH_MAX]; sysfs__read_int() local
324 snprintf(path, sizeof(path), "%s/%s", sysfs, entry); sysfs__read_int()
326 return filename__read_int(path, value); sysfs__read_int()
331 char path[PATH_MAX]; sysctl__read_int() local
337 snprintf(path, sizeof(path), "%s/sys/%s", procfs, sysctl); sysctl__read_int()
339 return filename__read_int(path, value); sysctl__read_int()
H A Dtracing_path.c130 char path[PATH_MAX]; tracing_path__strerror_open_tp() local
132 snprintf(path, PATH_MAX, "%s/%s", sys, name ?: "*"); tracing_path__strerror_open_tp()
134 return strerror_open(err, buf, size, path); tracing_path__strerror_open_tp()
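
sysfs__read_int() above is snprintf() of the discovered mountpoint plus a relative entry, handed to filename__read_int(). A runnable standalone equivalent that hard-codes /sys rather than probing /proc/mounts (sysfs_read_int and the kernel_max entry are this sketch's choices, not the library's):

	/* Sketch only: the path-join-then-read pattern of sysfs__read_int(). */
	#include <limits.h>
	#include <stdio.h>

	static int sysfs_read_int(const char *entry, int *value)
	{
		char path[PATH_MAX];
		FILE *f;
		int ok;

		snprintf(path, sizeof(path), "/sys/%s", entry);
		f = fopen(path, "r");
		if (!f)
			return -1;
		ok = (fscanf(f, "%d", value) == 1) ? 0 : -1;
		fclose(f);
		return ok;
	}

	int main(void)
	{
		int val;

		if (sysfs_read_int("devices/system/cpu/kernel_max", &val) == 0)
			printf("kernel_max = %d\n", val);
		return 0;
	}
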
/linux-4.4.14/tools/power/cpupower/utils/helpers/
H A Dsysfs.c19 unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) sysfs_read_file() argument
24 fd = open(path, O_RDONLY); sysfs_read_file()
50 char path[SYSFS_PATH_MAX]; sysfs_is_cpu_online() local
58 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu); sysfs_is_cpu_online()
60 if (stat(path, &statbuf) != 0) sysfs_is_cpu_online()
67 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu); sysfs_is_cpu_online()
68 if (stat(path, &statbuf) != 0) sysfs_is_cpu_online()
71 fd = open(path, O_RDONLY); sysfs_is_cpu_online()
108 char path[SYSFS_PATH_MAX]; sysfs_idlestate_file_exists() local
112 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", sysfs_idlestate_file_exists()
114 if (stat(path, &statbuf) != 0) sysfs_idlestate_file_exists()
121 * fname is a relative path under "cpuX/cpuidle/stateX/" dir
128 char path[SYSFS_PATH_MAX]; sysfs_idlestate_read_file() local
132 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", sysfs_idlestate_read_file()
135 fd = open(path, O_RDONLY); sysfs_idlestate_read_file()
153 * fname is a relative path under "../cpuX/cpuidle/cstateY/" dir
163 char path[SYSFS_PATH_MAX]; sysfs_idlestate_write_file() local
167 snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", sysfs_idlestate_write_file()
170 fd = open(path, O_WRONLY); sysfs_idlestate_write_file()
383 * fname is a relative path under "cpu/cpuidle/" dir
388 char path[SYSFS_PATH_MAX]; sysfs_cpuidle_read_file() local
390 snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); sysfs_cpuidle_read_file()
392 return sysfs_read_file(path, buf, buflen); sysfs_cpuidle_read_file()
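
sysfs_is_cpu_online() above combines stat() on the per-CPU directory (does the CPU exist at all?) with a read of the optional "online" attribute; a missing attribute, as for cpu0 on many systems, counts as online. A runnable userspace restatement (cpu_online is a hypothetical name):

	/* Sketch only: -1 = no such CPU, 1 = online, 0 = offline. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>
	#include <unistd.h>

	static int cpu_online(unsigned int cpu)
	{
		char path[256], buf[1];
		struct stat st;
		int fd;
		ssize_t n;

		snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", cpu);
		if (stat(path, &st) != 0)
			return -1;	/* CPU directory absent */

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%u/online", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return 1;	/* no attribute: assume online */
		n = read(fd, buf, 1);
		close(fd);
		return (n == 1 && buf[0] == '1') ? 1 : 0;
	}

	int main(void)
	{
		printf("cpu0 online: %d\n", cpu_online(0));
		return 0;
	}
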
/linux-4.4.14/net/iucv/
H A Diucv.c153 * The work element to deliver path pending interrupts.
711 * @pathid: path identification number.
714 * Sever an iucv path to free up the pathid. Used internally.
742 * Function called after a path has been severed to find all remaining
751 * When a path is severed, the pathid can be reused immediately iucv_cleanup_queue()
868 * @path: address of iucv path structure
871 * @private: private data passed to interrupt handlers for this path
874 * external interrupt and now wishes to complete the IUCV communication path.
878 int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, iucv_path_accept() argument
892 parm->ctrl.ippathid = path->pathid; iucv_path_accept()
893 parm->ctrl.ipmsglim = path->msglim; iucv_path_accept()
896 parm->ctrl.ipflags1 = path->flags; iucv_path_accept()
900 path->private = private; iucv_path_accept()
901 path->msglim = parm->ctrl.ipmsglim; iucv_path_accept()
902 path->flags = parm->ctrl.ipflags1; iucv_path_accept()
912 * @path: address of iucv path structure
917 * @private: private data passed to interrupt handlers for this path
919 * This function establishes an IUCV path. Although the connect may complete
920 * successfully, you are not able to use the path until you receive an IUCV
925 int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, iucv_path_connect() argument
940 parm->ctrl.ipmsglim = path->msglim; iucv_path_connect()
941 parm->ctrl.ipflags1 = path->flags; iucv_path_connect()
959 path->pathid = parm->ctrl.ippathid; iucv_path_connect()
960 path->msglim = parm->ctrl.ipmsglim; iucv_path_connect()
961 path->flags = parm->ctrl.ipflags1; iucv_path_connect()
962 path->handler = handler; iucv_path_connect()
963 path->private = private; iucv_path_connect()
964 list_add_tail(&path->list, &handler->paths); iucv_path_connect()
965 iucv_path_table[path->pathid] = path; iucv_path_connect()
980 * @path: address of iucv path structure
983 * This function temporarily suspends incoming messages on an IUCV path.
984 * You can later reactivate the path by invoking the iucv_resume function.
988 int iucv_path_quiesce(struct iucv_path *path, u8 *userdata) iucv_path_quiesce() argument
1002 parm->ctrl.ippathid = path->pathid; iucv_path_quiesce()
1012 * @path: address of iucv path structure
1015 * This function resumes incoming messages on an IUCV path that has
1020 int iucv_path_resume(struct iucv_path *path, u8 *userdata) iucv_path_resume() argument
1034 parm->ctrl.ippathid = path->pathid; iucv_path_resume()
1043 * @path: address of iucv path structure
1046 * This function terminates an IUCV path.
1050 int iucv_path_sever(struct iucv_path *path, u8 *userdata) iucv_path_sever() argument
1061 rc = iucv_sever_pathid(path->pathid, userdata); iucv_path_sever()
1062 iucv_path_table[path->pathid] = NULL; iucv_path_sever()
1063 list_del_init(&path->list); iucv_path_sever()
1074 * @path: address of iucv path structure
1082 int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, iucv_message_purge() argument
1095 parm->purge.ippathid = path->pathid; iucv_message_purge()
1112 * @path: address of iucv path structure
1122 static int iucv_message_receive_iprmdata(struct iucv_path *path, iucv_message_receive_iprmdata() argument
1157 * @path: address of iucv path structure
1172 int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, __iucv_message_receive() argument
1179 return iucv_message_receive_iprmdata(path, msg, flags, __iucv_message_receive()
1190 parm->db.ippathid = path->pathid; __iucv_message_receive()
1207 * @path: address of iucv path structure
1222 int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, iucv_message_receive() argument
1228 return iucv_message_receive_iprmdata(path, msg, flags, iucv_message_receive()
1231 rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); iucv_message_receive()
1239 * @path: address of iucv path structure
1248 int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) iucv_message_reject() argument
1260 parm->db.ippathid = path->pathid; iucv_message_reject()
1273 * @path: address of iucv path structure
1286 int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, iucv_message_reply() argument
1300 parm->dpl.ippathid = path->pathid; iucv_message_reply()
1308 parm->db.ippathid = path->pathid; iucv_message_reply()
1322 * @path: address of iucv path structure
1337 int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, __iucv_message_send() argument
1351 parm->dpl.ippathid = path->pathid; __iucv_message_send()
1360 parm->db.ippathid = path->pathid; __iucv_message_send()
1376 * @path: address of iucv path structure
1391 int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, iucv_message_send() argument
1397 rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); iucv_message_send()
1405 * @path: address of iucv path structure
1422 int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, iucv_message_send2way() argument
1437 parm->dpl.ippathid = path->pathid; iucv_message_send2way()
1438 parm->dpl.ipflags1 = path->flags; /* priority message */ iucv_message_send2way()
1446 parm->db.ippathid = path->pathid; iucv_message_send2way()
1447 parm->db.ipflags1 = path->flags; /* priority message */ iucv_message_send2way()
1489 struct iucv_path *path; iucv_path_pending() local
1493 /* New pathid, handler found. Create a new path struct. */ iucv_path_pending()
1495 path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC); iucv_path_pending()
1496 if (!path) iucv_path_pending()
1498 path->pathid = ipp->ippathid; iucv_path_pending()
1499 iucv_path_table[path->pathid] = path; iucv_path_pending()
1502 /* Call registered handler until one is found that wants the path. */ iucv_path_pending()
1507 * Add path to handler to allow a call to iucv_path_sever iucv_path_pending()
1509 * an error remove the path from the handler again. iucv_path_pending()
1511 list_add(&path->list, &handler->paths); iucv_path_pending()
1512 path->handler = handler; iucv_path_pending()
1513 if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser)) iucv_path_pending()
1515 list_del(&path->list); iucv_path_pending()
1516 path->handler = NULL; iucv_path_pending()
1518 /* No handler wanted the path. */ iucv_path_pending()
1519 iucv_path_table[path->pathid] = NULL; iucv_path_pending()
1520 iucv_path_free(path); iucv_path_pending()
1549 struct iucv_path *path = iucv_path_table[ipc->ippathid]; iucv_path_complete() local
1551 if (path) iucv_path_complete()
1552 path->flags = ipc->ipflags1; iucv_path_complete()
1553 if (path && path->handler && path->handler->path_complete) iucv_path_complete()
1554 path->handler->path_complete(path, ipc->ipuser); iucv_path_complete()
1579 struct iucv_path *path = iucv_path_table[ips->ippathid]; iucv_path_severed() local
1581 if (!path || !path->handler) /* Already severed */ iucv_path_severed()
1583 if (path->handler->path_severed) iucv_path_severed()
1584 path->handler->path_severed(path, ips->ipuser); iucv_path_severed()
1586 iucv_sever_pathid(path->pathid, NULL); iucv_path_severed()
1587 iucv_path_table[path->pathid] = NULL; iucv_path_severed()
1588 list_del(&path->list); iucv_path_severed()
1589 iucv_path_free(path); iucv_path_severed()
1615 struct iucv_path *path = iucv_path_table[ipq->ippathid]; iucv_path_quiesced() local
1617 if (path && path->handler && path->handler->path_quiesced) iucv_path_quiesced()
1618 path->handler->path_quiesced(path, ipq->ipuser); iucv_path_quiesced()
1643 struct iucv_path *path = iucv_path_table[ipr->ippathid]; iucv_path_resumed() local
1645 if (path && path->handler && path->handler->path_resumed) iucv_path_resumed()
1646 path->handler->path_resumed(path, ipr->ipuser); iucv_path_resumed()
1674 struct iucv_path *path = iucv_path_table[imc->ippathid]; iucv_message_complete() local
1677 if (path && path->handler && path->handler->message_complete) { iucv_message_complete()
1685 path->handler->message_complete(path, &msg); iucv_message_complete()
1719 struct iucv_path *path = iucv_path_table[imp->ippathid]; iucv_message_pending() local
1722 if (path && path->handler && path->handler->message_pending) { iucv_message_pending()
1732 path->handler->message_pending(path, &msg); iucv_message_pending()
1783 * This work function loops over the queue of path pending irq blocks
1873 * iucv_path_table_empty() - determine if iucv path table is empty
1930 * make iucv ready for use again: allocate path table, declare interrupt buffers
1965 * make iucv ready for use again: allocate path table, declare interrupt buffers
H A Daf_iucv.c281 * Always returns true if the socket is not connected (no iucv path for
291 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); iucv_below_msglim()
435 /* Terminate an IUCV path */ iucv_sever_path()
440 struct iucv_path *path = iucv->path; iucv_sever_path() local
442 if (iucv->path) { iucv_sever_path()
443 iucv->path = NULL; iucv_sever_path()
448 pr_iucv->path_sever(path, user_data); iucv_sever_path()
450 pr_iucv->path_sever(path, NULL); iucv_sever_path()
451 iucv_path_free(path); iucv_sever_path()
560 iucv->path = NULL; iucv_sock_alloc()
727 if (iucv->path) iucv_sock_bind()
813 /* Create path. */ afiucv_path_connect()
814 iucv->path = iucv_path_alloc(iucv->msglimit, afiucv_path_connect()
816 if (!iucv->path) { afiucv_path_connect()
820 err = pr_iucv->path_connect(iucv->path, &af_iucv_handler, afiucv_path_connect()
824 iucv_path_free(iucv->path); afiucv_path_connect()
825 iucv->path = NULL; afiucv_path_connect()
1009 * @path: IUCV path
1020 static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg, iucv_send_iprm() argument
1027 return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0, iucv_send_iprm()
1128 /* wait if outstanding messages for the iucv path have reached the limit */
1155 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
1157 err = iucv_send_iprm(iucv->path, &txmsg, skb);
1167 * IUCV_IPRMDATA path flag is set... sever path */
1169 pr_iucv->path_sever(iucv->path, NULL);
1175 err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1247 struct iucv_path *path, iucv_process_message()
1266 rc = pr_iucv->message_receive(path, msg, iucv_process_message()
1282 pr_iucv->path_sever(path, NULL); iucv_process_message()
1312 iucv_process_message(sk, skb, p->path, &p->msg); iucv_process_message_q()
1522 err = pr_iucv->message_send(iucv->path, &txmsg, iucv_sock_shutdown()
1544 iucv->path) { iucv_sock_shutdown()
1545 err = pr_iucv->path_quiesce(iucv->path, NULL); iucv_sock_shutdown()
1652 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */ iucv_sock_getsockopt()
1677 static int iucv_callback_connreq(struct iucv_path *path, iucv_callback_connreq() argument
1689 /* Find out if this path belongs to af_iucv. */ iucv_callback_connreq()
1715 err = pr_iucv->path_sever(path, user_data); iucv_callback_connreq()
1716 iucv_path_free(path); iucv_callback_connreq()
1722 err = pr_iucv->path_sever(path, user_data); iucv_callback_connreq()
1723 iucv_path_free(path); iucv_callback_connreq()
1730 err = pr_iucv->path_sever(path, user_data); iucv_callback_connreq()
1731 iucv_path_free(path); iucv_callback_connreq()
1744 niucv->path = path; iucv_callback_connreq()
1751 /* set message limit for path based on msglimit of accepting socket */ iucv_callback_connreq()
1753 path->msglim = iucv->msglimit; iucv_callback_connreq()
1754 err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); iucv_callback_connreq()
1772 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) iucv_callback_connack() argument
1774 struct sock *sk = path->private; iucv_callback_connack()
1780 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) iucv_callback_rx() argument
1782 struct sock *sk = path->private; iucv_callback_rx()
1789 pr_iucv->message_reject(path, msg); iucv_callback_rx()
1808 iucv_process_message(sk, skb, path, msg); iucv_callback_rx()
1815 save_msg->path = path; iucv_callback_rx()
1824 static void iucv_callback_txdone(struct iucv_path *path, iucv_callback_txdone() argument
1827 struct sock *sk = path->private; iucv_callback_txdone()
1866 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) iucv_callback_connrej() argument
1868 struct sock *sk = path->private; iucv_callback_connrej()
1884 static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) iucv_callback_shutdown() argument
1886 struct sock *sk = path->private; iucv_callback_shutdown()
1246 iucv_process_message(struct sock *sk, struct sk_buff *skb, struct iucv_path *path, struct iucv_message *msg) iucv_process_message() argument
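
The kernel-doc hits above lay out the IUCV path lifecycle: the active side allocates a path and iucv_path_connect()s it, the peer answers its path_pending() callback with iucv_path_accept(), messages move with iucv_message_send()/iucv_message_receive(), and either side finishes with iucv_path_sever() plus iucv_path_free(). A condensed active-side sketch, assuming s390 kernel context and a handler already registered with iucv_register() (the argument order past the handler follows the 4.4 headers and should be checked there):

	/* Sketch only: active-side IUCV path lifecycle per the kernel-doc above. */
	static int sketch_iucv_connect(struct iucv_handler *handler, u8 *userid)
	{
		struct iucv_path *path;
		int rc;

		path = iucv_path_alloc(16 /* msglim */, 0 /* flags */, GFP_KERNEL);
		if (!path)
			return -ENOMEM;

		rc = iucv_path_connect(path, handler, userid,
				       NULL /* system */, NULL /* userdata */,
				       NULL /* private */);
		if (rc) {
			iucv_path_free(path);	/* never connected: just free */
			return rc;
		}
		/* ... wait for path_complete(), then exchange messages ... */
		rc = iucv_path_sever(path, NULL);
		iucv_path_free(path);
		return rc;
	}
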
/linux-4.4.14/tools/testing/selftests/exec/
H A Dexecveat.c27 static int execveat_(int fd, const char *path, char **argv, char **envp, execveat_() argument
31 return syscall(__NR_execveat, fd, path, argv, envp, flags); execveat_()
38 #define check_execveat_fail(fd, path, flags, errno) \
39 _check_execveat_fail(fd, path, flags, errno, #errno) _check_execveat_fail()
40 static int _check_execveat_fail(int fd, const char *path, int flags, _check_execveat_fail() argument
47 fd, path?:"(null)", flags, errno_str); _check_execveat_fail()
48 rc = execveat_(fd, path, argv, envp, flags); _check_execveat_fail()
64 static int check_execveat_invoked_rc(int fd, const char *path, int flags, check_execveat_invoked_rc() argument
70 int pathlen = path ? strlen(path) : 0; check_execveat_invoked_rc()
74 fd, path, (path + pathlen - 20), flags); check_execveat_invoked_rc()
77 fd, path?:"(null)", flags); check_execveat_invoked_rc()
85 rc = execveat_(fd, path, argv, envp, flags); check_execveat_invoked_rc()
111 static int check_execveat(int fd, const char *path, int flags) check_execveat() argument
113 return check_execveat_invoked_rc(fd, path, flags, 99, 99); check_execveat()
231 int fd_ephemeral_path = open_or_die("execveat.path.ephemeral", run_tests()
249 /* dfd + path */ run_tests()
253 /* absolute path */ run_tests()
255 /* absolute path with nonsense dfd */ run_tests()
257 /* fd + no path */ run_tests()
259 /* O_CLOEXEC fd + no path */ run_tests()
265 /* fd + no path to a file that's been renamed */ run_tests()
268 /* fd + no path to a file that's been deleted */ run_tests()
273 /* fd + no path to a file that's been deleted */ run_tests()
274 unlink("execveat.path.ephemeral"); run_tests()
282 /* dfd + path */ run_tests()
285 /* absolute path */ run_tests()
287 /* fd + no path, even with AT_SYMLINK_NOFOLLOW (already followed) */ run_tests()
293 /* dfd + path */ run_tests()
298 /* absolute path */ run_tests()
303 /* dfd + path */ run_tests()
307 /* absolute path */ run_tests()
309 /* fd + no path */ run_tests()
319 /* fd + no path to a file that's been renamed */ run_tests()
322 /* fd + no path to a file that's been deleted */ run_tests()
326 /* Rename a subdirectory in the path: */ run_tests()
339 /* Invalid path => ENOENT */ run_tests()
368 exe_cp("execveat", "execveat.path.ephemeral"); prerequisites()
/linux-4.4.14/arch/um/os-Linux/
H A Dexecvp.c52 char *path = getenv("PATH"); execvp_noalloc() local
53 if (path == NULL) execvp_noalloc()
54 path = ":/bin:/usr/bin"; execvp_noalloc()
57 pathlen = strlen(path); execvp_noalloc()
64 p = path; execvp_noalloc()
68 path = p; execvp_noalloc()
70 //p = strchrnul (path, ':'); execvp_noalloc()
71 p = strchr(path, ':'); execvp_noalloc()
73 p = strchr(path, '\0'); execvp_noalloc()
75 if (p == path) execvp_noalloc()
80 startp = memcpy(name - (p - path), path, p - path); execvp_noalloc()
100 by us, in which case we want to just try the next path execvp_noalloc()
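
execvp_noalloc() above walks $PATH by hand: strchr() finds the next ':', an empty component means the current directory, and each candidate is the component prepended to the program name. The same walk in a runnable form that prints the candidates instead of exec'ing them:

	/* Sketch only: the $PATH walk from execvp_noalloc() above. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(int argc, char **argv)
	{
		const char *file = argc > 1 ? argv[1] : "ls";
		const char *path = getenv("PATH");
		const char *p = path ? path : ":/bin:/usr/bin";

		for (;;) {
			const char *start = p;
			const char *colon = strchr(p, ':');
			size_t len = colon ? (size_t)(colon - start)
					   : strlen(start);

			if (len == 0)
				printf("./%s\n", file);	/* empty entry = cwd */
			else
				printf("%.*s/%s\n", (int)len, start, file);
			if (!colon)
				break;
			p = colon + 1;
		}
		return 0;
	}
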
/linux-4.4.14/sound/pci/hda/
H A Dhda_generic.c253 /* return true if the given NID is contained in the path */ is_nid_contained()
254 static bool is_nid_contained(struct nid_path *path, hda_nid_t nid) is_nid_contained() argument
256 return find_idx_in_nid_list(nid, path->path, path->depth) >= 0; is_nid_contained()
267 struct nid_path *path = snd_array_elem(&spec->paths, i); get_nid_path() local
268 if (path->depth <= 0) get_nid_path()
270 if ((!from_nid || path->path[0] == from_nid) && get_nid_path()
271 (!to_nid || path->path[path->depth - 1] == to_nid)) { get_nid_path()
273 (anchor_nid > 0 && is_nid_contained(path, anchor_nid)) || get_nid_path()
274 (anchor_nid < 0 && !is_nid_contained(path, anchor_nid))) get_nid_path()
275 return path; get_nid_path()
282 * snd_hda_get_nid_path - get the path between the given NIDs
284 * @from_nid: the NID where the path start from
285 * @to_nid: the NID where the path ends at
298 * snd_hda_get_path_idx - get the index number corresponding to the path
301 * @path: nid_path object
304 * and zero is handled as an invalid path
306 int snd_hda_get_path_idx(struct hda_codec *codec, struct nid_path *path) snd_hda_get_path_idx() argument
314 idx = path - array; snd_hda_get_path_idx()
322 * snd_hda_get_path_from_idx - get the path instance corresponding to the
325 * @idx: the path index
344 struct nid_path *path = snd_array_elem(&spec->paths, i); is_dac_already_used() local
345 if (path->path[0] == nid) is_dac_already_used()
363 /* check whether the given ctl is already assigned in any path elements */ is_ctl_used()
371 struct nid_path *path = snd_array_elem(&spec->paths, i); is_ctl_used() local
372 if ((path->ctls[type] & AMP_VAL_COMPARE_MASK) == val) is_ctl_used()
387 const char *pfx, struct nid_path *path) print_nid_path()
394 for (i = 0; i < path->depth; i++) print_nid_path()
397 path->path[i]); print_nid_path()
399 codec_dbg(codec, "%s path: depth=%d '%s'\n", pfx, path->depth, buf); print_nid_path()
405 int anchor_nid, struct nid_path *path, __parse_nid_path()
440 anchor_nid, path, depth + 1)) __parse_nid_path()
446 path->path[path->depth] = conn[i]; __parse_nid_path()
447 path->idx[path->depth + 1] = i; __parse_nid_path()
449 path->multi[path->depth + 1] = 1; __parse_nid_path()
450 path->depth++; __parse_nid_path()
455 * snd_hda_parse_nid_path - parse the widget path from the given nid to
458 * @from_nid: the NID where the path start from
459 * @to_nid: the NID where the path ends at
461 * @path: the path object to store the result
463 * Returns true if a matching path is found.
471 * when @anchor_nid is zero, no special handling about path selection.
475 struct nid_path *path) snd_hda_parse_nid_path()
477 if (__parse_nid_path(codec, from_nid, to_nid, anchor_nid, path, 1)) { snd_hda_parse_nid_path()
478 path->path[path->depth] = to_nid; snd_hda_parse_nid_path()
479 path->depth++; snd_hda_parse_nid_path()
487 * snd_hda_add_new_path - parse the path between the given NIDs and
488 * add to the path list
490 * @from_nid: the NID where the path start from
491 * @to_nid: the NID where the path ends at
494 * If no valid path is found, returns NULL.
501 struct nid_path *path; snd_hda_add_new_path() local
506 /* check whether the path has been already added */ snd_hda_add_new_path()
507 path = get_nid_path(codec, from_nid, to_nid, anchor_nid); snd_hda_add_new_path()
508 if (path) snd_hda_add_new_path()
509 return path; snd_hda_add_new_path()
511 path = snd_array_new(&spec->paths); snd_hda_add_new_path()
512 if (!path) snd_hda_add_new_path()
514 memset(path, 0, sizeof(*path)); snd_hda_add_new_path()
515 if (snd_hda_parse_nid_path(codec, from_nid, to_nid, anchor_nid, path)) snd_hda_add_new_path()
516 return path; snd_hda_add_new_path()
523 /* clear the given path as invalid so that it won't be picked up later */ invalidate_nid_path()
526 struct nid_path *path = snd_hda_get_path_from_idx(codec, idx); invalidate_nid_path() local
527 if (!path) invalidate_nid_path()
529 memset(path, 0, sizeof(*path)); invalidate_nid_path()
584 /* look for a widget suitable for assigning a mute switch in the path */ look_for_out_mute_nid()
586 struct nid_path *path) look_for_out_mute_nid()
590 for (i = path->depth - 1; i >= 0; i--) { look_for_out_mute_nid()
591 if (nid_has_mute(codec, path->path[i], HDA_OUTPUT)) look_for_out_mute_nid()
592 return path->path[i]; look_for_out_mute_nid()
593 if (i != path->depth - 1 && i != 0 && look_for_out_mute_nid()
594 nid_has_mute(codec, path->path[i], HDA_INPUT)) look_for_out_mute_nid()
595 return path->path[i]; look_for_out_mute_nid()
600 /* look for a widget suitable for assigning a volume ctl in the path */ look_for_out_vol_nid()
602 struct nid_path *path) look_for_out_vol_nid()
607 for (i = path->depth - 1; i >= 0; i--) { look_for_out_vol_nid()
608 hda_nid_t nid = path->path[i]; look_for_out_vol_nid()
618 * path activation / deactivation
622 static bool has_amp_in(struct hda_codec *codec, struct nid_path *path, int idx) has_amp_in() argument
624 hda_nid_t nid = path->path[idx]; has_amp_in()
636 static bool has_amp_out(struct hda_codec *codec, struct nid_path *path, int idx) has_amp_out() argument
638 hda_nid_t nid = path->path[idx]; has_amp_out()
661 struct nid_path *path = snd_array_elem(&spec->paths, n); is_active_nid() local
662 if (!path->active) is_active_nid()
665 if (!path->stream_enabled) is_active_nid()
668 if (!(path->pin_enabled || path->pin_fixed) && is_active_nid()
672 for (i = 0; i < path->depth; i++) { is_active_nid()
673 if (path->path[i] == nid) { is_active_nid()
675 path->idx[i] == idx) is_active_nid()
794 static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, activate_amp_out() argument
797 hda_nid_t nid = path->path[i]; activate_amp_out()
802 static void activate_amp_in(struct hda_codec *codec, struct nid_path *path, activate_amp_in() argument
809 hda_nid_t nid = path->path[i]; activate_amp_in()
818 idx = path->idx[i]; activate_amp_in()
824 * when aa-mixer is available, we need to enable the path as well activate_amp_in()
840 /* sync power of each widget in the given path */ path_power_update()
842 struct nid_path *path, path_power_update()
848 for (i = 0; i < path->depth; i++) { path_power_update()
849 nid = path->path[i]; path_power_update()
889 * snd_hda_activate_path - activate or deactivate the given path
891 * @path: the path to activate/deactivate
897 void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path, snd_hda_activate_path() argument
903 path->active = enable; snd_hda_activate_path()
907 path_power_update(codec, path, codec->power_save_node); snd_hda_activate_path()
909 for (i = path->depth - 1; i >= 0; i--) { snd_hda_activate_path()
910 hda_nid_t nid = path->path[i]; snd_hda_activate_path()
912 if (enable && path->multi[i]) snd_hda_activate_path()
915 path->idx[i]); snd_hda_activate_path()
916 if (has_amp_in(codec, path, i)) snd_hda_activate_path()
917 activate_amp_in(codec, path, i, enable, add_aamix); snd_hda_activate_path()
918 if (has_amp_out(codec, path, i)) snd_hda_activate_path()
919 activate_amp_out(codec, path, i, enable); snd_hda_activate_path()
924 /* if the given path is inactive, put widgets into D3 (only if suitable) */ path_power_down_sync()
925 static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) path_power_down_sync() argument
929 if (!(spec->power_down_unused || codec->power_save_node) || path->active) path_power_down_sync()
931 sync_power_state_change(codec, path_power_update(codec, path, true)); path_power_down_sync()
950 /* re-initialize the path specified by the given path index */ resume_path_from_idx()
953 struct nid_path *path = snd_hda_get_path_from_idx(codec, path_idx); resume_path_from_idx() local
954 if (path) resume_path_from_idx()
955 snd_hda_activate_path(codec, path, path->active, false); resume_path_from_idx()
1031 unsigned int chs, struct nid_path *path) add_vol_ctl()
1034 if (!path) add_vol_ctl()
1036 val = path->ctls[NID_PATH_VOL_CTL]; add_vol_ctl()
1043 /* return the channel bits suitable for the given path->ctls[] */ get_default_ch_nums()
1044 static int get_default_ch_nums(struct hda_codec *codec, struct nid_path *path, get_default_ch_nums() argument
1048 if (path) { get_default_ch_nums()
1049 hda_nid_t nid = get_amp_nid_(path->ctls[type]); get_default_ch_nums()
1057 struct nid_path *path) add_stereo_vol()
1059 int chs = get_default_ch_nums(codec, path, NID_PATH_VOL_CTL); add_stereo_vol()
1060 return add_vol_ctl(codec, pfx, cidx, chs, path); add_stereo_vol()
1067 unsigned int chs, struct nid_path *path) add_sw_ctl()
1072 if (!path) add_sw_ctl()
1074 val = path->ctls[NID_PATH_MUTE_CTL]; add_sw_ctl()
1090 int cidx, struct nid_path *path) add_stereo_sw()
1092 int chs = get_default_ch_nums(codec, path, NID_PATH_MUTE_CTL); add_stereo_sw()
1093 return add_sw_ctl(codec, pfx, cidx, chs, path); add_stereo_sw()
1125 /* any ctl assigned to the path with the given index? */ path_has_mixer()
1128 struct nid_path *path = snd_hda_get_path_from_idx(codec, path_idx); path_has_mixer() local
1129 return path && path->ctls[ctl_type]; path_has_mixer()
1239 /* look for widgets in the given path which are appropriate for
1242 * When no appropriate widget is found in the path, the badness value
1246 static int assign_out_path_ctls(struct hda_codec *codec, struct nid_path *path) assign_out_path_ctls() argument
1253 if (!path) assign_out_path_ctls()
1256 if (path->ctls[NID_PATH_VOL_CTL] || assign_out_path_ctls()
1257 path->ctls[NID_PATH_MUTE_CTL]) assign_out_path_ctls()
1260 nid = look_for_out_vol_nid(codec, path); assign_out_path_ctls()
1268 path->ctls[NID_PATH_VOL_CTL] = val; assign_out_path_ctls()
1271 nid = look_for_out_mute_nid(codec, path); assign_out_path_ctls()
1282 path->ctls[NID_PATH_MUTE_CTL] = val; assign_out_path_ctls()
1344 struct nid_path *path; try_assign_dacs() local
1347 path = snd_hda_get_path_from_idx(codec, path_idx[i]); try_assign_dacs()
1348 if (path) { try_assign_dacs()
1349 badness += assign_out_path_ctls(codec, path); try_assign_dacs()
1398 path = snd_hda_add_new_path(codec, dac, pin, -spec->mixer_nid); try_assign_dacs()
1399 if (!path && !i && spec->mixer_nid) { try_assign_dacs()
1401 path = snd_hda_add_new_path(codec, dac, pin, 0); try_assign_dacs()
1403 if (!path) { try_assign_dacs()
1407 /* print_nid_path(codec, "output", path); */ try_assign_dacs()
1408 path->active = true; try_assign_dacs()
1409 path_idx[i] = snd_hda_get_path_idx(codec, path); try_assign_dacs()
1410 badness += assign_out_path_ctls(codec, path); try_assign_dacs()
1494 struct nid_path *path; fill_multi_ios() local
1528 path = snd_hda_add_new_path(codec, dac, nid, fill_multi_ios()
1530 if (!path) { fill_multi_ios()
1534 /* print_nid_path(codec, "multiio", path); */ fill_multi_ios()
1538 snd_hda_get_path_idx(codec, path); fill_multi_ios()
1562 path = snd_hda_get_path_from_idx(codec, spec->out_paths[cfg->line_outs + i]); fill_multi_ios()
1563 badness += assign_out_path_ctls(codec, path); fill_multi_ios()
1577 struct nid_path *path; map_singles() local
1584 path = snd_hda_add_new_path(codec, dac, pins[i], map_singles()
1586 if (!path && !i && spec->mixer_nid) map_singles()
1587 path = snd_hda_add_new_path(codec, dac, pins[i], 0); map_singles()
1588 if (path) { map_singles()
1591 /* print_nid_path(codec, "output", path); */ map_singles()
1592 path->active = true; map_singles()
1593 path_idx[i] = snd_hda_get_path_idx(codec, path); map_singles()
1605 /* create a new path including aamix if available, and return its index */ check_aamix_out_path()
1609 struct nid_path *path; check_aamix_out_path() local
1612 path = snd_hda_get_path_from_idx(codec, path_idx); check_aamix_out_path()
1613 if (!path || !path->depth || check_aamix_out_path()
1614 is_nid_contained(path, spec->mixer_nid)) check_aamix_out_path()
1616 path_dac = path->path[0]; check_aamix_out_path()
1618 pin = path->path[path->depth - 1]; check_aamix_out_path()
1619 path = snd_hda_add_new_path(codec, dac, pin, spec->mixer_nid); check_aamix_out_path()
1620 if (!path) { check_aamix_out_path()
1630 path = snd_hda_add_new_path(codec, dac, pin, check_aamix_out_path()
1633 if (!path) check_aamix_out_path()
1635 /* print_nid_path(codec, "output-aamix", path); */ check_aamix_out_path()
1636 path->active = false; /* unused as default */ check_aamix_out_path()
1637 path->pin_fixed = true; /* static route */ check_aamix_out_path()
1638 return snd_hda_get_path_idx(codec, path); check_aamix_out_path()
1646 struct nid_path *path; indep_hp_possible() local
1653 path = snd_hda_get_path_from_idx(codec, idx); indep_hp_possible()
1654 if (!path) indep_hp_possible()
1657 /* assume no path conflicts unless aamix is involved */ indep_hp_possible()
1658 if (!spec->mixer_nid || !is_nid_contained(path, spec->mixer_nid)) indep_hp_possible()
1665 path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]); indep_hp_possible()
1666 if (path && is_nid_contained(path, spec->mixer_nid)) indep_hp_possible()
1670 path = snd_hda_get_path_from_idx(codec, spec->speaker_paths[i]); indep_hp_possible()
1671 if (path && is_nid_contained(path, spec->mixer_nid)) indep_hp_possible()
1684 struct nid_path *path; refill_shared_dacs() local
1690 path = snd_hda_get_path_from_idx(codec, path_idx[i]); refill_shared_dacs()
1691 if (!path) refill_shared_dacs()
1693 dacs[i] = path->path[0]; refill_shared_dacs()
1715 /* clear path indices */ fill_and_eval_dacs()
1864 struct nid_path *path; print_nid_path_idx() local
1866 path = snd_hda_get_path_from_idx(codec, idx); print_nid_path_idx()
1867 if (path) print_nid_path_idx()
1868 print_nid_path(codec, pfx, path); print_nid_path_idx()
2028 struct nid_path *path; parse_output_paths() local
2029 path = snd_hda_get_path_from_idx(codec, spec->out_paths[0]); parse_output_paths()
2030 if (path) parse_output_paths()
2031 spec->vmaster_nid = look_for_out_vol_nid(codec, path); parse_output_paths()
2076 struct nid_path *path; create_multi_out_ctls() local
2078 path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]); create_multi_out_ctls()
2079 if (!path) create_multi_out_ctls()
2085 err = add_vol_ctl(codec, "Center", 0, 1, path); create_multi_out_ctls()
2088 err = add_vol_ctl(codec, "LFE", 0, 2, path); create_multi_out_ctls()
2092 err = add_stereo_vol(codec, name, index, path); create_multi_out_ctls()
2099 err = add_sw_ctl(codec, "Center", 0, 1, path); create_multi_out_ctls()
2102 err = add_sw_ctl(codec, "LFE", 0, 2, path); create_multi_out_ctls()
2106 err = add_stereo_sw(codec, name, index, path); create_multi_out_ctls()
2117 struct nid_path *path; create_extra_out() local
2120 path = snd_hda_get_path_from_idx(codec, path_idx); create_extra_out()
2121 if (!path) create_extra_out()
2123 err = add_stereo_vol(codec, pfx, cidx, path); create_extra_out()
2126 err = add_stereo_sw(codec, pfx, cidx, path); create_extra_out()
2335 struct nid_path *path; set_multi_io() local
2337 path = get_multiio_path(codec, idx); set_multi_io()
2338 if (!path) set_multi_io()
2341 if (path->active == output) set_multi_io()
2346 snd_hda_activate_path(codec, path, true, aamix_default(spec)); set_multi_io()
2350 snd_hda_activate_path(codec, path, false, aamix_default(spec)); set_multi_io()
2352 path_power_down_sync(codec, path); set_multi_io()
2429 /* if HP aamix path is driven from a different DAC and the update_aamix_paths()
2430 * independent HP mode is ON, can't turn on aamix path update_aamix_paths()
2433 mix_path->path[0] != spec->alt_dac_nid) update_aamix_paths()
2452 struct nid_path *path; update_output_paths() local
2456 path = snd_hda_get_path_from_idx(codec, paths[i]); update_output_paths()
2457 if (path) update_output_paths()
2458 snd_hda_activate_path(codec, path, path->active, update_output_paths()
2512 /* if no explicit aamix path is present (e.g. for Realtek codecs), create_loopback_mixing_ctl()
3016 * aamix path; the amp has to be either in the mixer node or its direct leaf
3059 struct nid_path *path; new_analog_input() local
3066 path = snd_hda_add_new_path(codec, pin, mix_nid, 0); new_analog_input()
3067 if (!path) new_analog_input()
3069 print_nid_path(codec, "loopback", path); new_analog_input()
3070 spec->loopback_paths[input_idx] = snd_hda_get_path_idx(codec, path); new_analog_input()
3072 idx = path->idx[path->depth - 1]; new_analog_input()
3077 path->ctls[NID_PATH_VOL_CTL] = mix_val; new_analog_input()
3084 path->ctls[NID_PATH_MUTE_CTL] = mute_val; new_analog_input()
3087 path->active = true; new_analog_input()
3088 path->stream_enabled = true; /* no DAC/ADC involved */ new_analog_input()
3095 path = snd_hda_add_new_path(codec, spec->mixer_nid, new_analog_input()
3097 if (path) { new_analog_input()
3098 print_nid_path(codec, "loopback-merge", path); new_analog_input()
3099 path->active = true; new_analog_input()
3100 path->pin_fixed = true; /* static route */ new_analog_input()
3101 path->stream_enabled = true; /* no DAC/ADC involved */ new_analog_input()
3103 snd_hda_get_path_idx(codec, path); new_analog_input()
3225 struct nid_path *path; parse_capture_source() local
3230 path = snd_hda_add_new_path(codec, pin, adc, anchor); parse_capture_source()
3231 if (!path) parse_capture_source()
3233 print_nid_path(codec, "input", path); parse_capture_source()
3235 snd_hda_get_path_idx(codec, path); parse_capture_source()
3361 /* get the input path specified by the given adc and imux indices */ get_input_path()
3433 struct nid_path *path; cap_put_caller() local
3440 path = get_input_path(codec, adc_idx, i); cap_put_caller()
3441 if (!path || !path->ctls[type]) cap_put_caller()
3443 kcontrol->private_value = path->ctls[type]; cap_put_caller()
3499 static int parse_capvol_in_path(struct hda_codec *codec, struct nid_path *path) parse_capvol_in_path() argument
3504 path->ctls[NID_PATH_VOL_CTL] = path->ctls[NID_PATH_MUTE_CTL] = 0; parse_capvol_in_path()
3506 if (depth >= path->depth) parse_capvol_in_path()
3508 i = path->depth - depth - 1; parse_capvol_in_path()
3509 nid = path->path[i]; parse_capvol_in_path()
3510 if (!path->ctls[NID_PATH_VOL_CTL]) { parse_capvol_in_path()
3512 path->ctls[NID_PATH_VOL_CTL] = parse_capvol_in_path()
3515 int idx = path->idx[i]; parse_capvol_in_path()
3518 path->ctls[NID_PATH_VOL_CTL] = parse_capvol_in_path()
3522 if (!path->ctls[NID_PATH_MUTE_CTL]) { parse_capvol_in_path()
3524 path->ctls[NID_PATH_MUTE_CTL] = parse_capvol_in_path()
3527 int idx = path->idx[i]; parse_capvol_in_path()
3530 path->ctls[NID_PATH_MUTE_CTL] = parse_capvol_in_path()
3665 struct nid_path *path; get_first_cap_ctl() local
3669 path = get_input_path(codec, 0, idx); get_first_cap_ctl()
3670 if (!path) get_first_cap_ctl()
3672 ctl = path->ctls[type]; get_first_cap_ctl()
3676 path = get_input_path(codec, 0, i); get_first_cap_ctl()
3677 if (path && path->ctls[type] == ctl) get_first_cap_ctl()
3742 struct nid_path *path; create_capture_mixers() local
3743 path = get_input_path(codec, n, i); create_capture_mixers()
3744 if (!path) create_capture_mixers()
3746 parse_capvol_in_path(codec, path); create_capture_mixers()
3748 vol = path->ctls[NID_PATH_VOL_CTL]; create_capture_mixers()
3749 else if (vol != path->ctls[NID_PATH_VOL_CTL]) { create_capture_mixers()
3752 path->ctls[NID_PATH_VOL_CTL], HDA_INPUT)) create_capture_mixers()
3756 sw = path->ctls[NID_PATH_MUTE_CTL]; create_capture_mixers()
3757 else if (sw != path->ctls[NID_PATH_MUTE_CTL]) { create_capture_mixers()
3760 path->ctls[NID_PATH_MUTE_CTL], HDA_INPUT)) create_capture_mixers()
3805 struct nid_path *path) look_for_boost_amp()
3812 if (depth >= path->depth - 1) look_for_boost_amp()
3814 nid = path->path[depth]; look_for_boost_amp()
3819 path->idx[depth])) { look_for_boost_amp()
3820 val = HDA_COMPOSE_AMP_VAL(nid, 3, path->idx[depth], look_for_boost_amp()
3840 struct nid_path *path; parse_mic_boost() local
3853 path = get_input_path(codec, 0, i); parse_mic_boost()
3854 if (!path) parse_mic_boost()
3857 val = look_for_boost_amp(codec, path); parse_mic_boost()
3868 path->ctls[NID_PATH_BOOST_CTL] = val; parse_mic_boost()
3879 struct nid_path *path; parse_digital() local
3890 path = snd_hda_add_new_path(codec, dig_nid, pin, 0); parse_digital()
3891 if (!path) parse_digital()
3893 print_nid_path(codec, "digout", path); parse_digital()
3894 path->active = true; parse_digital()
3895 path->pin_fixed = true; /* no jack detection */ parse_digital()
3896 spec->digout_paths[i] = snd_hda_get_path_idx(codec, path); parse_digital()
3918 path = snd_hda_add_new_path(codec, pin, dig_nid, 0); for_each_hda_codec_node()
3919 if (path) { for_each_hda_codec_node()
3920 print_nid_path(codec, "digin", path); for_each_hda_codec_node()
3921 path->active = true; for_each_hda_codec_node()
3922 path->pin_fixed = true; /* no jack */ for_each_hda_codec_node()
3924 spec->digin_path = snd_hda_get_path_idx(codec, path); for_each_hda_codec_node()
3945 struct nid_path *old_path, *path; mux_select() local
3970 path = get_input_path(codec, adc_idx, idx); mux_select()
3971 if (!path) mux_select()
3973 if (path->active) mux_select()
3975 snd_hda_activate_path(codec, path, true, false); mux_select()
3992 struct nid_path *path; set_path_power() local
3996 path = snd_array_elem(&spec->paths, n); set_path_power()
3997 if (path->path[0] == nid || set_path_power()
3998 path->path[path->depth - 1] == nid) { set_path_power()
3999 bool pin_old = path->pin_enabled; set_path_power()
4000 bool stream_old = path->stream_enabled; set_path_power()
4003 path->pin_enabled = pin_state; set_path_power()
4005 path->stream_enabled = stream_state; set_path_power()
4006 if ((!path->pin_fixed && path->pin_enabled != pin_old) set_path_power()
4007 || path->stream_enabled != stream_old) { set_path_power()
4008 last = path_power_update(codec, path, true); set_path_power()
4105 /* sync path power up/down with the jack states of given pins */ sync_pin_power_ctls()
4116 /* sync path power up/down with pins; called at init and resume */ sync_all_pin_power_ctls()
4139 struct nid_path *path; add_fake_paths() local
4147 path = snd_array_new(&spec->paths); add_fake_paths()
4148 if (!path) add_fake_paths()
4150 memset(path, 0, sizeof(*path)); add_fake_paths()
4151 path->depth = 2; add_fake_paths()
4152 path->path[0] = nid; add_fake_paths()
4153 path->path[1] = pins[i]; add_fake_paths()
4154 path->active = true; add_fake_paths()
4200 struct nid_path *path; snd_hda_gen_fix_pin_power() local
4202 path = snd_array_new(&spec->paths); snd_hda_gen_fix_pin_power()
4203 if (!path) snd_hda_gen_fix_pin_power()
4205 memset(path, 0, sizeof(*path)); snd_hda_gen_fix_pin_power()
4206 path->depth = 1; snd_hda_gen_fix_pin_power()
4207 path->path[0] = pin; snd_hda_gen_fix_pin_power()
4208 path->active = true; snd_hda_gen_fix_pin_power()
4209 path->pin_fixed = true; snd_hda_gen_fix_pin_power()
4210 path->stream_enabled = true; snd_hda_gen_fix_pin_power()
4256 struct nid_path *path; do_automute() local
4259 path = snd_hda_get_path_from_idx(codec, paths[i]); do_automute()
4260 if (!path) do_automute()
4262 mute_nid = get_amp_nid_(path->ctls[NID_PATH_MUTE_CTL]); do_automute()
5643 /* configure the given path as a proper output */ set_output_and_unmute()
5646 struct nid_path *path; set_output_and_unmute() local
5649 path = snd_hda_get_path_from_idx(codec, path_idx); set_output_and_unmute()
5650 if (!path || !path->depth) set_output_and_unmute()
5652 pin = path->path[path->depth - 1]; set_output_and_unmute()
5654 snd_hda_activate_path(codec, path, path->active, set_output_and_unmute()
5656 set_pin_eapd(codec, pin, path->active); set_output_and_unmute()
5698 struct nid_path *path; init_multi_io() local
5699 path = get_multiio_path(codec, i); init_multi_io()
5700 if (!path) init_multi_io()
5705 snd_hda_activate_path(codec, path, path->active, init_multi_io()
5754 struct nid_path *path; init_input_src() local
5764 path = get_input_path(codec, c, i); init_input_src()
5765 if (path) { init_input_src()
5766 bool active = path->active; init_input_src()
5769 snd_hda_activate_path(codec, path, active, false); init_input_src()
386 print_nid_path(struct hda_codec *codec, const char *pfx, struct nid_path *path) print_nid_path() argument
403 __parse_nid_path(struct hda_codec *codec, hda_nid_t from_nid, hda_nid_t to_nid, int anchor_nid, struct nid_path *path, int depth) __parse_nid_path() argument
473 snd_hda_parse_nid_path(struct hda_codec *codec, hda_nid_t from_nid, hda_nid_t to_nid, int anchor_nid, struct nid_path *path) snd_hda_parse_nid_path() argument
585 look_for_out_mute_nid(struct hda_codec *codec, struct nid_path *path) look_for_out_mute_nid() argument
601 look_for_out_vol_nid(struct hda_codec *codec, struct nid_path *path) look_for_out_vol_nid() argument
841 path_power_update(struct hda_codec *codec, struct nid_path *path, bool allow_powerdown) path_power_update() argument
1030 add_vol_ctl(struct hda_codec *codec, const char *pfx, int cidx, unsigned int chs, struct nid_path *path) add_vol_ctl() argument
1056 add_stereo_vol(struct hda_codec *codec, const char *pfx, int cidx, struct nid_path *path) add_stereo_vol() argument
1066 add_sw_ctl(struct hda_codec *codec, const char *pfx, int cidx, unsigned int chs, struct nid_path *path) add_sw_ctl() argument
1089 add_stereo_sw(struct hda_codec *codec, const char *pfx, int cidx, struct nid_path *path) add_stereo_sw() argument
3804 look_for_boost_amp(struct hda_codec *codec, struct nid_path *path) look_for_boost_amp() argument
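The hda_generic.c hits above all funnel through one convention: each nid_path caches its mixer-control values in path->ctls[], and a zero entry means "no control assigned". A minimal standalone sketch of that lookup pattern, with simplified stand-in types rather than the kernel's:

    #include <stdbool.h>

    enum { NID_PATH_VOL_CTL, NID_PATH_MUTE_CTL, NID_PATH_BOOST_CTL, NID_PATH_NUM_CTLS };

    struct nid_path_sketch {
        unsigned int ctls[NID_PATH_NUM_CTLS];   /* 0 means "no control assigned" */
    };

    /* any ctl assigned to the path? mirrors the path_has_mixer() hit above */
    static bool path_has_ctl(const struct nid_path_sketch *path, int ctl_type)
    {
        return path && path->ctls[ctl_type];
    }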
H A Dhda_generic.h22 /* Widget connection path
27 * idx[i] contains the source index number to select on the widget path[i];
28 * e.g. idx[1] is the index of the DAC (path[0]) selected by path[1] widget
45 hda_nid_t path[MAX_NID_PATH_DEPTH]; member in struct:nid_path
51 bool pin_fixed:1; /* path with fixed pin */
169 /* path list */
172 /* path indices */
246 /* badness tables for output path evaluations */
311 int snd_hda_get_path_idx(struct hda_codec *codec, struct nid_path *path);
315 struct nid_path *path);
319 void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
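The header comment describes the nid_path layout: path[] holds the widget NIDs from source to sink, idx[] the connection index chosen at each hop, and depth the number of hops. A hedged sketch of walking such a path (simplified types, not the kernel struct):

    #include <stdio.h>

    #define MAX_NID_PATH_DEPTH 10
    typedef unsigned short hda_nid_t;

    struct nid_path_sketch {
        int depth;                               /* number of valid hops */
        hda_nid_t path[MAX_NID_PATH_DEPTH];      /* widget NIDs, source first */
        unsigned char idx[MAX_NID_PATH_DEPTH];   /* input index selected on each widget */
    };

    static void dump_path(const struct nid_path_sketch *p)
    {
        for (int i = 0; i < p->depth; i++)
            printf("hop %d: nid 0x%02x (input idx %u)\n", i, p->path[i], p->idx[i]);
    }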
/linux-4.4.14/drivers/staging/sm750fb/
H A Dddk750_display.h4 /* panel path select
54 /* crt path select
85 LCD1 means panel path TFT1 & panel path DVI (so enable DAC)
86 CRT means crt path DSUB
/linux-4.4.14/drivers/clk/imx/
H A Dclk.c24 char *path; imx_obtain_fixed_clock_from_dt() local
26 path = kasprintf(GFP_KERNEL, "/clocks/%s", name); imx_obtain_fixed_clock_from_dt()
27 if (!path) imx_obtain_fixed_clock_from_dt()
30 phandle.np = of_find_node_by_path(path); imx_obtain_fixed_clock_from_dt()
31 kfree(path); imx_obtain_fixed_clock_from_dt()
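imx_obtain_fixed_clock_from_dt() builds a heap-allocated device-tree path "/clocks/<name>", looks the node up, then frees the string. A user-space analogue of that allocate-format-free pattern (plain libc instead of kasprintf()/kfree()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *build_clock_path(const char *name)
    {
        size_t len = strlen("/clocks/") + strlen(name) + 1;
        char *path = malloc(len);              /* kasprintf() analogue */
        if (!path)
            return NULL;
        snprintf(path, len, "/clocks/%s", name);
        return path;                           /* caller frees, as clk.c kfree()s */
    }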
/linux-4.4.14/drivers/base/
H A Ddevtmpfs.c153 struct path path; dev_mkdir() local
156 dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY); dev_mkdir()
160 err = vfs_mkdir(d_inode(path.dentry), dentry, mode); dev_mkdir()
164 done_path_create(&path, dentry); dev_mkdir()
170 char *path; create_path() local
175 path = kstrdup(nodepath, GFP_KERNEL); create_path()
176 if (!path) create_path()
179 s = path; create_path()
185 err = dev_mkdir(path, 0755); create_path()
191 kfree(path); create_path()
199 struct path path; handle_create() local
202 dentry = kern_path_create(AT_FDCWD, nodename, &path, 0); handle_create()
205 dentry = kern_path_create(AT_FDCWD, nodename, &path, 0); handle_create()
210 err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev->devt); handle_create()
225 done_path_create(&path, dentry); handle_create()
231 struct path parent; dev_rmdir()
254 const char *path; delete_path() local
257 path = kstrdup(nodepath, GFP_KERNEL); delete_path()
258 if (!path) delete_path()
264 base = strrchr(path, '/'); delete_path()
268 err = dev_rmdir(path); delete_path()
273 kfree(path); delete_path()
300 struct path parent; handle_remove()
311 struct path p = {.mnt = parent.mnt, .dentry = dentry}; handle_remove()
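devtmpfs's create_path() walks the node path left to right, creating each missing directory component before the final node is made. A runnable "mkdir -p"-style sketch of the same forward walk over '/' separators (errors such as EEXIST deliberately ignored, as the callers above tolerate them):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/stat.h>

    /* create every parent directory of nodepath, e.g. "bus/usb/001" */
    static int create_parents(const char *nodepath)
    {
        char *path = strdup(nodepath);
        if (!path)
            return -1;
        for (char *s = path; (s = strchr(s, '/')) != NULL; s++) {
            *s = '\0';
            mkdir(path, 0755);   /* best effort; EEXIST is fine */
            *s = '/';
        }
        free(path);
        return 0;
    }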
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/
H A Dixgbe_dcb_nl.c73 if (dst->path[tx].prio_type != src->path[tx].prio_type) { ixgbe_copy_dcb_cfg()
74 dst->path[tx].prio_type = src->path[tx].prio_type; ixgbe_copy_dcb_cfg()
78 if (dst->path[tx].bwg_id != src->path[tx].bwg_id) { ixgbe_copy_dcb_cfg()
79 dst->path[tx].bwg_id = src->path[tx].bwg_id; ixgbe_copy_dcb_cfg()
83 if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) { ixgbe_copy_dcb_cfg()
84 dst->path[tx].bwg_percent = src->path[tx].bwg_percent; ixgbe_copy_dcb_cfg()
88 if (dst->path[tx].up_to_tc_bitmap != ixgbe_copy_dcb_cfg()
89 src->path[tx].up_to_tc_bitmap) { ixgbe_copy_dcb_cfg()
90 dst->path[tx].up_to_tc_bitmap = ixgbe_copy_dcb_cfg()
91 src->path[tx].up_to_tc_bitmap; ixgbe_copy_dcb_cfg()
95 if (dst->path[rx].prio_type != src->path[rx].prio_type) { ixgbe_copy_dcb_cfg()
96 dst->path[rx].prio_type = src->path[rx].prio_type; ixgbe_copy_dcb_cfg()
100 if (dst->path[rx].bwg_id != src->path[rx].bwg_id) { ixgbe_copy_dcb_cfg()
101 dst->path[rx].bwg_id = src->path[rx].bwg_id; ixgbe_copy_dcb_cfg()
105 if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) { ixgbe_copy_dcb_cfg()
106 dst->path[rx].bwg_percent = src->path[rx].bwg_percent; ixgbe_copy_dcb_cfg()
110 if (dst->path[rx].up_to_tc_bitmap != ixgbe_copy_dcb_cfg()
111 src->path[rx].up_to_tc_bitmap) { ixgbe_copy_dcb_cfg()
112 dst->path[rx].up_to_tc_bitmap = ixgbe_copy_dcb_cfg()
113 src->path[rx].up_to_tc_bitmap; ixgbe_copy_dcb_cfg()
199 adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; ixgbe_dcbnl_set_pg_tc_cfg_tx()
201 adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; ixgbe_dcbnl_set_pg_tc_cfg_tx()
203 adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = ixgbe_dcbnl_set_pg_tc_cfg_tx()
206 adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = ixgbe_dcbnl_set_pg_tc_cfg_tx()
225 adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; ixgbe_dcbnl_set_pg_tc_cfg_rx()
227 adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; ixgbe_dcbnl_set_pg_tc_cfg_rx()
229 adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = ixgbe_dcbnl_set_pg_tc_cfg_rx()
232 adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = ixgbe_dcbnl_set_pg_tc_cfg_rx()
250 *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; ixgbe_dcbnl_get_pg_tc_cfg_tx()
251 *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; ixgbe_dcbnl_get_pg_tc_cfg_tx()
252 *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; ixgbe_dcbnl_get_pg_tc_cfg_tx()
253 *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; ixgbe_dcbnl_get_pg_tc_cfg_tx()
270 *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; ixgbe_dcbnl_get_pg_tc_cfg_rx()
271 *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; ixgbe_dcbnl_get_pg_tc_cfg_rx()
272 *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; ixgbe_dcbnl_get_pg_tc_cfg_rx()
273 *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; ixgbe_dcbnl_get_pg_tc_cfg_rx()
/linux-4.4.14/drivers/isdn/hardware/eicon/
H A Dmaintidi.c857 const char *path = (char *)&pVar->path_length + 1; process_idi_event() local
861 if (!strncmp("State\\B Event", path, pVar->path_length)) { process_idi_event()
877 if (!strncmp("State\\FAX Event", path, pVar->path_length)) { process_idi_event()
893 if (!strncmp("State\\Modem Event", path, pVar->path_length)) { process_idi_event()
942 if (!strncmp("Events Down", path, pVar->path_length)) { process_idi_event()
951 if (!strncmp("State\\Layer1", path, pVar->path_length)) { process_idi_event()
960 if (!strncmp("State\\Layer2 No1", path, pVar->path_length)) { process_idi_event()
1020 if (!strncmp("Statistics\\Incoming Calls\\Calls", path, pVar->path_length) || process_idi_event()
1021 !strncmp("Statistics\\Incoming Calls\\Connected", path, pVar->path_length)) { process_idi_event()
1025 if (!strncmp("Statistics\\Outgoing Calls\\Calls", path, pVar->path_length) || process_idi_event()
1026 !strncmp("Statistics\\Outgoing Calls\\Connected", path, pVar->path_length)) { process_idi_event()
1050 Uses path of first I.E. to detect the source of the
1055 const char *path = (char *)&pVar->path_length + 1; process_idi_info() local
1064 if (!strncmp(name, path, len)) { process_idi_info()
1074 if (!strncmp(name, path, len)) { process_idi_info()
1084 if (!strncmp(name, path, len)) { process_idi_info()
1111 if ((cur = find_var(pVar, pLib->parse_table[i].path))) { diva_modem_info()
1144 if ((cur = find_var(pVar, pLib->parse_table[i].path))) { diva_fax_info()
1182 if ((cur = find_var(pVar, pLib->parse_table[i].path))) { diva_line_info()
1237 const char *path; find_var() local
1240 path = (char *)&pVar->path_length + 1; find_var()
1242 if (!strncmp(name, path, pVar->path_length)) { find_var()
1264 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1268 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1272 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1276 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1280 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1285 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1290 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1295 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1300 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1304 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1308 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1312 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1316 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1320 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1325 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_line_parse_table()
1345 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1349 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1353 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1357 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1361 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1365 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1369 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1373 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1377 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1381 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1385 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1389 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_fax_parse_table()
1409 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1413 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1417 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1421 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1425 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1429 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1433 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1437 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1441 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1445 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1449 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1453 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1457 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1461 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1465 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1469 sprintf(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_modem_parse_table()
1490 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1495 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1500 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1505 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1510 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1515 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1520 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1528 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1533 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1538 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1543 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1548 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1553 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1558 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1563 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1573 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1578 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1583 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1588 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1593 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1598 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1603 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1608 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1613 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1625 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1630 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1635 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1640 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1645 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1650 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1655 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1660 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1665 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1670 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1675 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1680 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1685 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1690 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1695 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1700 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1705 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1710 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1720 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1725 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1730 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1735 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1740 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1745 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1753 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1758 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1763 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1768 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1773 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1778 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1786 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1791 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1796 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1801 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1806 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1811 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1819 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1824 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1829 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1834 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1839 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
1844 strcpy(pLib->parse_table[pLib->cur_parse_entry].path, diva_create_parse_table()
2109 if ((cur = find_var(pVar, pLib->parse_table[i].path))) { diva_ifc_statistics()
/linux-4.4.14/tools/perf/
H A Dbuiltin-help.c104 static void exec_woman_emacs(const char *path, const char *page) exec_woman_emacs() argument
112 if (!path) exec_woman_emacs()
113 path = "emacsclient"; exec_woman_emacs()
115 execlp(path, "emacsclient", "-e", man_page.buf, NULL); exec_woman_emacs()
116 warning("failed to exec '%s': %s", path, exec_woman_emacs()
121 static void exec_man_konqueror(const char *path, const char *page) exec_man_konqueror() argument
131 if (path) { exec_man_konqueror()
132 const char *file = strrchr(path, '/'); exec_man_konqueror()
134 char *new = strdup(path); exec_man_konqueror()
139 path = new; exec_man_konqueror()
144 path = "kfmclient"; exec_man_konqueror()
146 execlp(path, filename, "newTab", man_page.buf, NULL); exec_man_konqueror()
147 warning("failed to exec '%s': %s", path, exec_man_konqueror()
152 static void exec_man_man(const char *path, const char *page) exec_man_man() argument
156 if (!path) exec_man_man()
157 path = "man"; exec_man_man()
158 execlp(path, "man", page, NULL); exec_man_man()
159 warning("failed to exec '%s': %s", path, exec_man_man()
211 warning("'%s': path for unsupported man viewer.\n" add_man_viewer_path()
224 "Please consider using 'man.<tool>.path' instead.", add_man_viewer_cmd()
240 if (!strcmp(subkey, ".path")) { add_man_viewer_info()
328 /* We should always put ':' after our path. If there is no setup_man_path()
408 static void open_html(const char *path) open_html() argument
410 execl_perf_cmd("web--browse", "-c", "help.browser", path, NULL); open_html()
/linux-4.4.14/scripts/dtc/
H A Dfdtput.c42 int auto_path; /* automatically create all path components */
155 * Create paths as needed for all components of a path
157 * Any components of the path that do not exist are created. Errors are
166 const char *path = in_path; create_paths() local
171 while (*path == '/') create_paths()
172 path++; create_paths()
174 for (sep = path; *sep; path = sep + 1, offset = node) { create_paths()
176 sep = strchr(path, '/'); create_paths()
178 sep = path + strlen(path); create_paths()
180 node = fdt_subnode_offset_namelen(blob, offset, path, create_paths()
181 sep - path); create_paths()
183 node = fdt_add_subnode_namelen(blob, offset, path, create_paths()
184 sep - path); create_paths()
187 report_error(path, sep - path, node); create_paths()
285 "\t-p\t\tAutomatically create nodes as needed for the node path\n"
/linux-4.4.14/fs/configfs/
H A Dsymlink.c112 static int get_target(const char *symname, struct path *path, get_target() argument
117 ret = kern_path(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, path); get_target()
119 if (path->dentry->d_sb == sb) { get_target()
120 *target = configfs_get_config_item(path->dentry); get_target()
123 path_put(path); get_target()
127 path_put(path); get_target()
138 struct path path; configfs_symlink() local
161 ret = get_target(symname, &path, &target_item, dentry->d_sb); configfs_symlink()
176 path_put(&path); configfs_symlink()
236 char *path) configfs_get_target_path()
248 for (s = path; depth--; s += 3) configfs_get_target_path()
251 fill_item_path(target, path, size); configfs_get_target_path()
252 pr_debug("%s: path = '%s'\n", __func__, path); configfs_get_target_path()
257 static int configfs_getlink(struct dentry *dentry, char * path) configfs_getlink() argument
273 error = configfs_get_target_path(item, target_item, path); configfs_getlink()
235 configfs_get_target_path(struct config_item * item, struct config_item * target, char *path) configfs_get_target_path() argument
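configfs's get_target() shows the canonical kern_path() discipline: resolve, validate, and path_put() on every failure branch so the reference never leaks. A hedged kernel-style sketch of that resolve/check/put shape (module context assumed; this is an illustration, not the configfs code):

    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/namei.h>
    #include <linux/path.h>

    /* resolve name; succeed only if it lives on the expected superblock */
    static int resolve_on_sb(const char *name, struct super_block *sb,
                             struct path *path)
    {
        int ret = kern_path(name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path);
        if (ret)
            return ret;
        if (path->dentry->d_sb != sb) {    /* wrong filesystem: drop and fail */
            path_put(path);
            return -EPERM;
        }
        return 0;                          /* caller must path_put() when done */
    }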
/linux-4.4.14/drivers/s390/cio/
H A Dcio.h14 * path management control word
29 u8 lpm; /* logical path mask */
30 u8 pnom; /* path not operational mask */
31 u8 lpum; /* last path used mask */
32 u8 pim; /* path installed mask */
34 u8 pom; /* path operational mask */
35 u8 pam; /* path available mask */
65 struct pmcw pmcw; /* path management control word */
92 __u8 vpm; /* verified path mask */
93 __u8 lpm; /* logical path mask */
94 __u8 opm; /* operational path mask */
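The pmcw fields above are per-subchannel 8-bit path masks, one bit per channel path (bit 0x80 is path 0). The cio code repeatedly intersects such masks; a tiny standalone illustration of the style (the real driver derives its operational mask from several sources, so treat this as a sketch only):

    #include <stdio.h>

    typedef unsigned char u8;

    /* paths that are installed, available and operational at once */
    static u8 usable_paths(u8 pim, u8 pam, u8 pom)
    {
        return pim & pam & pom;
    }

    int main(void)
    {
        printf("mask = 0x%02x\n", usable_paths(0xc0, 0x80, 0xff)); /* -> 0x80 */
        return 0;
    }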
H A Dchp.c43 /* Map for channel-path status. */
47 /* Time after which channel-path status may be outdated. */
63 /* On success return 0 if channel-path is varied offline, 1 if it is varied
64 * online. Return -ENODEV if channel-path is not registered. */ chp_get_status()
74 * Calculate and return the operational path mask (opm) based on the chpids
96 * chp_is_registered - check if a channel-path is registered
97 * @chpid: channel-path ID
99 * Return non-zero if a channel-path with the given chpid is registered,
232 * Files for the channel path entries.
416 * chp_update_desc - update channel-path description
417 * @chp - channel-path
419 * Update the channel-path description of the specified channel-path.
436 * chp_new - register a new channel-path
437 * @chpid - channel-path ID
439 * Create and register data structure representing new channel-path. Return
461 /* Obtain channel path description and fill it in. */ chp_new()
506 * chp_get_chp_desc - return newly allocated channel-path description
507 * @chpid: channel-path ID
509 * On success return a newly allocated copy of the channel-path description
510 * data associated with the given channel-path ID. Return %NULL on error.
531 * chp_process_crw - process channel-path status change
536 * Handle channel-report-words indicating that the status of a channel-path
554 * created by reset channel path and need not be chp_process_crw()
559 "channel path %02X\n", crw0->rsid); chp_process_crw()
631 * chp_info_get_status - retrieve configure status of a channel-path
632 * @chpid: channel-path ID
727 * @chpid - channel-path ID
730 * Schedule a channel-path configuration/deconfiguration request.
745 * @chpid - channel-path ID
747 * Cancel an active channel-path deconfiguration request if it has not yet
H A Dcss.h17 * path grouping stuff
37 __u8 state1 : 2; /* path state value 1 */
38 __u8 state2 : 2; /* path state value 2 */
39 __u8 state3 : 1; /* path state value 3 */
51 struct path_state ps; /* SNID path state */
69 * @chp_event: called for events affecting a channel path
133 /* Helper functions to build lists for the slow path. */
H A Dccwreq.c22 * lpm_adjust - adjust path mask
23 * @lpm: path mask to adjust
37 * Adjust path mask to use next path and reset retry count. Return resulting
38 * path mask.
83 /* Retries exhausted, try next path. */ ccwreq_do()
100 /* Permanent path error. */ ccwreq_do()
181 /* Check for path error. */ ccwreq_status()
305 /* Try next path and restart I/O. */ ccw_request_handler()
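Per its doc comment, lpm_adjust() moves on to the next path by shifting the single-bit logical path mask right until it overlaps the set of still-usable paths, or runs out. A standalone sketch consistent with that description (the in-kernel body may differ in detail):

    #include <stdio.h>

    typedef unsigned short u16;

    /* shift lpm right until it shares a bit with mask, or no paths remain */
    static u16 lpm_adjust(u16 lpm, u16 mask)
    {
        while (lpm && !(lpm & mask))
            lpm >>= 1;
        return lpm;
    }

    int main(void)
    {
        /* start at path 0 (0x80); paths 0 and 1 are unusable */
        printf("next path mask: 0x%02x\n", lpm_adjust(0x80, 0x3f)); /* -> 0x20 */
        return 0;
    }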
/linux-4.4.14/include/trace/events/
H A Dasoc.h151 TP_printk("%s: checks %d power, %d path, %d neighbour",
160 struct snd_soc_dapm_path *path),
162 TP_ARGS(widget, dir, path),
166 __string( pname, path->name ? path->name : DAPM_DIRECT)
167 __string( pnname, path->node[dir]->name )
175 __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
176 __assign_str(pnname, path->node[dir]->name);
177 __entry->path_connect = path->connect;
178 __entry->path_node = (long)path->node[dir];
/linux-4.4.14/fs/reiserfs/
H A Dstree.c208 * path, starting from the bottom of the path, and going upwards. We must
209 * check the path's validity at each step. If the key is not in the path,
221 "PAP-5010: invalid offset in the path"); get_lkey()
223 /* While not higher in path than first element. */ get_lkey()
230 /* Parent at the path is not in the tree now. */ get_lkey()
241 /* Check whether parent at the path really points to the child. */ get_lkey()
260 /* Get delimiting key of the buffer at the path and its right neighbor. */ get_rkey()
268 "PAP-5030: invalid offset in the path"); get_rkey()
276 /* Parent at the path is not in the tree now. */ get_rkey()
288 * Check whether parent at the path really points get_rkey()
312 * Check whether a key is contained in the tree rooted from a buffer at a path.
314 * in the last path_element in the path. These delimiting keys are stored
331 "PAP-5050: pointer to the key(%p) is NULL or invalid path length(%d)", key_in_buffer()
349 "path not properly relsed"); reiserfs_check_path()
354 * Drop the reference to each buffer in a path and restore
364 "clm-4000: invalid path offset"); pathrelse_and_restore()
375 /* Drop the reference to each buffer in a path */ pathrelse()
381 "PAP-5090: invalid path offset"); pathrelse()
562 * This function fills up the path from the root to the leaf as it
572 * key. search_by_key returns a path that must be checked for the
573 * correctness of the top of the path but need not be checked for the
574 * correctness of the bottom of the path
608 * As we add each node to a path we increase its count. This means search_by_key()
609 * that we must be careful to release all nodes in a path before we search_by_key()
610 * either discard the path struct or re-use the path struct, as we search_by_key()
620 * current node, and calculate the next current node(next path element) search_by_key()
636 /* prep path to have another element added to it. */ search_by_key()
644 * in the path to have a pointer to it. search_by_key()
814 * Form the path to an item and position in this item which contains
816 * corresponding to the key, we point the path to the item with
868 /* Item is not found. Set path to the previous item. */ search_for_position_by_key()
882 /* Needed byte is contained in the item pointed to by the path. */ search_for_position_by_key()
894 * path. Set pos_in_item out of the item. search_for_position_by_key()
905 /* Compare given item and item pointed to by the path. */ comp_items()
906 int comp_items(const struct item_head *stored_ih, const struct treepath *path) comp_items() argument
908 struct buffer_head *bh = PATH_PLAST_BUFFER(path); comp_items()
911 /* Last buffer at the path is not in the tree. */ comp_items()
915 /* Last path position is invalid. */ comp_items()
916 if (PATH_LAST_POSITION(path) >= B_NR_ITEMS(bh)) comp_items()
920 ih = tp_item_head(path); comp_items()
931 static inline int prepare_for_direct_item(struct treepath *path, prepare_for_direct_item() argument
952 pos_in_item(path) = round_len - (le_ih_k_offset(le_ih) - 1); prepare_for_direct_item()
953 *cut_size = -(ih_item_len(le_ih) - pos_in_item(path)); prepare_for_direct_item()
967 (pos_in_item(path) = prepare_for_direct_item()
972 static inline int prepare_for_direntry_item(struct treepath *path, prepare_for_direntry_item() argument
999 entry_length(get_last_bh(path), le_ih, pos_in_item(path))); prepare_for_direntry_item()
1006 * If the path points to a directory or direct item, calculate mode
1008 * If the path points to an indirect item, remove some number of its
1018 struct treepath *path, prepare_for_delete_or_cut()
1032 struct item_head *p_le_ih = tp_item_head(path); prepare_for_delete_or_cut()
1033 struct buffer_head *bh = PATH_PLAST_BUFFER(path); prepare_for_delete_or_cut()
1049 return prepare_for_direntry_item(path, p_le_ih, inode, prepare_for_delete_or_cut()
1055 return prepare_for_direct_item(path, p_le_ih, inode, prepare_for_delete_or_cut()
1079 bh = PATH_PLAST_BUFFER(path); prepare_for_delete_or_cut()
1080 copy_item_head(&s_ih, tp_item_head(path)); prepare_for_delete_or_cut()
1109 if (item_moved (&s_ih, path)) { prepare_for_delete_or_cut()
1131 search_for_position_by_key(sb, item_key, path) == POSITION_FOUND); prepare_for_delete_or_cut()
1132 pos_in_item(path) = pos * UNFM_P_SIZE; prepare_for_delete_or_cut()
1175 struct treepath *path, int size) init_tb_struct()
1183 tb->tb_path = path; init_tb_struct()
1184 PATH_OFFSET_PBUFFER(path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL; init_tb_struct()
1185 PATH_OFFSET_POSITION(path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0; init_tb_struct()
1228 * path - path to the deleted item
1234 struct treepath *path, const struct cpu_key *item_key, reiserfs_delete_item()
1252 init_tb_struct(th, &s_del_balance, sb, path, reiserfs_delete_item()
1262 prepare_for_delete_or_cut(th, inode, path, reiserfs_delete_item()
1269 copy_item_head(&s_ih, tp_item_head(path)); reiserfs_delete_item()
1280 search_for_position_by_key(sb, item_key, path); reiserfs_delete_item()
1298 q_ih = tp_item_head(path); reiserfs_delete_item()
1347 ih_item_body(PATH_PLAST_BUFFER(path), &s_ih), reiserfs_delete_item()
1391 INITIALIZE_PATH(path); reiserfs_delete_solid_item()
1403 retval = search_item(th->t_super, &cpu_key, &path); reiserfs_delete_solid_item()
1411 pathrelse(&path); reiserfs_delete_solid_item()
1431 item_len = ih_item_len(tp_item_head(&path)); reiserfs_delete_solid_item()
1432 init_tb_struct(th, &tb, th->t_super, &path, reiserfs_delete_solid_item()
1435 quota_cut_bytes = ih_item_len(tp_item_head(&path)); reiserfs_delete_solid_item()
1473 reiserfs_check_path(&path); reiserfs_delete_solid_item()
1542 struct treepath *path, maybe_indirect_to_direct()
1564 pathrelse(path); maybe_indirect_to_direct()
1569 return indirect2direct(th, inode, page, path, item_key, maybe_indirect_to_direct()
1580 struct inode *inode, struct treepath *path) indirect_to_direct_roll_back()
1594 if (search_for_position_by_key(inode->i_sb, &tail_key, path) == indirect_to_direct_roll_back()
1598 RFALSE(path->pos_in_item != indirect_to_direct_roll_back()
1599 ih_item_len(tp_item_head(path)) - 1, indirect_to_direct_roll_back()
1601 PATH_LAST_POSITION(path)--; indirect_to_direct_roll_back()
1604 reiserfs_delete_item(th, path, &tail_key, inode, indirect_to_direct_roll_back()
1622 struct treepath *path, reiserfs_cut_from_item()
1648 init_tb_struct(th, &s_cut_balance, inode->i_sb, path, reiserfs_cut_from_item()
1664 prepare_for_delete_or_cut(th, inode, path, reiserfs_cut_from_item()
1677 path, item_key, reiserfs_cut_from_item()
1707 path) == POSITION_NOT_FOUND) { reiserfs_cut_from_item()
1708 print_block(PATH_PLAST_BUFFER(path), 3, reiserfs_cut_from_item()
1709 PATH_LAST_POSITION(path) - 1, reiserfs_cut_from_item()
1710 PATH_LAST_POSITION(path) + 1); reiserfs_cut_from_item()
1718 pathrelse(path); reiserfs_cut_from_item()
1731 search_for_position_by_key(sb, item_key, path); reiserfs_cut_from_item()
1748 indirect_to_direct_roll_back(th, inode, path); reiserfs_cut_from_item()
1764 M_DELETE) ? ih_item_len(tp_item_head(path)) : -s_cut_balance. reiserfs_cut_from_item()
2027 static void check_research_for_paste(struct treepath *path, check_research_for_paste() argument
2030 struct item_head *found_ih = tp_item_head(path); check_research_for_paste()
2035 get_last_bh(path)->b_size) != check_research_for_paste()
2038 get_last_bh(path)->b_size) != check_research_for_paste()
2039 pos_in_item(path)) check_research_for_paste()
2043 pos_in_item(path), key); check_research_for_paste()
2048 get_last_bh(path)->b_size) != check_research_for_paste()
2050 || I_UNFM_NUM(found_ih) != pos_in_item(path) check_research_for_paste()
2055 found_ih, pos_in_item(path), key); check_research_for_paste()
2146 /* this also releases the path */ reiserfs_paste_into_item()
2161 * Insert new item into the buffer at the path.
2163 * path - path to the inserted item
2168 struct treepath *path, const struct cpu_key *key, reiserfs_insert_item()
2204 pathrelse(path); reiserfs_insert_item()
2208 init_tb_struct(th, &s_ins_balance, th->t_super, path, reiserfs_insert_item()
2227 retval = search_item(th->t_super, key, path); reiserfs_insert_item()
2249 /* also releases the path */ reiserfs_insert_item()
1016 prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, const struct cpu_key *item_key, int *removed, int *cut_size, unsigned long long new_file_length ) prepare_for_delete_or_cut() argument
1172 init_tb_struct(struct reiserfs_transaction_handle *th, struct tree_balance *tb, struct super_block *sb, struct treepath *path, int size) init_tb_struct() argument
1233 reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath *path, const struct cpu_key *item_key, struct inode *inode, struct buffer_head *un_bh) reiserfs_delete_item() argument
1539 maybe_indirect_to_direct(struct reiserfs_transaction_handle *th, struct inode *inode, struct page *page, struct treepath *path, const struct cpu_key *item_key, loff_t new_file_size, char *mode) maybe_indirect_to_direct() argument
1579 indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path) indirect_to_direct_roll_back() argument
1621 reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, struct treepath *path, struct cpu_key *item_key, struct inode *inode, struct page *page, loff_t new_file_size) reiserfs_cut_from_item() argument
2167 reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath *path, const struct cpu_key *key, struct item_head *ih, struct inode *inode, const char *body) reiserfs_insert_item() argument
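A reiserfs treepath pins one buffer per tree level, and pathrelse() must drop those references from the deepest element back up before the path struct is reused; the reiserfs_check_path() assertion above ("path not properly relsed") guards exactly that. A deliberately simplified standalone sketch of the bottom-up release (refcount stand-ins, not the kernel's buffer_head API):

    #include <stddef.h>

    struct buffer { int refcount; };

    struct treepath_sketch {
        int last_offset;             /* index of the deepest used element */
        struct buffer *bufs[16];     /* one pinned buffer per level */
    };

    static void put_buffer(struct buffer *b) { if (b) b->refcount--; }

    /* drop the reference to each buffer in the path, deepest first */
    static void path_release(struct treepath_sketch *p)
    {
        for (int off = p->last_offset; off >= 0; off--) {
            put_buffer(p->bufs[off]);
            p->bufs[off] = NULL;
        }
        p->last_offset = -1;         /* mark the path empty */
    }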
H A Dtail_conversion.c21 * path points to first direct item of the file regardless of how many of
25 struct treepath *path, struct buffer_head *unbh, direct2indirect()
30 struct item_head *p_le_ih = tp_item_head(path); direct2indirect()
65 if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) { direct2indirect()
69 pathrelse(path); direct2indirect()
73 p_le_ih = tp_item_head(path); direct2indirect()
81 PATH_LAST_POSITION(path)++; direct2indirect()
83 reiserfs_insert_item(th, path, &end_key, &ind_ih, inode, direct2indirect()
87 retval = reiserfs_paste_into_item(th, path, &end_key, inode, direct2indirect()
114 if (search_for_position_by_key(sb, &end_key, path) == direct2indirect()
118 p_le_ih = tp_item_head(path); direct2indirect()
138 retval = reiserfs_delete_item(th, path, &end_key, inode, direct2indirect()
203 struct treepath *path, /* path to the indirect item. */ indirect2direct()
224 /* store item head path points to. */ indirect2direct()
225 copy_item_head(&s_ih, tp_item_head(path)); indirect2direct()
246 if (path_changed(&s_ih, path)) { indirect2direct()
248 if (search_for_position_by_key(sb, item_key, path) indirect2direct()
253 copy_item_head(&s_ih, tp_item_head(path)); indirect2direct()
276 PATH_LAST_POSITION(path)++; indirect2direct()
282 if (reiserfs_insert_item(th, path, &key, &s_ih, inode, indirect2direct()
24 direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, struct buffer_head *unbh, loff_t tail_offset) direct2indirect() argument
201 indirect2direct(struct reiserfs_transaction_handle *th, struct inode *inode, struct page *page, struct treepath *path, const struct cpu_key *item_key, loff_t n_new_file_size, char *mode) indirect2direct() argument
/linux-4.4.14/arch/alpha/lib/
H A Ddec_and_lock.c35 /* Slow path */ atomic_dec_and_lock_1()
/linux-4.4.14/include/uapi/linux/
H A Dsonet.h12 __HANDLE_ITEM(path_bip); /* path parity errors (B3) */ \
14 __HANDLE_ITEM(path_febe); /* path parity errors at remote */ \
46 #define SONET_INS_PBIP 4 /* path BIP */
50 #define SONET_INS_PAIS 64 /* path alarm indication signal */
H A Dlimits.h12 #define PATH_MAX 4096 /* # chars in a path name including nul */
H A Dunix_diag.h16 #define UDIAG_SHOW_NAME 0x00000001 /* show name (not path) */
/linux-4.4.14/arch/mips/vdso/
H A Dgenvdso.c112 static void *map_vdso(const char *path, size_t *_size) map_vdso() argument
119 fd = open(path, O_RDWR); map_vdso()
122 path, strerror(errno)); map_vdso()
128 path, strerror(errno)); map_vdso()
136 path, strerror(errno)); map_vdso()
145 path); map_vdso()
156 program_name, path); map_vdso()
167 program_name, path); map_vdso()
174 program_name, path); map_vdso()
179 program_name, path); map_vdso()
187 static bool patch_vdso(const char *path, void *vdso) patch_vdso() argument
190 return patch_vdso64(path, vdso); patch_vdso()
192 return patch_vdso32(path, vdso); patch_vdso()
195 static bool get_symbols(const char *path, void *vdso) get_symbols() argument
198 return get_symbols64(path, vdso); get_symbols()
200 return get_symbols32(path, vdso); get_symbols()
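map_vdso() is the standard open/fstat/mmap sequence over a file path, returning the mapping for in-place patching. A runnable user-space sketch of the same sequence (error reporting shortened to perror()):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static void *map_file(const char *path, size_t *size)
    {
        struct stat st;
        int fd = open(path, O_RDWR);
        if (fd < 0) {
            perror(path);
            return NULL;
        }
        if (fstat(fd, &st) != 0) {
            perror(path);
            close(fd);
            return NULL;
        }
        void *addr = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fd, 0);
        close(fd);                    /* the mapping survives the close */
        if (addr == MAP_FAILED) {
            perror(path);
            return NULL;
        }
        *size = st.st_size;
        return addr;
    }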
H A Dgenvdso.h11 static inline bool FUNC(patch_vdso)(const char *path, void *vdso) patch_vdso() argument
42 program_name, path); patch_vdso()
53 program_name, path); patch_vdso()
105 program_name, path); patch_vdso()
113 static inline bool FUNC(get_symbols)(const char *path, void *vdso) get_symbols() argument
137 path); get_symbols()
181 program_name, path, vdso_symbols[i].name); get_symbols()
/linux-4.4.14/tools/virtio/virtio-trace/
H A Dtrace-agent.h14 * @ctl_fd: fd of control path, /dev/virtio-ports/agent-ctl-path
28 * @in_fd: fd of reading trace data path in cpu_num
29 * @out_fd: fd of writing trace data path in cpu_num
H A Dtrace-agent.c25 #define WRITE_PATH_FMT "/dev/virtio-ports/trace-path-cpu%d"
26 #define CTL_PATH "/dev/virtio-ports/agent-ctl-path"
123 /* write(output) path */ make_path()
126 /* read(input) path */ make_path()
130 pr_err("Failed to generate %s path(CPU#%d):%d\n", make_path()
160 /* set read(input) path per read/write thread */ agent_info_init()
165 /* set write(output) path per read/write thread*/ agent_info_init()
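trace-agent's make_path() stamps one virtio-serial device path per CPU from a printf template (the WRITE_PATH_FMT quoted above). A runnable sketch of that per-CPU path generation:

    #include <stdio.h>

    #define WRITE_PATH_FMT "/dev/virtio-ports/trace-path-cpu%d"

    /* fill buf with the per-CPU trace port path; <0 on truncation */
    static int make_cpu_path(char *buf, size_t len, int cpu)
    {
        int n = snprintf(buf, len, WRITE_PATH_FMT, cpu);
        return (n < 0 || (size_t)n >= len) ? -1 : 0;
    }

    int main(void)
    {
        char path[64];
        if (make_cpu_path(path, sizeof(path), 3) == 0)
            puts(path);               /* /dev/virtio-ports/trace-path-cpu3 */
        return 0;
    }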
/linux-4.4.14/fs/nfsd/
H A Dexport.h23 char *path; /* slash separated list of path components */ member in struct:nfsd4_fs_location
52 struct path ex_path;
76 struct path ek_path;
93 struct path *);
95 struct path *);
98 char *path, struct knfsd_fh *, int maxsize);
H A Dexport.c72 /* client fsidtype fsid expiry [path] */ expkey_parse()
148 dprintk("Found the path %s\n", buf); expkey_parse()
173 seq_puts(m, "#domain fsidtype fsid [path]\n"); expkey_show()
301 kfree(locations[i].path); nfsd4_fslocs_free()
323 /* client path */ svc_export_request()
421 /* slash separated path component list */ fsloc_parse()
426 fsloc->locations[i].path = kstrdup(buf, GFP_KERNEL); fsloc_parse()
427 if (!fsloc->locations[i].path) fsloc_parse()
511 /* client path expiry [flags anonuid anongid fsid] */ svc_export_parse()
538 /* path */ svc_export_parse()
663 seq_puts(m, "#path domain(flags)\n"); svc_export_show()
827 const struct path *path, struct cache_req *reqp) exp_get_by_name()
836 key.ex_path = *path; exp_get_by_name()
852 exp_parent(struct cache_detail *cd, struct auth_domain *clp, struct path *path) exp_parent() argument
854 struct dentry *saved = dget(path->dentry); exp_parent()
855 struct svc_export *exp = exp_get_by_name(cd, clp, path, NULL); exp_parent()
857 while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) { exp_parent()
858 struct dentry *parent = dget_parent(path->dentry); exp_parent()
859 dput(path->dentry); exp_parent()
860 path->dentry = parent; exp_parent()
861 exp = exp_get_by_name(cd, clp, path, NULL); exp_parent()
863 dput(path->dentry); exp_parent()
864 path->dentry = saved; exp_parent()
880 struct path path; exp_rootfh() local
889 if (kern_path(name, 0, &path)) { exp_rootfh()
890 printk("nfsd: exp_rootfh path not found %s", name); exp_rootfh()
893 inode = d_inode(path.dentry); exp_rootfh()
896 name, path.dentry, clp->name, exp_rootfh()
898 exp = exp_parent(cd, clp, &path); exp_rootfh()
908 if (fh_compose(&fh, exp, path.dentry, NULL)) exp_rootfh()
916 path_put(&path); exp_rootfh()
970 rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path) rqst_exp_get_by_name() argument
980 exp = exp_get_by_name(cd, rqstp->rq_client, path, &rqstp->rq_chandle); rqst_exp_get_by_name()
992 gssexp = exp_get_by_name(cd, rqstp->rq_gssclient, path, &rqstp->rq_chandle); rqst_exp_get_by_name()
1034 rqst_exp_parent(struct svc_rqst *rqstp, struct path *path) rqst_exp_parent() argument
1036 struct dentry *saved = dget(path->dentry); rqst_exp_parent()
1037 struct svc_export *exp = rqst_exp_get_by_name(rqstp, path); rqst_exp_parent()
1039 while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) { rqst_exp_parent()
1040 struct dentry *parent = dget_parent(path->dentry); rqst_exp_parent()
1041 dput(path->dentry); rqst_exp_parent()
1042 path->dentry = parent; rqst_exp_parent()
1043 exp = rqst_exp_get_by_name(rqstp, path); rqst_exp_parent()
1045 dput(path->dentry); rqst_exp_parent()
1046 path->dentry = saved; rqst_exp_parent()
1174 seq_escape(m, fsloc->locations[0].path, ",;@ \t\n\\"); exp_flags()
1179 seq_escape(m, fsloc->locations[i].path, ",;@ \t\n\\"); exp_flags()
826 exp_get_by_name(struct cache_detail *cd, struct auth_domain *clp, const struct path *path, struct cache_req *reqp) exp_get_by_name() argument
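exp_parent() and rqst_exp_parent() share one retry shape: save the starting dentry, swap in its parent on each lookup miss, and restore the original before returning. A simplified user-space analogue that climbs a path string instead of dentries (lookup() is a hypothetical stand-in for the export-table query):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool lookup(const char *path)       /* hypothetical export check */
    {
        return strcmp(path, "/srv") == 0;
    }

    /* walk the ancestors of path until lookup() succeeds or we hit "/" */
    static bool find_exported_ancestor(char *path)
    {
        while (!lookup(path)) {
            char *slash = strrchr(path, '/');
            if (!slash || slash == path)
                return false;          /* reached the root without a match */
            *slash = '\0';             /* step to the parent, like dget_parent() */
        }
        return true;
    }

    int main(void)
    {
        char p[] = "/srv/nfs/data";
        printf("%s\n", find_exported_ancestor(p) ? p : "no export");
        return 0;
    }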
/linux-4.4.14/tools/perf/arch/
H A Dcommon.c64 char *path, *tmp = NULL; lookup_path() local
75 path = strtok_r(env, ":", &tmp); lookup_path()
76 while (path) { lookup_path()
77 scnprintf(buf, sizeof(buf), "%s/%s", path, name); lookup_path()
82 path = strtok_r(NULL, ":", &tmp); lookup_path()
132 const char *name, const char **path) perf_env__lookup_binutils_path()
146 * We don't need to try to find objdump path for native system. perf_env__lookup_binutils_path()
147 * Just use default binutils path (e.g.: "objdump"). perf_env__lookup_binutils_path()
200 *path = buf; perf_env__lookup_binutils_path()
204 *path = NULL; perf_env__lookup_binutils_path()
131 perf_env__lookup_binutils_path(struct perf_env *env, const char *name, const char **path) perf_env__lookup_binutils_path() argument
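lookup_path() duplicates $PATH (strtok_r() writes into its input), walks the directories, and probes each "%s/%s" candidate. A runnable sketch of that probe using access(X_OK):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    /* search every $PATH directory for an executable called name */
    static bool lookup_path(const char *name)
    {
        char buf[4096], *tmp = NULL;
        char *env = getenv("PATH");
        if (!env)
            return false;
        env = strdup(env);            /* strtok_r() modifies the string */
        if (!env)
            return false;
        bool found = false;
        for (char *p = strtok_r(env, ":", &tmp); p;
             p = strtok_r(NULL, ":", &tmp)) {
            snprintf(buf, sizeof(buf), "%s/%s", p, name);
            if (access(buf, X_OK) == 0) {
                found = true;
                break;
            }
        }
        free(env);
        return found;
    }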
/linux-4.4.14/drivers/tty/hvc/
H A Dhvc_iucv.c69 struct iucv_path *path; /* IUCV path pointer */ member in struct:hvc_iucv_private
80 u8 info_path[16]; /* IUCV path info (dev attr) */
214 * IUCV path.
215 * If the IUCV path has been severed, then -EPIPE is returned to cause a
229 /* if the IUCV path has been severed, return -EPIPE to inform the hvc_iucv_write()
249 rc = __iucv_message_receive(priv->path, &rb->msg, 0, hvc_iucv_write()
308 * If an IUCV communication path has been established, pending IUCV messages
352 * If an existing IUCV communication path has been severed, -EPIPE is returned
383 * If an IUCV communication path has been established, the buffered output data
385 * Returns 0 if there is no established IUCV communication path or
386 * -EPIPE if an existing IUCV communication path has been severed.
414 rc = __iucv_message_send(priv->path, &sb->msg, 0, 0, hvc_iucv_send()
558 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
561 * This routine severs an existing IUCV communication path and hangs
563 * The hang-up occurs only if an IUCV communication path is established;
567 * 1. After the IUCV path has been severed, the iucv_state is set to
591 struct iucv_path *path; hvc_iucv_hangup() local
593 path = NULL; hvc_iucv_hangup()
596 path = priv->path; hvc_iucv_hangup()
597 priv->path = NULL; hvc_iucv_hangup()
611 /* finally sever path (outside of priv->lock due to lock ordering) */ hvc_iucv_hangup()
612 if (path) { hvc_iucv_hangup()
613 iucv_path_sever(path, NULL); hvc_iucv_hangup()
614 iucv_path_free(path); hvc_iucv_hangup()
627 * to keep an existing IUCV communication path established.
630 * If the tty has been opened and an established IUCV path has been severed
650 * ignore this hangup and keep an established IUCV path open... hvc_iucv_notifier_hangup()
672 struct iucv_path *path; hvc_iucv_dtr_rts() local
690 path = priv->path; /* save reference to IUCV path */ hvc_iucv_dtr_rts()
691 priv->path = NULL; hvc_iucv_dtr_rts()
695 /* Sever IUCV path outside of priv->lock due to lock ordering of: hvc_iucv_dtr_rts()
697 if (path) { hvc_iucv_dtr_rts()
698 iucv_path_sever(path, NULL); hvc_iucv_dtr_rts()
699 iucv_path_free(path); hvc_iucv_dtr_rts()
768 * @path: Pending path (struct iucv_path)
770 * @ipuser: User specified data for this path
773 * The function uses the @ipuser data to determine if the pending path belongs
775 * If the path belongs to this driver, ensure that the terminal is not accessed
777 * If the terminal is not yet connected, the pending path is accepted and is
780 * Returns 0 if @path belongs to a terminal managed by this device driver; hvc_iucv_path_pending()
781 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
785 static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid, hvc_iucv_path_pending() argument
797 /* First, check if the pending path request is managed by this hvc_iucv_path_pending()
827 iucv_path_sever(path, ipuser); hvc_iucv_path_pending()
828 iucv_path_free(path); hvc_iucv_path_pending()
839 * this path to enforce that there is only ONE established communication hvc_iucv_path_pending()
840 * path per terminal. */ hvc_iucv_path_pending()
842 iucv_path_sever(path, ipuser); hvc_iucv_path_pending()
843 iucv_path_free(path); hvc_iucv_path_pending()
847 /* accept path */ hvc_iucv_path_pending()
850 path->msglim = 0xffff; /* IUCV MSGLIMIT */ hvc_iucv_path_pending()
851 path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */ hvc_iucv_path_pending()
852 rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv); hvc_iucv_path_pending()
854 iucv_path_sever(path, ipuser); hvc_iucv_path_pending()
855 iucv_path_free(path); hvc_iucv_path_pending()
858 priv->path = path; hvc_iucv_path_pending()
861 /* store path information */ hvc_iucv_path_pending()
874 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
875 * @path: Pending path (struct iucv_path)
876 * @ipuser: User specified data for this path
884 static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser) hvc_iucv_path_severed() argument
886 struct hvc_iucv_private *priv = path->private; hvc_iucv_path_severed()
893 * @path: Pending path (struct iucv_path)
902 static void hvc_iucv_msg_pending(struct iucv_path *path, hvc_iucv_msg_pending() argument
905 struct hvc_iucv_private *priv = path->private; hvc_iucv_msg_pending()
910 iucv_message_reject(path, msg); hvc_iucv_msg_pending()
918 iucv_message_reject(path, msg); hvc_iucv_msg_pending()
925 iucv_message_reject(path, msg); hvc_iucv_msg_pending()
940 * @path: Pending path (struct iucv_path)
950 static void hvc_iucv_msg_complete(struct iucv_path *path, hvc_iucv_msg_complete() argument
953 struct hvc_iucv_private *priv = path->private; hvc_iucv_msg_complete()
972 * Sever an established IUCV communication path and
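Note: a shape recurs in hvc_iucv_hangup() and hvc_iucv_dtr_rts() above: grab the path pointer under priv->lock, NULL the field, drop the lock, and only then sever and free. A hedged pthread model of that detach-then-teardown idiom; IUCV itself is s390-only, so conn_sever_and_free() merely stands in for iucv_path_sever() + iucv_path_free():

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct conn {
        int id;
    };

    struct term {
        pthread_mutex_t lock;
        struct conn *path;      /* connection owned by this terminal, or NULL */
    };

    /* Stand-in for iucv_path_sever() + iucv_path_free(). */
    static void conn_sever_and_free(struct conn *c)
    {
        printf("severing connection %d\n", c->id);
        free(c);
    }

    static void hangup(struct term *t)
    {
        struct conn *path = NULL;

        /* Detach under the lock so no other context can use the pointer... */
        pthread_mutex_lock(&t->lock);
        path = t->path;
        t->path = NULL;
        pthread_mutex_unlock(&t->lock);

        /* ...but tear it down outside the lock, matching the lock-ordering
         * comments in hvc_iucv_hangup() and hvc_iucv_dtr_rts(). */
        if (path)
            conn_sever_and_free(path);
    }

    int main(void)
    {
        struct term t;

        pthread_mutex_init(&t.lock, NULL);
        t.path = malloc(sizeof(*t.path));
        if (!t.path)
            return 1;
        t.path->id = 1;
        hangup(&t);
        hangup(&t);     /* safe no-op: the path was already detached */
        pthread_mutex_destroy(&t.lock);
        return 0;
    }
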
/linux-4.4.14/arch/powerpc/platforms/cell/spufs/
H A Dsyscalls.c65 struct path path; do_spu_create() local
69 dentry = user_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY); do_spu_create()
72 ret = spufs_create(&path, dentry, flags, mode, neighbor); do_spu_create()
73 done_path_create(&path, dentry); do_spu_create()
/linux-4.4.14/arch/powerpc/sysdev/
H A Dmv64x60_udbg.c73 const char *path; mv64x60_udbg_init() local
80 path = of_get_property(of_chosen, "linux,stdout-path", NULL); mv64x60_udbg_init()
81 if (!path) mv64x60_udbg_init()
84 stdout = of_find_node_by_path(path); mv64x60_udbg_init()
/linux-4.4.14/arch/powerpc/boot/
H A Dplanetcore.c109 char *path; planetcore_set_stdout_path() local
122 path = get_path(node, prop_buf, MAX_PROP_LEN); planetcore_set_stdout_path()
123 if (!path) planetcore_set_stdout_path()
132 setprop_str(chosen, "linux,stdout-path", path); planetcore_set_stdout_path()
H A Dvirtex.c83 char path[MAX_PATH_LEN]; platform_specific_init() local
89 if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) { platform_specific_init()
90 devp = finddevice(path); platform_specific_init()
/linux-4.4.14/drivers/xen/xenbus/
H A Dxenbus_xs.c355 /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ join()
393 char *strings, *path; xenbus_directory() local
396 path = join(dir, node); xenbus_directory()
397 if (IS_ERR(path)) xenbus_directory()
398 return (char **)path; xenbus_directory()
400 strings = xs_single(t, XS_DIRECTORY, path, &len); xenbus_directory()
401 kfree(path); xenbus_directory()
409 /* Check if a path exists. Return 1 if it does. */ xenbus_exists()
431 char *path; xenbus_read() local
434 path = join(dir, node); xenbus_read()
435 if (IS_ERR(path)) xenbus_read()
436 return (void *)path; xenbus_read()
438 ret = xs_single(t, XS_READ, path, len); xenbus_read()
439 kfree(path); xenbus_read()
450 const char *path; xenbus_write() local
454 path = join(dir, node); xenbus_write()
455 if (IS_ERR(path)) xenbus_write()
456 return PTR_ERR(path); xenbus_write()
458 iovec[0].iov_base = (void *)path; xenbus_write()
459 iovec[0].iov_len = strlen(path) + 1; xenbus_write()
464 kfree(path); xenbus_write()
473 char *path; xenbus_mkdir() local
476 path = join(dir, node); xenbus_mkdir()
477 if (IS_ERR(path)) xenbus_mkdir()
478 return PTR_ERR(path); xenbus_mkdir()
480 ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); xenbus_mkdir()
481 kfree(path); xenbus_mkdir()
489 char *path; xenbus_rm() local
492 path = join(dir, node); xenbus_rm()
493 if (IS_ERR(path)) xenbus_rm()
494 return PTR_ERR(path); xenbus_rm()
496 ret = xs_error(xs_single(t, XS_RM, path, NULL)); xenbus_rm()
497 kfree(path); xenbus_rm()
620 static int xs_watch(const char *path, const char *token) xs_watch() argument
624 iov[0].iov_base = (void *)path; xs_watch()
625 iov[0].iov_len = strlen(path) + 1; xs_watch()
633 static int xs_unwatch(const char *path, const char *token) xs_unwatch() argument
637 iov[0].iov_base = (char *)path; xs_unwatch()
638 iov[0].iov_len = strlen(path) + 1; xs_unwatch()
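Note: every xenbus accessor above follows the same shape: join(dir, node), hand the result to xs_single(), then kfree() it. A userspace model of join(), under the assumption that the kernel version builds the string with kasprintf() and signals failure via ERR_PTR(); here NULL stands in for the error pointer:

    #define _GNU_SOURCE     /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return "<dir>/<node>" in a freshly allocated buffer; an empty node
     * yields the directory alone.  NULL models ERR_PTR(-ENOMEM). */
    static char *join(const char *dir, const char *node)
    {
        char *buf;
        int rc;

        if (strlen(node) == 0)
            rc = asprintf(&buf, "%s", dir);
        else
            rc = asprintf(&buf, "%s/%s", dir, node);
        return rc < 0 ? NULL : buf;
    }

    int main(void)
    {
        char *path = join("device/vbd/51712", "ring-ref");

        if (!path)
            return 1;
        printf("%s\n", path);   /* device/vbd/51712/ring-ref */
        free(path);             /* the xenbus callers kfree() it the same way */
        return 0;
    }
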
/linux-4.4.14/Documentation/ia64/
H A Daliasing-test.c27 static int map_mem(char *path, off_t offset, size_t length, int touch) map_mem() argument
33 fd = open(path, O_RDWR); map_mem()
35 perror(path); map_mem()
39 if (fnmatch("/proc/bus/pci/*", path, 0) == 0) { map_mem()
65 static int scan_tree(char *path, char *file, off_t offset, size_t length, int touch) scan_tree() argument
72 n = scandir(path, &namelist, 0, alphasort); scan_tree()
86 path2 = malloc(strlen(path) + strlen(name) + 3); scan_tree()
87 strcpy(path2, path); scan_tree()
122 static int read_rom(char *path) read_rom() argument
127 fd = open(path, O_RDWR); read_rom()
129 perror(path); read_rom()
150 static int scan_rom(char *path, char *file) scan_rom() argument
157 n = scandir(path, &namelist, 0, alphasort); scan_rom()
171 path2 = malloc(strlen(path) + strlen(name) + 3); scan_rom()
172 strcpy(path2, path); scan_rom()
/linux-4.4.14/arch/parisc/kernel/
H A Ddrivers.c286 * get_node_path fills in @path with the firmware path to the device.
292 static void get_node_path(struct device *dev, struct hardware_path *path) get_node_path() argument
295 memset(&path->bc, -1, 6); get_node_path()
299 path->mod = PCI_FUNC(devfn); get_node_path()
300 path->bc[i--] = PCI_SLOT(devfn); get_node_path()
307 path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5); get_node_path()
309 path->bc[i--] = to_parisc_device(dev)->hw_path; get_node_path()
315 static char *print_hwpath(struct hardware_path *path, char *output) print_hwpath() argument
319 if (path->bc[i] == -1) print_hwpath()
321 output += sprintf(output, "%u/", (unsigned char) path->bc[i]); print_hwpath()
323 output += sprintf(output, "%u", (unsigned char) path->mod); print_hwpath()
328 * print_pa_hwpath - Returns hardware path for PA devices
329 * dev: The device to return the path for
330 * output: Pointer to a previously-allocated array to place the path in.
332 * This function fills in the output array with a human-readable path
338 struct hardware_path path; print_pa_hwpath() local
340 get_node_path(dev->dev.parent, &path); print_pa_hwpath()
341 path.mod = dev->hw_path; print_pa_hwpath()
342 return print_hwpath(&path, output); print_pa_hwpath()
348 * get_pci_node_path - Determines the hardware path for a PCI device
349 * @pdev: The device to return the path for
350 * @path: Pointer to a previously-allocated array to place the path in.
356 void get_pci_node_path(struct pci_dev *pdev, struct hardware_path *path) get_pci_node_path() argument
358 get_node_path(&pdev->dev, path); get_pci_node_path()
363 * print_pci_hwpath - Returns hardware path for PCI devices
364 * dev: The device to return the path for
365 * output: Pointer to a previously-allocated array to place the path in.
367 * This function fills in the output array with a human-readable path
373 struct hardware_path path; print_pci_hwpath() local
375 get_pci_node_path(dev, &path); print_pci_hwpath()
376 return print_hwpath(&path, output); print_pci_hwpath()
384 struct hardware_path path; setup_bus_id() local
389 get_node_path(padev->dev.parent, &path); setup_bus_id()
392 if (path.bc[i] == -1) setup_bus_id()
394 output += sprintf(output, "%u:", (unsigned char) path.bc[i]); setup_bus_id()
446 * @id: the element of the module path for this entry
493 printk(KERN_ERR "Two devices have hardware path [%s]. " alloc_pa_dev()
631 * match_pci_device - Matches a pci device against a given hardware path
635 * @modpath: the hardware path.
636 * @return: true if the device matches the hardware path.
645 /* we are at the end of the path, and on the actual device */ match_pci_device()
657 * path entry.
660 * @modpath: the hardware path.
661 * @return: true if the device matches the hardware path.
729 * hwpath_to_device - Finds the generic device corresponding to a given hardware path.
730 * @modpath: the hardware path.
754 * @param path pointer to a previously allocated hwpath struct to be filled in
756 void device_to_hwpath(struct device *dev, struct hardware_path *path) device_to_hwpath() argument
761 get_node_path(dev->parent, path); device_to_hwpath()
762 path->mod = padev->hw_path; device_to_hwpath()
764 get_node_path(dev, path); device_to_hwpath()
826 struct hardware_path path; walk_native_bus() local
828 get_node_path(parent, &path); walk_native_bus()
836 path.mod = i; walk_native_bus()
837 dev = alloc_pa_dev(hpa, &path); walk_native_bus()
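Note: print_hwpath() above renders a struct hardware_path as slash-separated bus-converter numbers followed by the module number. A standalone sketch of that formatting; the excerpt does not show whether a -1 entry skips or ends the loop, so this version skips it:

    #include <stdio.h>
    #include <string.h>

    struct hardware_path {
        signed char bc[6];      /* bus converters; -1 marks an unused slot */
        signed char mod;        /* the device itself */
    };

    /* Same output shape as print_hwpath(): "<bc>/<bc>/.../<mod>". */
    static char *format_hwpath(struct hardware_path *path, char *output)
    {
        int i;

        for (i = 0; i < 6; i++) {
            if (path->bc[i] == -1)
                continue;       /* assumption: unused slots are skipped */
            output += sprintf(output, "%u/", (unsigned char)path->bc[i]);
        }
        output += sprintf(output, "%u", (unsigned char)path->mod);
        return output;
    }

    int main(void)
    {
        struct hardware_path p;
        char buf[64];

        memset(p.bc, -1, sizeof(p.bc));     /* as get_node_path() does */
        p.bc[4] = 0;
        p.bc[5] = 8;
        p.mod = 2;
        format_hwpath(&p, buf);
        printf("%s\n", buf);                /* prints "0/8/2" */
        return 0;
    }
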
/linux-4.4.14/drivers/s390/char/
H A Dmonreader.c48 struct iucv_path *path; member in struct:mon_private
154 rc = iucv_message_reply(monpriv->path, &monmsg->msg, mon_send_reply()
232 static void mon_iucv_path_complete(struct iucv_path *path, u8 *ipuser) mon_iucv_path_complete() argument
234 struct mon_private *monpriv = path->private; mon_iucv_path_complete()
240 static void mon_iucv_path_severed(struct iucv_path *path, u8 *ipuser) mon_iucv_path_severed() argument
242 struct mon_private *monpriv = path->private; mon_iucv_path_severed()
246 iucv_path_sever(path, NULL); mon_iucv_path_severed()
252 static void mon_iucv_message_pending(struct iucv_path *path, mon_iucv_message_pending() argument
255 struct mon_private *monpriv = path->private; mon_iucv_message_pending()
297 monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL); mon_open()
298 if (!monpriv->path) mon_open()
300 rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, mon_open()
325 iucv_path_free(monpriv->path); mon_open()
342 if (monpriv->path) { mon_close()
343 rc = iucv_path_sever(monpriv->path, user_data_sever); mon_close()
347 iucv_path_free(monpriv->path); mon_close()
469 if (monpriv->path) { monreader_freeze()
470 rc = iucv_path_sever(monpriv->path, user_data_sever); monreader_freeze()
474 iucv_path_free(monpriv->path); monreader_freeze()
482 monpriv->path = NULL; monreader_freeze()
494 monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL); monreader_thaw()
495 if (!monpriv->path) monreader_thaw()
497 rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, monreader_thaw()
512 iucv_path_free(monpriv->path); monreader_thaw()
513 monpriv->path = NULL; monreader_thaw()
/linux-4.4.14/drivers/hwtracing/coresight/
H A Dcoresight.c238 static int coresight_enable_path(struct list_head *path) coresight_enable_path() argument
244 * At this point we have a full @path, from source to sink. The coresight_enable_path()
248 list_for_each_entry(cd, path, path_link) { list_for_each_entry()
249 if (cd == list_first_entry(path, struct coresight_device, list_for_each_entry()
252 } else if (list_is_last(&cd->path_link, path)) { list_for_each_entry()
256 * along the path have been configured properly. list_for_each_entry()
268 list_for_each_entry_continue_reverse(cd, path, path_link) { list_for_each_entry_continue_reverse()
269 if (cd == list_first_entry(path, struct coresight_device, list_for_each_entry_continue_reverse()
272 } else if (list_is_last(&cd->path_link, path)) { list_for_each_entry_continue_reverse()
282 static int coresight_disable_path(struct list_head *path) coresight_disable_path() argument
286 list_for_each_entry_reverse(cd, path, path_link) { list_for_each_entry_reverse()
287 if (cd == list_first_entry(path, struct coresight_device, list_for_each_entry_reverse()
290 } else if (list_is_last(&cd->path_link, path)) { list_for_each_entry_reverse()
305 struct list_head *path, coresight_build_paths()
311 list_add(&csdev->path_link, path); coresight_build_paths()
317 ret = coresight_enable_path(path); coresight_build_paths()
319 ret = coresight_disable_path(path); coresight_build_paths()
324 path, enable) == 0) coresight_build_paths()
329 if (list_first_entry(path, struct coresight_device, path_link) != csdev) coresight_build_paths()
340 LIST_HEAD(path); coresight_enable()
351 if (coresight_build_paths(csdev, &path, true)) { coresight_enable()
352 dev_err(&csdev->dev, "building path(s) failed\n"); coresight_enable()
366 LIST_HEAD(path); coresight_disable()
377 if (coresight_build_paths(csdev, &path, false)) coresight_disable()
378 dev_err(&csdev->dev, "releasing path(s) failed\n"); coresight_disable()
304 coresight_build_paths(struct coresight_device *csdev, struct list_head *path, bool enable) coresight_build_paths() argument
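Note: coresight_enable_path() enables each component from source to sink and, if one fails, uses list_for_each_entry_continue_reverse() to disable the already-enabled prefix. The same all-or-nothing shape reduced to an index loop; enable_dev()/disable_dev() are stand-ins, not coresight API:

    #include <stdio.h>

    #define PATH_LEN 4

    static int enable_dev(int i)
    {
        if (i == 2)                 /* simulate a failure mid-path */
            return -1;
        printf("enabled %d\n", i);
        return 0;
    }

    static void disable_dev(int i)
    {
        printf("disabled %d\n", i);
    }

    /* Enable every component of the path; on failure, disable the
     * already-enabled prefix in reverse order, mirroring the
     * list_for_each_entry_continue_reverse() rollback above. */
    static int enable_path(void)
    {
        int i, ret = 0;

        for (i = 0; i < PATH_LEN; i++) {
            ret = enable_dev(i);
            if (ret)
                goto rollback;
        }
        return 0;
    rollback:
        while (--i >= 0)
            disable_dev(i);
        return ret;
    }

    int main(void)
    {
        return enable_path() ? 1 : 0;
    }
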
/linux-4.4.14/net/hsr/
H A Dhsr_main.h47 * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
48 * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
61 /* The helper functions below assumes that 'path' occupies the 4 most
62 * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
68 * with the path field in-between, which seems strange. I'm guessing the MAC
81 static inline void set_hsr_tag_path(struct hsr_tag *ht, u16 path) set_hsr_tag_path() argument
84 (ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12)); set_hsr_tag_path()
124 static inline void set_hsr_stag_path(struct hsr_sup_tag *hst, u16 path) set_hsr_stag_path() argument
126 set_hsr_tag_path((struct hsr_tag *) hst, path); set_hsr_stag_path()
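Note: set_hsr_tag_path() above keeps the low 12 bits (LSDU_size) of the shared big-endian field and ORs the 4-bit path into the top nibble. A worked standalone version of that bit packing:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 4-bit path value into the top nibble of the 16-bit big-endian
     * field it shares with the 12-bit LSDU_size, as set_hsr_tag_path() does. */
    static void set_path(uint16_t *path_and_lsdu_size, uint16_t path)
    {
        uint16_t host = ntohs(*path_and_lsdu_size);

        host = (host & 0x0FFF) | (path << 12);  /* keep LSDU_size, set path */
        *path_and_lsdu_size = htons(host);
    }

    int main(void)
    {
        uint16_t field = htons(0x05DC);         /* LSDU_size = 1500, path = 0 */

        set_path(&field, 0xA);
        printf("0x%04X\n", ntohs(field));       /* 0xA5DC: path in bits 15..12 */
        return 0;
    }
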
/linux-4.4.14/scripts/
H A Dheaderdep.pl80 my $path = "$i/$filename";
81 return $path if -f $path;
95 my $path = search($header);
96 next unless $path;
98 open(my $file, '<', $path) or die($!);
H A Dheaders_check.pl110 my $path = $_[0];
114 my @file_paths = ($path, $dir . "/" . $path, dirname($filename) . "/" . $path);
/linux-4.4.14/fs/ocfs2/
H A Dalloc.c573 struct ocfs2_path *path,
576 * Reset the actual path elements so that we can re-use the structure
577 * to build another path. Generally, this involves freeing the buffer
580 void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root) ocfs2_reinit_path() argument
588 for(i = start; i < path_num_items(path); i++) { ocfs2_reinit_path()
589 node = &path->p_node[i]; ocfs2_reinit_path()
598 * keeping the root extent list, then make sure that our path ocfs2_reinit_path()
602 depth = le16_to_cpu(path_root_el(path)->l_tree_depth); ocfs2_reinit_path()
604 path_root_access(path) = NULL; ocfs2_reinit_path()
606 path->p_tree_depth = depth; ocfs2_reinit_path()
609 void ocfs2_free_path(struct ocfs2_path *path) ocfs2_free_path() argument
611 if (path) { ocfs2_free_path()
612 ocfs2_reinit_path(path, 0); ocfs2_free_path()
613 kfree(path); ocfs2_free_path()
644 * Make the *dest path the same as src and re-initialize src path to
670 static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index, ocfs2_path_insert_eb() argument
683 path->p_node[index].bh = eb_bh; ocfs2_path_insert_eb()
684 path->p_node[index].el = &eb->h_list; ocfs2_path_insert_eb()
691 struct ocfs2_path *path; ocfs2_new_path() local
695 path = kzalloc(sizeof(*path), GFP_NOFS); ocfs2_new_path()
696 if (path) { ocfs2_new_path()
697 path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth); ocfs2_new_path()
699 path_root_bh(path) = root_bh; ocfs2_new_path()
700 path_root_el(path) = root_el; ocfs2_new_path()
701 path_root_access(path) = access; ocfs2_new_path()
704 return path; ocfs2_new_path()
707 struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path) ocfs2_new_path_from_path() argument
709 return ocfs2_new_path(path_root_bh(path), path_root_el(path), ocfs2_new_path_from_path()
710 path_root_access(path)); ocfs2_new_path_from_path()
728 struct ocfs2_path *path, ocfs2_path_bh_journal_access()
731 ocfs2_journal_access_func access = path_root_access(path); ocfs2_path_bh_journal_access()
739 return access(handle, ci, path->p_node[idx].bh, ocfs2_path_bh_journal_access()
744 * Convenience function to journal all components in a path.
748 struct ocfs2_path *path) ocfs2_journal_access_path()
752 if (!path) ocfs2_journal_access_path()
755 for(i = 0; i < path_num_items(path); i++) { ocfs2_journal_access_path()
756 ret = ocfs2_path_bh_journal_access(handle, ci, path, i); ocfs2_journal_access_path()
856 * we'll have to update the path to that leaf.
1099 * Change range of the branches in the right most path according to the leaf
1106 struct ocfs2_path *path = NULL; ocfs2_adjust_rightmost_branch() local
1110 path = ocfs2_new_path_from_et(et); ocfs2_adjust_rightmost_branch()
1111 if (!path) { ocfs2_adjust_rightmost_branch()
1116 status = ocfs2_find_path(et->et_ci, path, UINT_MAX); ocfs2_adjust_rightmost_branch()
1122 status = ocfs2_extend_trans(handle, path_num_items(path)); ocfs2_adjust_rightmost_branch()
1128 status = ocfs2_journal_access_path(et->et_ci, handle, path); ocfs2_adjust_rightmost_branch()
1134 el = path_leaf_el(path); ocfs2_adjust_rightmost_branch()
1137 ocfs2_adjust_rightmost_records(handle, et, path, rec); ocfs2_adjust_rightmost_branch()
1140 ocfs2_free_path(path); ocfs2_adjust_rightmost_branch()
1722 * the lowest level tree node which contains a path to both leaves. This
1726 * pair of adjacent leaves. Its task is to figure out which path
1765 * Traverse a btree path in search of cpos, starting at root_el.
1768 * case it will return the rightmost path.
1855 * Given an initialized path (that is, it has a valid root extent
1856 * list), this function will traverse the btree in search of the path
1859 * The path traveled is recorded in the path structure.
1867 struct ocfs2_path *path; member in struct:find_path_data
1874 ocfs2_path_insert_eb(fp->path, fp->index, bh); find_path_ins()
1878 struct ocfs2_path *path, u32 cpos) ocfs2_find_path()
1883 data.path = path; ocfs2_find_path()
1884 return __ocfs2_find_path(ci, path_root_el(path), cpos, ocfs2_find_path()
1904 * Some paths want to call this instead of allocating a path structure
2000 * The path walking code should have never returned a root and ocfs2_adjust_root_records()
2014 * - When we've moved an extent record from the left path leaf to the right
2015 * path leaf to make room for an empty extent in the left path leaf.
2016 * - When our insert into the right path leaf is at the leftmost edge
2017 * and requires an update of the path immediately to its left. This
2019 * - When we've adjusted the last extent record in the left path leaf and the
2020 * 1st extent record in the right path leaf during cross extent block merge.
2078 * begin our path to the leaves. ocfs2_complete_edge_insert()
2191 * Given a full path, determine what cpos value would return us a path
2194 * Will return zero if the path passed in is already the leftmost path.
2197 struct ocfs2_path *path, u32 *cpos) ocfs2_find_cpos_for_left_leaf()
2203 BUG_ON(path->p_tree_depth == 0); ocfs2_find_cpos_for_left_leaf()
2207 blkno = path_leaf_bh(path)->b_blocknr; ocfs2_find_cpos_for_left_leaf()
2210 i = path->p_tree_depth - 1; ocfs2_find_cpos_for_left_leaf()
2212 el = path->p_node[i].el; ocfs2_find_cpos_for_left_leaf()
2216 * path. ocfs2_find_cpos_for_left_leaf()
2224 * path specified is already ocfs2_find_cpos_for_left_leaf()
2256 blkno = path->p_node[i].bh->b_blocknr; ocfs2_find_cpos_for_left_leaf()
2271 struct ocfs2_path *path) ocfs2_extend_rotate_transaction()
2274 int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits; ocfs2_extend_rotate_transaction()
2290 * theoretical ranges in the path components above the leaves are
2335 * The path to the rightmost leaf should be passed in.
2337 * The array is assumed to be large enough to hold an entire path (tree depth).
2341 * - The 'right_path' array will contain a path to the leaf block
2345 * *ret_left_path will contain a valid path which can be passed to
2382 * 1) Start with the rightmost path. ocfs2_rotate_tree_right()
2384 * 2) Determine a path to the leaf block directly to the left ocfs2_rotate_tree_right()
2388 * which contains a path to both leaves. ocfs2_rotate_tree_right()
2392 * 5) Find the next subtree by considering the left path to be ocfs2_rotate_tree_right()
2393 * the new right path. ocfs2_rotate_tree_right()
2397 * value to get us the left path - insert_cpos might very well ocfs2_rotate_tree_right()
2420 "(left path cpos %u) results in two identical " ocfs2_rotate_tree_right()
2436 * situation by returning the left path. ocfs2_rotate_tree_right()
2489 * There is no need to re-read the next right path ocfs2_rotate_tree_right()
2491 * path. Optimize by copying values instead. ocfs2_rotate_tree_right()
2511 int subtree_index, struct ocfs2_path *path) ocfs2_update_edge_lengths()
2525 * records for all the bh in the path. ocfs2_update_edge_lengths()
2534 ret = ocfs2_journal_access_path(et->et_ci, handle, path); ocfs2_update_edge_lengths()
2541 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; ocfs2_update_edge_lengths()
2550 for (i = 0; i < path->p_tree_depth; i++) { ocfs2_update_edge_lengths()
2551 el = path->p_node[i].el; ocfs2_update_edge_lengths()
2558 ocfs2_journal_dirty(handle, path->p_node[i].bh); ocfs2_update_edge_lengths()
2567 struct ocfs2_path *path, int unlink_start) ocfs2_unlink_path()
2574 for(i = unlink_start; i < path_num_items(path); i++) { ocfs2_unlink_path()
2575 bh = path->p_node[i].bh; ocfs2_unlink_path()
2717 * Getting here with an empty extent in the right path implies ocfs2_rotate_subtree_left()
2718 * that it's the rightmost path and will be deleted. ocfs2_rotate_subtree_left()
2749 * after removal of the right path in which case we ocfs2_rotate_subtree_left()
2784 * above so we could delete the right path ocfs2_rotate_subtree_left()
2802 * Given a full path, determine what cpos value would return us a path
2805 * Will return zero if the path passed in is already the rightmost path.
2811 struct ocfs2_path *path, u32 *cpos) ocfs2_find_cpos_for_right_leaf()
2819 if (path->p_tree_depth == 0) ocfs2_find_cpos_for_right_leaf()
2822 blkno = path_leaf_bh(path)->b_blocknr; ocfs2_find_cpos_for_right_leaf()
2825 i = path->p_tree_depth - 1; ocfs2_find_cpos_for_right_leaf()
2829 el = path->p_node[i].el; ocfs2_find_cpos_for_right_leaf()
2833 * path. ocfs2_find_cpos_for_right_leaf()
2842 * path specified is already ocfs2_find_cpos_for_right_leaf()
2871 blkno = path->p_node[i].bh->b_blocknr; ocfs2_find_cpos_for_right_leaf()
2881 struct ocfs2_path *path) ocfs2_rotate_rightmost_leaf_left()
2884 struct buffer_head *bh = path_leaf_bh(path); ocfs2_rotate_rightmost_leaf_left()
2885 struct ocfs2_extent_list *el = path_leaf_el(path); ocfs2_rotate_rightmost_leaf_left()
2890 ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path, ocfs2_rotate_rightmost_leaf_left()
2891 path_num_items(path) - 1); ocfs2_rotate_rightmost_leaf_left()
2907 struct ocfs2_path *path, __ocfs2_rotate_tree_left()
2917 if (!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0]))) __ocfs2_rotate_tree_left()
2922 ret = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos); __ocfs2_rotate_tree_left()
2928 left_path = ocfs2_new_path_from_path(path); __ocfs2_rotate_tree_left()
2935 ocfs2_cp_path(left_path, path); __ocfs2_rotate_tree_left()
2937 right_path = ocfs2_new_path_from_path(path); __ocfs2_rotate_tree_left()
3023 struct ocfs2_path *path, ocfs2_remove_rightmost_path()
3038 * whether path is the only existing one. ocfs2_remove_rightmost_path()
3042 path); ocfs2_remove_rightmost_path()
3048 ret = ocfs2_journal_access_path(et->et_ci, handle, path); ocfs2_remove_rightmost_path()
3055 path, &cpos); ocfs2_remove_rightmost_path()
3063 * We have a path to the left of this one - it needs ocfs2_remove_rightmost_path()
3066 left_path = ocfs2_new_path_from_path(path); ocfs2_remove_rightmost_path()
3085 subtree_index = ocfs2_find_subtree_root(et, left_path, path); ocfs2_remove_rightmost_path()
3087 ocfs2_unlink_subtree(handle, et, left_path, path, ocfs2_remove_rightmost_path()
3100 * 'path' is also the leftmost path which ocfs2_remove_rightmost_path()
3106 ocfs2_unlink_path(handle, et, dealloc, path, 1); ocfs2_remove_rightmost_path()
3116 ocfs2_journal_dirty(handle, path_root_bh(path)); ocfs2_remove_rightmost_path()
3125 struct ocfs2_path *path, ocfs2_remove_rightmost_empty_extent()
3130 int credits = path->p_tree_depth * 2 + 1; ocfs2_remove_rightmost_empty_extent()
3139 ret = ocfs2_remove_rightmost_path(handle, et, path, dealloc); ocfs2_remove_rightmost_empty_extent()
3151 * rotation. We start at some non-rightmost path containing an empty
3153 * path by rotating records to the left in every subtree.
3159 * This won't handle a length update of the rightmost path records if
3165 struct ocfs2_path *path, ocfs2_rotate_tree_left()
3173 el = path_leaf_el(path); ocfs2_rotate_tree_left()
3177 if (path->p_tree_depth == 0) { ocfs2_rotate_tree_left()
3183 ret = ocfs2_rotate_rightmost_leaf_left(handle, et, path); ocfs2_rotate_tree_left()
3202 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; ocfs2_rotate_tree_left()
3207 * rightmost path. Get the other cases out of the way ocfs2_rotate_tree_left()
3223 * XXX: The caller can not trust "path" any more after ocfs2_rotate_tree_left()
3231 ret = ocfs2_remove_rightmost_path(handle, et, path, ocfs2_rotate_tree_left()
3239 * Now we can loop, remembering the path we get from -EAGAIN ocfs2_rotate_tree_left()
3243 ret = __ocfs2_rotate_tree_left(handle, et, orig_credits, path, ocfs2_rotate_tree_left()
3525 * the right path to indicate the new rightmost path.
3654 * So we use the new rightmost path. ocfs2_merge_rec_left()
3669 struct ocfs2_path *path, ocfs2_try_to_merge_extent()
3676 struct ocfs2_extent_list *el = path_leaf_el(path); ocfs2_try_to_merge_extent()
3689 ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); ocfs2_try_to_merge_extent()
3718 ret = ocfs2_merge_rec_right(path, handle, et, split_rec, ocfs2_try_to_merge_extent()
3731 ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); ocfs2_try_to_merge_extent()
3743 ret = ocfs2_merge_rec_left(path, handle, et, rec, ocfs2_try_to_merge_extent()
3751 ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); ocfs2_try_to_merge_extent()
3768 ret = ocfs2_merge_rec_left(path, handle, et, ocfs2_try_to_merge_extent()
3776 ret = ocfs2_merge_rec_right(path, handle, ocfs2_try_to_merge_extent()
3790 ret = ocfs2_rotate_tree_left(handle, et, path, ocfs2_try_to_merge_extent()
3928 struct ocfs2_path *path, ocfs2_adjust_rightmost_records()
3939 for (i = 0; i < path->p_tree_depth; i++) { ocfs2_adjust_rightmost_records()
3940 bh = path->p_node[i].bh; ocfs2_adjust_rightmost_records()
3941 el = path->p_node[i].el; ocfs2_adjust_rightmost_records()
3985 * neighboring path. ocfs2_append_rec_to_path()
4073 * started in the left path but moved to the ocfs2_split_record()
4078 * In this case, the left path should always ocfs2_split_record()
4087 * empty extent in the left path, we ocfs2_split_record()
4112 * Left path is easy - we can just allow the insert to ocfs2_split_record()
4131 * right_path is the path we want to do the actual insert
4249 * Determine the path to start with. Rotations need the ocfs2_do_insert_extent()
4250 * rightmost path, everything else can go directly to the ocfs2_do_insert_extent()
4270 * Both might pass back a path immediate to the left of the ocfs2_do_insert_extent()
4329 struct ocfs2_path *path, ocfs2_figure_merge_contig_type()
4346 } else if (path->p_tree_depth > 0) { ocfs2_figure_merge_contig_type()
4347 status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos); ocfs2_figure_merge_contig_type()
4352 left_path = ocfs2_new_path_from_path(path); ocfs2_figure_merge_contig_type()
4400 path->p_tree_depth > 0) { ocfs2_figure_merge_contig_type()
4401 status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos); ocfs2_figure_merge_contig_type()
4408 right_path = ocfs2_new_path_from_path(path); ocfs2_figure_merge_contig_type()
4560 struct ocfs2_path *path = NULL; ocfs2_figure_insert_type() local
4603 path = ocfs2_new_path_from_et(et); ocfs2_figure_insert_type()
4604 if (!path) { ocfs2_figure_insert_type()
4613 * us the rightmost tree path. This is accounted for below in ocfs2_figure_insert_type()
4616 ret = ocfs2_find_path(et->et_ci, path, le32_to_cpu(insert_rec->e_cpos)); ocfs2_figure_insert_type()
4622 el = path_leaf_el(path); ocfs2_figure_insert_type()
4625 * Now that we have the path, there's two things we want to determine: ocfs2_figure_insert_type()
4638 * cluster count on the last record of the path directly to its ocfs2_figure_insert_type()
4649 * whether the path doesn't exist. This will only happen in ocfs2_figure_insert_type()
4654 path_leaf_bh(path)->b_blocknr) { ocfs2_figure_insert_type()
4657 * tree path. This might be an appending insert. There are ocfs2_figure_insert_type()
4667 ocfs2_free_path(path); ocfs2_figure_insert_type()
4885 struct ocfs2_path *path, ocfs2_split_and_insert()
4905 rec = path_leaf_el(path)->l_recs[split_index]; ocfs2_split_and_insert()
4969 ocfs2_reinit_path(path, 1); ocfs2_split_and_insert()
4972 ret = ocfs2_find_path(et->et_ci, path, cpos); ocfs2_split_and_insert()
4978 el = path_leaf_el(path); ocfs2_split_and_insert()
4997 struct ocfs2_path *path, ocfs2_replace_extent_rec()
5004 ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path, ocfs2_replace_extent_rec()
5005 path_num_items(path) - 1); ocfs2_replace_extent_rec()
5013 ocfs2_journal_dirty(handle, path_leaf_bh(path)); ocfs2_replace_extent_rec()
5020 * pointed to by path. Merge with the contiguous extent record if needed.
5039 struct ocfs2_path *path, ocfs2_split_extent()
5046 struct ocfs2_extent_list *el = path_leaf_el(path); ocfs2_split_extent()
5060 ret = ocfs2_figure_merge_contig_type(et, path, el, ocfs2_split_extent()
5074 if (path->p_tree_depth) { ocfs2_split_extent()
5088 rightmost_el = path_root_el(path); ocfs2_split_extent()
5104 ret = ocfs2_replace_extent_rec(handle, et, path, el, ocfs2_split_extent()
5107 ret = ocfs2_split_and_insert(handle, et, path, ocfs2_split_extent()
5113 ret = ocfs2_try_to_merge_extent(handle, et, path, ocfs2_split_extent()
5261 struct ocfs2_path *path, ocfs2_split_tree()
5276 el = path_leaf_el(path); ocfs2_split_tree()
5281 depth = path->p_tree_depth; ocfs2_split_tree()
5294 rightmost_el = path_leaf_el(path); ocfs2_split_tree()
5296 credits = path->p_tree_depth + ocfs2_split_tree()
5331 struct ocfs2_path *path, int index, ocfs2_truncate_rec()
5340 struct ocfs2_extent_list *el = path_leaf_el(path); ocfs2_truncate_rec()
5345 ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); ocfs2_truncate_rec()
5355 path->p_tree_depth) { ocfs2_truncate_rec()
5362 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; ocfs2_truncate_rec()
5368 if (index == 0 && path->p_tree_depth && ocfs2_truncate_rec()
5372 * record truncate) of an interior (or rightmost) path ocfs2_truncate_rec()
5383 ret = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos); ocfs2_truncate_rec()
5390 left_path = ocfs2_new_path_from_path(path); ocfs2_truncate_rec()
5408 path); ocfs2_truncate_rec()
5414 ret = ocfs2_journal_access_path(et->et_ci, handle, path); ocfs2_truncate_rec()
5439 * We skip the edge update if this path will ocfs2_truncate_rec()
5443 ocfs2_adjust_rightmost_records(handle, et, path, ocfs2_truncate_rec()
5455 ocfs2_adjust_rightmost_records(handle, et, path, rec); ocfs2_truncate_rec()
5469 subtree_index = ocfs2_find_subtree_root(et, left_path, path); ocfs2_truncate_rec()
5470 ocfs2_complete_edge_insert(handle, left_path, path, ocfs2_truncate_rec()
5474 ocfs2_journal_dirty(handle, path_leaf_bh(path)); ocfs2_truncate_rec()
5476 ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); ocfs2_truncate_rec()
5497 struct ocfs2_path *path = NULL; ocfs2_remove_extent() local
5505 path = ocfs2_new_path_from_et(et); ocfs2_remove_extent()
5506 if (!path) { ocfs2_remove_extent()
5512 ret = ocfs2_find_path(et->et_ci, path, cpos); ocfs2_remove_extent()
5518 el = path_leaf_el(path); ocfs2_remove_extent()
5557 ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, ocfs2_remove_extent()
5564 ret = ocfs2_split_tree(handle, et, path, index, ocfs2_remove_extent()
5575 ocfs2_reinit_path(path, 1); ocfs2_remove_extent()
5577 ret = ocfs2_find_path(et->et_ci, path, cpos); ocfs2_remove_extent()
5583 el = path_leaf_el(path); ocfs2_remove_extent()
5611 ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, ocfs2_remove_extent()
5620 ocfs2_free_path(path); ocfs2_remove_extent()
7034 struct ocfs2_path *path = NULL; ocfs2_commit_truncate() local
7048 path = ocfs2_new_path(di_bh, &di->id2.i_list, ocfs2_commit_truncate()
7050 if (!path) { ocfs2_commit_truncate()
7070 status = ocfs2_find_path(INODE_CACHE(inode), path, UINT_MAX); ocfs2_commit_truncate()
7080 path->p_tree_depth); ocfs2_commit_truncate()
7093 el = path_leaf_el(path); ocfs2_commit_truncate()
7098 (unsigned long long)path_leaf_bh(path)->b_blocknr); ocfs2_commit_truncate()
7118 &et, path, &dealloc); ocfs2_commit_truncate()
7124 ocfs2_reinit_path(path, 1); ocfs2_commit_truncate()
7175 ocfs2_reinit_path(path, 1); ocfs2_commit_truncate()
7191 ocfs2_free_path(path); ocfs2_commit_truncate()
726 ocfs2_path_bh_journal_access(handle_t *handle, struct ocfs2_caching_info *ci, struct ocfs2_path *path, int idx) ocfs2_path_bh_journal_access() argument
746 ocfs2_journal_access_path(struct ocfs2_caching_info *ci, handle_t *handle, struct ocfs2_path *path) ocfs2_journal_access_path() argument
1877 ocfs2_find_path(struct ocfs2_caching_info *ci, struct ocfs2_path *path, u32 cpos) ocfs2_find_path() argument
2196 ocfs2_find_cpos_for_left_leaf(struct super_block *sb, struct ocfs2_path *path, u32 *cpos) ocfs2_find_cpos_for_left_leaf() argument
2269 ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth, int op_credits, struct ocfs2_path *path) ocfs2_extend_rotate_transaction() argument
2509 ocfs2_update_edge_lengths(handle_t *handle, struct ocfs2_extent_tree *et, int subtree_index, struct ocfs2_path *path) ocfs2_update_edge_lengths() argument
2564 ocfs2_unlink_path(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_cached_dealloc_ctxt *dealloc, struct ocfs2_path *path, int unlink_start) ocfs2_unlink_path() argument
2810 ocfs2_find_cpos_for_right_leaf(struct super_block *sb, struct ocfs2_path *path, u32 *cpos) ocfs2_find_cpos_for_right_leaf() argument
2879 ocfs2_rotate_rightmost_leaf_left(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path) ocfs2_rotate_rightmost_leaf_left() argument
2904 __ocfs2_rotate_tree_left(handle_t *handle, struct ocfs2_extent_tree *et, int orig_credits, struct ocfs2_path *path, struct ocfs2_cached_dealloc_ctxt *dealloc, struct ocfs2_path **empty_extent_path) __ocfs2_rotate_tree_left() argument
3021 ocfs2_remove_rightmost_path(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_cached_dealloc_ctxt *dealloc) ocfs2_remove_rightmost_path() argument
3123 ocfs2_remove_rightmost_empty_extent(struct ocfs2_super *osb, struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_cached_dealloc_ctxt *dealloc) ocfs2_remove_rightmost_empty_extent() argument
3163 ocfs2_rotate_tree_left(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_cached_dealloc_ctxt *dealloc) ocfs2_rotate_tree_left() argument
3667 ocfs2_try_to_merge_extent(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, int split_index, struct ocfs2_extent_rec *split_rec, struct ocfs2_cached_dealloc_ctxt *dealloc, struct ocfs2_merge_ctxt *ctxt) ocfs2_try_to_merge_extent() argument
3926 ocfs2_adjust_rightmost_records(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_extent_rec *insert_rec) ocfs2_adjust_rightmost_records() argument
4328 ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_extent_list *el, int index, struct ocfs2_extent_rec *split_rec, struct ocfs2_merge_ctxt *ctxt) ocfs2_figure_merge_contig_type() argument
4883 ocfs2_split_and_insert(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct buffer_head **last_eb_bh, int split_index, struct ocfs2_extent_rec *orig_split_rec, struct ocfs2_alloc_context *meta_ac) ocfs2_split_and_insert() argument
4995 ocfs2_replace_extent_rec(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_extent_list *el, int split_index, struct ocfs2_extent_rec *split_rec) ocfs2_replace_extent_rec() argument
5037 ocfs2_split_extent(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, int split_index, struct ocfs2_extent_rec *split_rec, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc) ocfs2_split_extent() argument
5260 ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, int index, u32 new_range, struct ocfs2_alloc_context *meta_ac) ocfs2_split_tree() argument
5329 ocfs2_truncate_rec(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, int index, struct ocfs2_cached_dealloc_ctxt *dealloc, u32 cpos, u32 len) ocfs2_truncate_rec() argument
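Note: a pattern repeats throughout the ocfs2 excerpts: allocate one path, look it up with ocfs2_find_path(), and on re-lookup call ocfs2_reinit_path(path, 1) to drop the per-level buffers while keeping the root, rather than reallocating (see ocfs2_remove_extent() and ocfs2_commit_truncate() above). A toy model of that reuse lifecycle, with malloc()ed strings standing in for buffer heads; struct toy_path is a deliberate simplification, not the real structure:

    #include <stdio.h>
    #include <stdlib.h>

    #define DEPTH 3

    /* Toy stand-in for struct ocfs2_path: the structure itself survives
     * across lookups, while the per-level buffers do not. */
    struct toy_path {
        int depth;
        char *node[DEPTH];      /* stands in for per-level buffer heads */
    };

    /* Model of ocfs2_reinit_path(path, 1): drop the per-level buffers so
     * the same path can be used for another lookup without reallocating. */
    static void toy_reinit_path(struct toy_path *p)
    {
        int i;

        for (i = 0; i < p->depth; i++) {
            free(p->node[i]);
            p->node[i] = NULL;
        }
    }

    /* Model of ocfs2_find_path(): populate one buffer per tree level. */
    static int toy_find_path(struct toy_path *p, unsigned int cpos)
    {
        int i;

        for (i = 0; i < p->depth; i++) {
            p->node[i] = malloc(16);
            if (!p->node[i])
                return -1;
            snprintf(p->node[i], 16, "lvl%d@%u", i, cpos);
        }
        return 0;
    }

    int main(void)
    {
        struct toy_path path = { DEPTH, { NULL } };

        if (toy_find_path(&path, 100))
            return 1;
        printf("leaf: %s\n", path.node[DEPTH - 1]);

        toy_reinit_path(&path);         /* reuse, as ocfs2_remove_extent() does */
        if (toy_find_path(&path, 200))
            return 1;
        printf("leaf: %s\n", path.node[DEPTH - 1]);

        toy_reinit_path(&path);         /* ocfs2_free_path() also reinits first */
        return 0;
    }
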
/linux-4.4.14/tools/lib/bpf/
H A Dlibbpf.c203 char path[]; member in struct:bpf_object
306 static struct bpf_object *bpf_object__new(const char *path, bpf_object__new() argument
312 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); bpf_object__new()
314 pr_warning("alloc memory failed for %s\n", path); bpf_object__new()
318 strcpy(obj->path, path); bpf_object__new()
373 obj->efile.fd = open(obj->path, O_RDONLY); bpf_object__elf_init()
375 pr_warning("failed to open %s: %s\n", obj->path, bpf_object__elf_init()
387 obj->path); bpf_object__elf_init()
394 obj->path); bpf_object__elf_init()
402 obj->path); bpf_object__elf_init()
447 pr_debug("license of %s is %s\n", obj->path, obj->license); bpf_object__init_license()
458 pr_warning("invalid kver section in %s\n", obj->path); bpf_object__init_kversion()
463 pr_debug("kernel version of %s is %x\n", obj->path, bpf_object__init_kversion()
474 obj->path); bpf_object__init_maps()
480 pr_warning("malloc maps failed: %s\n", obj->path); bpf_object__init_maps()
486 pr_debug("maps in %s: %ld bytes\n", obj->path, (long)size); bpf_object__init_maps()
500 obj->path); bpf_object__elf_collect()
512 obj->path); bpf_object__elf_collect()
520 obj->path); bpf_object__elf_collect()
528 name, obj->path); bpf_object__elf_collect()
551 obj->path); bpf_object__elf_collect()
565 name, obj->path, errmsg); bpf_object__elf_collect()
677 obj->path); bpf_object__create_maps()
898 obj->path); bpf_object__validate()
905 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz) __bpf_object__open() argument
911 pr_warning("failed to init libelf for %s\n", path); __bpf_object__open()
915 obj = bpf_object__new(path, obj_buf, obj_buf_sz); __bpf_object__open()
932 struct bpf_object *bpf_object__open(const char *path) bpf_object__open() argument
935 if (!path) bpf_object__open()
938 pr_debug("loading %s\n", path); bpf_object__open()
940 return __bpf_object__open(path, NULL, 0); bpf_object__open()
1005 pr_warning("failed to load object '%s'\n", obj->path); bpf_object__load()
1055 return obj->path; bpf_object__get_name()
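Note: bpf_object__new() sizes a single allocation as sizeof(struct bpf_object) + strlen(path) + 1 because the struct ends in the C99 flexible array member 'char path[]'. A minimal standalone version of the idiom; struct object is illustrative, not the libbpf type:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct object {
        int fd;
        char path[];    /* flexible array member: storage follows the struct */
    };

    static struct object *object_new(const char *path)
    {
        /* One allocation covers the header and the NUL-terminated string,
         * exactly the sizing bpf_object__new() uses above. */
        struct object *obj = calloc(1, sizeof(*obj) + strlen(path) + 1);

        if (!obj)
            return NULL;
        obj->fd = -1;
        strcpy(obj->path, path);
        return obj;
    }

    int main(void)
    {
        struct object *obj = object_new("/sys/fs/bpf/prog");

        if (!obj)
            return 1;
        printf("%s\n", obj->path);
        free(obj);      /* a single free() releases both parts */
        return 0;
    }
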
/linux-4.4.14/tools/lib/traceevent/
H A Devent-plugin.c284 load_plugin(struct pevent *pevent, const char *path, load_plugin() argument
294 plugin = malloc(strlen(path) + strlen(file) + 2); load_plugin()
300 strcpy(plugin, path); load_plugin()
343 const char *path, load_plugins_dir()
345 const char *path, load_plugins_dir()
355 ret = stat(path, &st); load_plugins_dir()
362 dir = opendir(path); load_plugins_dir()
377 load_plugin(pevent, path, name, data); load_plugins_dir()
386 const char *path, load_plugins()
392 char *path; load_plugins() local
424 path = malloc(strlen(home) + strlen(LOCAL_PLUGIN_DIR) + 2); load_plugins()
425 if (!path) { load_plugins()
430 strcpy(path, home); load_plugins()
431 strcat(path, "/"); load_plugins()
432 strcat(path, LOCAL_PLUGIN_DIR); load_plugins()
434 load_plugins_dir(pevent, suffix, path, load_plugin, data); load_plugins()
436 free(path); load_plugins()
342 load_plugins_dir(struct pevent *pevent, const char *suffix, const char *path, void (*load_plugin)(struct pevent *pevent, const char *path, const char *name, void *data), void *data) load_plugins_dir() argument
384 load_plugins(struct pevent *pevent, const char *suffix, void (*load_plugin)(struct pevent *pevent, const char *path, const char *name, void *data), void *data) load_plugins() argument
/linux-4.4.14/fs/ceph/
H A Ddebugfs.c57 char *path; mdsc_show() local
81 path = ceph_mdsc_build_path(req->r_dentry, &pathlen, mdsc_show()
83 if (IS_ERR(path)) mdsc_show()
84 path = NULL; mdsc_show()
89 path ? path : ""); mdsc_show()
91 kfree(path); mdsc_show()
100 path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen, mdsc_show()
102 if (IS_ERR(path)) mdsc_show()
103 path = NULL; mdsc_show()
109 path ? path : ""); mdsc_show()
111 kfree(path); mdsc_show()
/linux-4.4.14/kernel/bpf/
H A Dinode.c203 struct path path; bpf_obj_do_pin() local
208 dentry = kern_path_create(AT_FDCWD, pathname->name, &path, 0); bpf_obj_do_pin()
215 ret = security_path_mknod(&path, dentry, mode, devt); bpf_obj_do_pin()
219 dir = d_inode(path.dentry); bpf_obj_do_pin()
229 done_path_create(&path, dentry); bpf_obj_do_pin()
262 struct path path; bpf_obj_do_get() local
266 ret = kern_path(pathname->name, LOOKUP_FOLLOW, &path); bpf_obj_do_get()
270 inode = d_backing_inode(path.dentry); bpf_obj_do_get()
281 touch_atime(&path); bpf_obj_do_get()
283 path_put(&path); bpf_obj_do_get()
286 path_put(&path); bpf_obj_do_get()
/linux-4.4.14/fs/overlayfs/
H A Dsuper.c61 struct path lowerstack[];
99 void ovl_path_upper(struct dentry *dentry, struct path *path) ovl_path_upper() argument
104 path->mnt = ofs->upper_mnt; ovl_path_upper()
105 path->dentry = ovl_upperdentry_dereference(oe); ovl_path_upper()
108 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) ovl_path_real() argument
113 ovl_path_lower(dentry, path); ovl_path_real()
115 ovl_path_upper(dentry, path); ovl_path_real()
174 void ovl_path_lower(struct dentry *dentry, struct path *path) ovl_path_lower() argument
178 *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL }; ovl_path_lower()
417 int ovl_path_next(int idx, struct dentry *dentry, struct path *path) ovl_path_next() argument
423 ovl_path_upper(dentry, path); ovl_path_next()
424 if (path->dentry) ovl_path_next()
429 *path = oe->lowerstack[idx - 1]; ovl_path_next()
439 struct path *stack = NULL; ovl_lookup()
474 stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL); ovl_lookup()
481 struct path lowerpath = poe->lowerstack[i]; ovl_lookup()
551 memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr); ovl_lookup()
570 struct file *ovl_path_open(struct path *path, int flags) ovl_path_open() argument
572 return dentry_open(path, flags, current_cred()); ovl_path_open()
604 struct path path; ovl_statfs() local
607 ovl_path_real(root_dentry, &path); ovl_statfs()
609 err = vfs_statfs(&path, buf); ovl_statfs()
806 static int ovl_mount_dir_noesc(const char *name, struct path *path) ovl_mount_dir_noesc() argument
814 err = kern_path(name, LOOKUP_FOLLOW, path); ovl_mount_dir_noesc()
820 if (ovl_dentry_weird(path->dentry)) { ovl_mount_dir_noesc()
824 if (!S_ISDIR(path->dentry->d_inode->i_mode)) { ovl_mount_dir_noesc()
831 path_put(path); ovl_mount_dir_noesc()
836 static int ovl_mount_dir(const char *name, struct path *path) ovl_mount_dir() argument
843 err = ovl_mount_dir_noesc(tmp, path); ovl_mount_dir()
846 if (ovl_dentry_remote(path->dentry)) { ovl_mount_dir()
849 path_put(path); ovl_mount_dir()
857 static int ovl_lower_dir(const char *name, struct path *path, long *namelen, ovl_lower_dir() argument
863 err = ovl_mount_dir_noesc(name, path); ovl_lower_dir()
867 err = vfs_statfs(path, &statfs); ovl_lower_dir()
873 *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth); ovl_lower_dir()
875 if (ovl_dentry_remote(path->dentry)) ovl_lower_dir()
881 path_put(path); ovl_lower_dir()
920 struct path upperpath = { NULL, NULL }; ovl_fill_super()
921 struct path workpath = { NULL, NULL }; ovl_fill_super()
925 struct path *stack = NULL; ovl_fill_super()
999 stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); ovl_fill_super()
H A Doverlayfs.h137 void ovl_path_upper(struct dentry *dentry, struct path *path);
138 void ovl_path_lower(struct dentry *dentry, struct path *path);
139 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
140 int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
156 struct file *ovl_path_open(struct path *path, int flags);
197 struct path *lowerpath, struct kstat *stat);
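Note: ovl_path_next() above iterates the merged layers: index 0 yields the upper layer when one exists, and positive indices map to lowerstack[idx - 1]. A small sketch of that cursor-style iteration, assuming the usual convention of returning the next index and -1 once the stack is exhausted (the return value is not shown in the excerpt):

    #include <stdio.h>

    #define NLOWER 2

    struct layered {
        const char *upper;              /* NULL on a lower-only mount */
        const char *lower[NLOWER];
    };

    /* Cursor-style iteration: index 0 selects the upper layer when present,
     * positive indices select lower[idx - 1]; the return value is the next
     * index to pass back in, or -1 once the stack is exhausted. */
    static int layer_next(struct layered *l, int idx, const char **p)
    {
        if (idx == 0) {
            if (l->upper) {
                *p = l->upper;
                return 1;
            }
            idx = 1;                    /* no upper layer: fall through */
        }
        *p = l->lower[idx - 1];
        return idx < NLOWER ? idx + 1 : -1;
    }

    int main(void)
    {
        struct layered mnt = { "upper", { "lower0", "lower1" } };
        const char *p;
        int idx = 0;

        while (idx != -1) {
            idx = layer_next(&mnt, idx, &p);
            printf("%s\n", p);          /* upper, lower0, lower1 */
        }
        return 0;
    }
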
/linux-4.4.14/sound/core/
H A Dmisc.c55 /* strip the leading path if the given path is absolute */ sanity_file_name()
56 static const char *sanity_file_name(const char *path) sanity_file_name() argument
58 if (*path == '/') sanity_file_name()
59 return strrchr(path, '/') + 1; sanity_file_name()
61 return path; sanity_file_name()
66 void __snd_printk(unsigned int level, const char *path, int line, __snd_printk() argument
93 printk(verbose_fmt, sanity_file_name(path), line, &vaf); __snd_printk()
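Note: sanity_file_name() above reduces an absolute __FILE__ path to its last component for log output. The same two-line logic as a standalone program:

    #include <stdio.h>
    #include <string.h>

    /* Same logic as sanity_file_name(): for an absolute path keep only the
     * component after the last '/', otherwise return the string unchanged. */
    static const char *basename_if_absolute(const char *path)
    {
        if (*path == '/')
            return strrchr(path, '/') + 1;  /* '/' is guaranteed to exist */
        return path;
    }

    int main(void)
    {
        printf("%s\n", basename_if_absolute("/sound/core/misc.c"));  /* misc.c */
        printf("%s\n", basename_if_absolute("misc.c"));              /* misc.c */
        return 0;
    }
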
/linux-4.4.14/net/mac80211/
H A Dmesh.h22 * enum mesh_path_flags - mac80211 mesh path flags
26 * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
27 * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
28 * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence
30 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be
32 * @MESH_PATH_RESOLVED: the mesh path has been resolved
33 * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination
36 * MESH_PATH_RESOLVED is used by the mesh path timer to
37 * decide when to stop or cancel the mesh path discovery.
54 * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
73 * struct mesh_path - mac80211 mesh path structure
75 * @dst: mesh path destination mac address
79 * @timer: mesh path discovery timer
81 * path is unresolved
85 * @exp_time: in jiffies, when the path will expire or when it expired
89 * @flags: mesh path flags, as specified on &enum mesh_path_flags
90 * @state_lock: mesh path state lock used to protect changes to the
92 * an mpath to a hash bucket on a path table.
94 * @rann_metric: the aggregated path metric towards the root node
96 * @is_root: the destination station of this path is a root node
97 * @is_gate: the destination station of this path is a mesh gate
100 * The combination of dst and sdata is unique in the mesh path table. Since the
/linux-4.4.14/arch/x86/um/shared/sysdep/
H A Dptrace_32.h13 /* syscall emulation path in ptrace */
/linux-4.4.14/arch/cris/include/arch-v32/arch/
H A Dbug.h9 * The penalty for the in-band code path will be the size of break 14.
/linux-4.4.14/samples/trace_events/
H A DMakefile6 # have that tracer file in its main search path. This is because
/linux-4.4.14/security/integrity/
H A Ddigsig.c88 int __init integrity_load_x509(const unsigned int id, const char *path) integrity_load_x509() argument
97 rc = integrity_read_file(path, &data); integrity_load_x509()
112 rc, path); integrity_load_x509()
115 key_ref_to_ptr(key)->description, path); integrity_load_x509()
/linux-4.4.14/include/net/netfilter/
H A Dxt_rateest.h11 /* following fields not accessed in hot path */
/linux-4.4.14/arch/mips/include/asm/octeon/
H A Dcvmx-spi.h90 * active) or as a halfplex (either the Tx data path is
91 * active or the Rx data path is active, but not both).
108 * active) or as a halfplex (either the Tx data path is
109 * active or the Rx data path is active, but not both).
177 * active) or as a halfplex (either the Tx data path is
178 * active or the Rx data path is active, but not both).
193 * active) or as a halfplex (either the Tx data path is
194 * active or the Rx data path is active, but not both).
210 * active) or as a halfplex (either the Tx data path is
211 * active or the Rx data path is active, but not both).
227 * active) or as a halfplex (either the Tx data path is
228 * active or the Rx data path is active, but not both).
244 * active) or as a halfplex (either the Tx data path is
245 * active or the Rx data path is active, but not both).
261 * active) or as a halfplex (either the Tx data path is
262 * active or the Rx data path is active, but not both).
/linux-4.4.14/tools/testing/selftests/firmware/
H A Dfw_filesystem.sh21 OLD_FWPATH=$(cat /sys/module/firmware_class/parameters/path)
31 echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
43 # Set the kernel search path.
44 echo -n "$FWPATH" >/sys/module/firmware_class/parameters/path
/linux-4.4.14/drivers/acpi/acpica/
H A Dnsnames.c59 * the node, In external format (name segments separated by path
83 * RETURN: Length of path, including prefix
146 /* Build the path in the caller buffer */ acpi_ns_handle_to_pathname()
164 * full_path - Where the path name is returned
165 * path_size - Size of returned path name buffer
168 * RETURN: Return 1 if the AML path is empty, otherwise returning (length
174 * contain the namespace node's path name, the actual required
193 #define ACPI_PATH_PUT8(path, size, byte, length) \ acpi_ns_build_normalized_path()
197 (path)[(length)] = (byte); \ acpi_ns_build_normalized_path()
235 /* Reverse the path string */ acpi_ns_build_normalized_path()
265 * the node, In external format (name segments separated by path
297 /* Build the path in the allocated buffer */ acpi_ns_get_normalized_pathname()
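Note: acpi_ns_build_normalized_path() can only walk from a node up toward the root, so it emits the path backwards and reverses the buffer at the end ("Reverse the path string" above). For the double reversal to leave each 4-character name segment intact, the segments themselves have to be emitted character-reversed; that detail is not in the excerpt, so this sketch is a reconstruction:

    #include <stdio.h>

    struct node {
        const char *name;       /* fixed 4-character ACPI name segment */
        struct node *parent;
    };

    /* Emit each segment child-to-root with its characters reversed, then
     * reverse the whole buffer: the double reversal restores the characters
     * inside every segment while flipping segment order to root-to-child. */
    static void build_path(struct node *n, char *buf)
    {
        size_t len = 0, i;
        char tmp;

        for (; n; n = n->parent) {
            for (i = 0; i < 4; i++)
                buf[len++] = n->name[3 - i];
            buf[len++] = n->parent ? '.' : '\\';
        }
        buf[len] = '\0';
        for (i = 0; i < len / 2; i++) {
            tmp = buf[i];
            buf[i] = buf[len - 1 - i];
            buf[len - 1 - i] = tmp;
        }
    }

    int main(void)
    {
        struct node root = { "_SB_", NULL };
        struct node dev  = { "PCI0", &root };
        struct node leaf = { "LNKA", &dev };
        char buf[64];

        build_path(&leaf, buf);
        printf("%s\n", buf);    /* \_SB_.PCI0.LNKA */
        return 0;
    }
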
/linux-4.4.14/drivers/net/wireless/realtek/rtlwifi/rtl8723be/
H A Dphy.c78 void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path, rtl8723be_phy_set_rf_reg() argument
87 regaddr, bitmask, data, path); rtl8723be_phy_set_rf_reg()
92 original_value = rtl8723_phy_rf_serial_read(hw, path, rtl8723be_phy_set_rf_reg()
99 rtl8723_phy_rf_serial_write(hw, path, regaddr, data); rtl8723be_phy_set_rf_reg()
105 regaddr, bitmask, data, path); rtl8723be_phy_set_rf_reg()
214 u8 band, path, txnum, section; _rtl8723be_phy_init_tx_power_by_rate() local
217 for (path = 0; path < TX_PWR_BY_RATE_NUM_RF; ++path) _rtl8723be_phy_init_tx_power_by_rate()
223 [band][path][txnum][section] = 0; _rtl8723be_phy_init_tx_power_by_rate()
249 u8 path, u8 rate_section, _rtl8723be_phy_set_txpower_by_rate_base()
255 if (path > RF90_PATH_D) { _rtl8723be_phy_set_txpower_by_rate_base()
258 path); _rtl8723be_phy_set_txpower_by_rate_base()
265 rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value; _rtl8723be_phy_set_txpower_by_rate_base()
268 rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value; _rtl8723be_phy_set_txpower_by_rate_base()
271 rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value; _rtl8723be_phy_set_txpower_by_rate_base()
274 rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value; _rtl8723be_phy_set_txpower_by_rate_base()
279 rate_section, path, txnum); _rtl8723be_phy_set_txpower_by_rate_base()
291 u8 band, u8 path, u8 txnum, _rtl8723be_phy_get_txpower_by_rate_base()
297 if (path > RF90_PATH_D) { _rtl8723be_phy_get_txpower_by_rate_base()
300 path); _rtl8723be_phy_get_txpower_by_rate_base()
307 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0]; _rtl8723be_phy_get_txpower_by_rate_base()
310 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1]; _rtl8723be_phy_get_txpower_by_rate_base()
313 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2]; _rtl8723be_phy_get_txpower_by_rate_base()
316 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3]; _rtl8723be_phy_get_txpower_by_rate_base()
321 rate_section, path, txnum); _rtl8723be_phy_get_txpower_by_rate_base()
338 u8 base = 0, path = 0; _rtl8723be_phy_store_txpower_by_rate_base() local
340 for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) { _rtl8723be_phy_store_txpower_by_rate_base()
341 if (path == RF90_PATH_A) { _rtl8723be_phy_store_txpower_by_rate_base()
343 [BAND_ON_2_4G][path][RF_1TX][3] >> 24) & 0xFF; _rtl8723be_phy_store_txpower_by_rate_base()
346 BAND_ON_2_4G, path, CCK, RF_1TX, base); _rtl8723be_phy_store_txpower_by_rate_base()
347 } else if (path == RF90_PATH_B) { _rtl8723be_phy_store_txpower_by_rate_base()
349 [BAND_ON_2_4G][path][RF_1TX][3] >> 0) & 0xFF; _rtl8723be_phy_store_txpower_by_rate_base()
353 path, CCK, _rtl8723be_phy_store_txpower_by_rate_base()
357 [BAND_ON_2_4G][path][RF_1TX][1] >> 24) & 0xFF; _rtl8723be_phy_store_txpower_by_rate_base()
360 path, OFDM, RF_1TX, _rtl8723be_phy_store_txpower_by_rate_base()
364 [BAND_ON_2_4G][path][RF_1TX][5] >> 24) & 0xFF; _rtl8723be_phy_store_txpower_by_rate_base()
367 path, HT_MCS0_MCS7, _rtl8723be_phy_store_txpower_by_rate_base()
371 [BAND_ON_2_4G][path][RF_2TX][7] >> 24) & 0xFF; _rtl8723be_phy_store_txpower_by_rate_base()
374 path, HT_MCS8_MCS15, _rtl8723be_phy_store_txpower_by_rate_base()
877 static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path, _rtl8723be_phy_get_ratesection_intxpower_byrate() argument
889 if (path == RF90_PATH_A) _rtl8723be_phy_get_ratesection_intxpower_byrate()
891 else if (path == RF90_PATH_B) _rtl8723be_phy_get_ratesection_intxpower_byrate()
1016 static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path, _rtl8723be_get_txpower_index() argument
1031 txpower = rtlefuse->txpwrlevel_cck[path][index]; _rtl8723be_get_txpower_index()
1033 txpower = rtlefuse->txpwrlevel_ht40_1s[path][index]; _rtl8723be_get_txpower_index()
1057 path, rate); _rtl8723be_get_txpower_index()
1068 u8 power_index, u8 path, u8 rate) _rtl8723be_phy_set_txpower_index()
1071 if (path == RF90_PATH_A) { _rtl8723be_phy_set_txpower_index()
1529 /* switch to path A */ _rtl8723be_phy_path_a_iqk()
1531 /* enable path A PA in TXIQK mode */ _rtl8723be_phy_path_a_iqk()
1538 /* path-A IQK setting */ _rtl8723be_phy_path_a_iqk()
1542 /* path-A IQK setting */ _rtl8723be_phy_path_a_iqk()
1557 /* One shot, path A LOK & IQK */ _rtl8723be_phy_path_a_iqk()
1603 /* switch to path A */ _rtl8723be_phy_path_a_rx_iqk()
1619 /* path-A IQK setting */ _rtl8723be_phy_path_a_rx_iqk()
1636 /* One shot, path A LOK & IQK */ _rtl8723be_phy_path_a_rx_iqk()
1690 /* path-A IQK setting */ _rtl8723be_phy_path_a_rx_iqk()
1707 /* One shot, path A LOK & IQK */ _rtl8723be_phy_path_a_rx_iqk()
1749 /* switch to path B */ _rtl8723be_phy_path_b_iqk()
1752 /* enable path B PA in TXIQK mode */ _rtl8723be_phy_path_b_iqk()
1760 /* path-A IQK setting */ _rtl8723be_phy_path_b_iqk()
1777 /* One shot, path B LOK & IQK */ _rtl8723be_phy_path_b_iqk()
1822 /* switch to path B */ _rtl8723be_phy_path_b_rx_iqk()
1840 /* path-B IQK setting */ _rtl8723be_phy_path_b_rx_iqk()
1856 /* One shot, path B TXIQK @ RXIQK */ _rtl8723be_phy_path_b_rx_iqk()
1910 /* path-B IQK setting */ _rtl8723be_phy_path_b_rx_iqk()
1926 /* One shot, path B LOK & IQK */ _rtl8723be_phy_path_b_rx_iqk()
2006 u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */ _rtl8723be_phy_simularity_compare()
2056 if (!(simularity_bitmap & 0x03)) { /* path A TX OK */ _rtl8723be_phy_simularity_compare()
2060 if (!(simularity_bitmap & 0x0c)) { /* path A RX OK */ _rtl8723be_phy_simularity_compare()
2064 if (!(simularity_bitmap & 0x30)) { /* path B TX OK */ _rtl8723be_phy_simularity_compare()
2068 if (!(simularity_bitmap & 0xc0)) { /* path B RX OK */ _rtl8723be_phy_simularity_compare()
2134 /* path A TX IQK */ _rtl8723be_phy_iq_calibrate()
2150 /* path A RX IQK */ _rtl8723be_phy_iq_calibrate()
2170 /* path B TX IQK */ _rtl8723be_phy_iq_calibrate()
2187 /* path B RX IQK */ _rtl8723be_phy_iq_calibrate()
247 _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path, u8 rate_section, u8 txnum, u8 value) _rtl8723be_phy_set_txpower_by_rate_base() argument
290 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path, u8 txnum, u8 rate_section) _rtl8723be_phy_get_txpower_by_rate_base() argument
1067 _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw, u8 power_index, u8 path, u8 rate) _rtl8723be_phy_set_txpower_index() argument
H A Dhw.c1762 u32 path, addr = EEPROM_TX_PWR_INX, group, cnt = 0; _rtl8723be_read_power_value_fromprom() local
1773 for (path = 0; path < MAX_RF_PATH; path++) { _rtl8723be_read_power_value_fromprom()
1776 pw2g->index_cck_base[path][group] = 0x2D; _rtl8723be_read_power_value_fromprom()
1777 pw2g->index_bw40_base[path][group] = 0x2D; _rtl8723be_read_power_value_fromprom()
1781 pw2g->bw20_diff[path][0] = 0x02; _rtl8723be_read_power_value_fromprom()
1782 pw2g->ofdm_diff[path][0] = 0x04; _rtl8723be_read_power_value_fromprom()
1784 pw2g->bw20_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1785 pw2g->bw40_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1786 pw2g->cck_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1787 pw2g->ofdm_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1794 for (path = 0; path < MAX_RF_PATH; path++) { _rtl8723be_read_power_value_fromprom()
1797 pw2g->index_cck_base[path][group] = hwinfo[addr++]; _rtl8723be_read_power_value_fromprom()
1798 if (pw2g->index_cck_base[path][group] == 0xFF) _rtl8723be_read_power_value_fromprom()
1799 pw2g->index_cck_base[path][group] = 0x2D; _rtl8723be_read_power_value_fromprom()
1803 pw2g->index_bw40_base[path][group] = hwinfo[addr++]; _rtl8723be_read_power_value_fromprom()
1804 if (pw2g->index_bw40_base[path][group] == 0xFF) _rtl8723be_read_power_value_fromprom()
1805 pw2g->index_bw40_base[path][group] = 0x2D; _rtl8723be_read_power_value_fromprom()
1809 pw2g->bw40_diff[path][cnt] = 0; _rtl8723be_read_power_value_fromprom()
1811 pw2g->bw20_diff[path][cnt] = 0x02; _rtl8723be_read_power_value_fromprom()
1813 pw2g->bw20_diff[path][cnt] = _rtl8723be_read_power_value_fromprom()
1816 if (pw2g->bw20_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1817 pw2g->bw20_diff[path][cnt] |= _rtl8723be_read_power_value_fromprom()
1822 pw2g->ofdm_diff[path][cnt] = 0x04; _rtl8723be_read_power_value_fromprom()
1824 pw2g->ofdm_diff[path][cnt] = _rtl8723be_read_power_value_fromprom()
1827 if (pw2g->ofdm_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1828 pw2g->ofdm_diff[path][cnt] |= _rtl8723be_read_power_value_fromprom()
1831 pw2g->cck_diff[path][cnt] = 0; _rtl8723be_read_power_value_fromprom()
1835 pw2g->bw40_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1837 pw2g->bw40_diff[path][cnt] = _rtl8723be_read_power_value_fromprom()
1839 if (pw2g->bw40_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1840 pw2g->bw40_diff[path][cnt] |= _rtl8723be_read_power_value_fromprom()
1845 pw2g->bw20_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1847 pw2g->bw20_diff[path][cnt] = _rtl8723be_read_power_value_fromprom()
1849 if (pw2g->bw20_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1850 pw2g->bw20_diff[path][cnt] |= _rtl8723be_read_power_value_fromprom()
1856 pw2g->ofdm_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1858 pw2g->ofdm_diff[path][cnt] = _rtl8723be_read_power_value_fromprom()
1860 if (pw2g->ofdm_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1861 pw2g->ofdm_diff[path][cnt] |= _rtl8723be_read_power_value_fromprom()
1866 pw2g->cck_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1868 pw2g->cck_diff[path][cnt] = _rtl8723be_read_power_value_fromprom()
1870 if (pw2g->cck_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1871 pw2g->cck_diff[path][cnt] |= _rtl8723be_read_power_value_fromprom()
1880 pw5g->index_bw40_base[path][group] = hwinfo[addr++]; _rtl8723be_read_power_value_fromprom()
1881 if (pw5g->index_bw40_base[path][group] == 0xFF) _rtl8723be_read_power_value_fromprom()
1882 pw5g->index_bw40_base[path][group] = 0xFE; _rtl8723be_read_power_value_fromprom()
1887 pw5g->bw40_diff[path][cnt] = 0; _rtl8723be_read_power_value_fromprom()
1890 pw5g->bw20_diff[path][cnt] = 0; _rtl8723be_read_power_value_fromprom()
1892 pw5g->bw20_diff[path][0] = _rtl8723be_read_power_value_fromprom()
1894 if (pw5g->bw20_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1895 pw5g->bw20_diff[path][cnt] |= _rtl8723be_read_power_value_fromprom()
1900 pw5g->ofdm_diff[path][cnt] = 0x04; _rtl8723be_read_power_value_fromprom()
1902 pw5g->ofdm_diff[path][0] = _rtl8723be_read_power_value_fromprom()
1904 if (pw5g->ofdm_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1905 pw5g->ofdm_diff[path][cnt] |= _rtl8723be_read_power_value_fromprom()
1911 pw5g->bw40_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1913 pw5g->bw40_diff[path][cnt] = _rtl8723be_read_power_value_fromprom()
1915 if (pw5g->bw40_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1916 pw5g->bw40_diff[path][cnt] |= 0xF0; _rtl8723be_read_power_value_fromprom()
1920 pw5g->bw20_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1922 pw5g->bw20_diff[path][cnt] = _rtl8723be_read_power_value_fromprom()
1924 if (pw5g->bw20_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1925 pw5g->bw20_diff[path][cnt] |= 0xF0; _rtl8723be_read_power_value_fromprom()
1932 pw5g->ofdm_diff[path][1] = 0xFE; _rtl8723be_read_power_value_fromprom()
1933 pw5g->ofdm_diff[path][2] = 0xFE; _rtl8723be_read_power_value_fromprom()
1935 pw5g->ofdm_diff[path][1] = (hwinfo[addr] & 0xf0) >> 4; _rtl8723be_read_power_value_fromprom()
1936 pw5g->ofdm_diff[path][2] = (hwinfo[addr] & 0x0f); _rtl8723be_read_power_value_fromprom()
1941 pw5g->ofdm_diff[path][3] = 0xFE; _rtl8723be_read_power_value_fromprom()
1943 pw5g->ofdm_diff[path][3] = (hwinfo[addr] & 0x0f); _rtl8723be_read_power_value_fromprom()
1947 if (pw5g->ofdm_diff[path][cnt] == 0xFF) _rtl8723be_read_power_value_fromprom()
1948 pw5g->ofdm_diff[path][cnt] = 0xFE; _rtl8723be_read_power_value_fromprom()
1949 else if (pw5g->ofdm_diff[path][cnt] & BIT(3)) _rtl8723be_read_power_value_fromprom()
1950 pw5g->ofdm_diff[path][cnt] |= 0xF0; _rtl8723be_read_power_value_fromprom()
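
The recurring "if (diff & BIT(3)) diff |= 0xF0;" pattern in the hw.c hits above is 4-bit two's-complement sign extension: the efuse stores each power diff as a signed nibble, and OR-ing in the upper four bits turns a negative nibble into a proper signed 8-bit value. A minimal standalone sketch of the same arithmetic (the helper name is illustrative, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))

    /* Sign-extend a 4-bit two's-complement field to 8 bits, mirroring
     * the "if (diff & BIT(3)) diff |= 0xF0;" pattern above. */
    static int8_t sign_extend_nibble(uint8_t nibble)
    {
        uint8_t v = nibble & 0x0F;

        if (v & BIT(3))     /* bit 3 set => negative nibble */
            v |= 0xF0;      /* propagate the sign bit upward */
        return (int8_t)v;
    }

    int main(void)
    {
        printf("%d\n", sign_extend_nibble(0x3)); /* prints 3 */
        printf("%d\n", sign_extend_nibble(0xE)); /* prints -2 */
        return 0;
    }
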
/linux-4.4.14/fs/kernfs/
H A Dsymlink.c50 struct kernfs_node *target, char *path) kernfs_get_target_path()
53 char *s = path; kernfs_get_target_path()
82 if ((s - path) + len > PATH_MAX) kernfs_get_target_path()
101 static int kernfs_getlink(struct dentry *dentry, char *path) kernfs_getlink() argument
109 error = kernfs_get_target_path(parent, target, path); kernfs_getlink()
49 kernfs_get_target_path(struct kernfs_node *parent, struct kernfs_node *target, char *path) kernfs_get_target_path() argument
/linux-4.4.14/arch/parisc/include/asm/
H A Dhardware.h108 struct hardware_path *path);
116 extern char *print_pa_hwpath(struct parisc_device *dev, char *path);
117 extern char *print_pci_hwpath(struct pci_dev *dev, char *path);
118 extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
121 extern void device_to_hwpath(struct device *dev, struct hardware_path *path);
/linux-4.4.14/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/
H A Dphy.c765 u8 band, u8 path, _rtl8821ae_phy_set_txpower_by_rate_base()
772 if (path > RF90_PATH_D) { _rtl8821ae_phy_set_txpower_by_rate_base()
774 "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n", path); _rtl8821ae_phy_set_txpower_by_rate_base()
781 rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
784 rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
787 rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
790 rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
793 rtlphy->txpwr_by_rate_base_24g[path][txnum][4] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
796 rtlphy->txpwr_by_rate_base_24g[path][txnum][5] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
801 rate_section, path, txnum); _rtl8821ae_phy_set_txpower_by_rate_base()
807 rtlphy->txpwr_by_rate_base_5g[path][txnum][0] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
810 rtlphy->txpwr_by_rate_base_5g[path][txnum][1] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
813 rtlphy->txpwr_by_rate_base_5g[path][txnum][2] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
816 rtlphy->txpwr_by_rate_base_5g[path][txnum][3] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
819 rtlphy->txpwr_by_rate_base_5g[path][txnum][4] = value; _rtl8821ae_phy_set_txpower_by_rate_base()
824 rate_section, path, txnum); _rtl8821ae_phy_set_txpower_by_rate_base()
834 u8 band, u8 path, _rtl8821ae_phy_get_txpower_by_rate_base()
841 if (path > RF90_PATH_D) { _rtl8821ae_phy_get_txpower_by_rate_base()
844 path); _rtl8821ae_phy_get_txpower_by_rate_base()
851 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0]; _rtl8821ae_phy_get_txpower_by_rate_base()
854 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1]; _rtl8821ae_phy_get_txpower_by_rate_base()
857 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2]; _rtl8821ae_phy_get_txpower_by_rate_base()
860 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3]; _rtl8821ae_phy_get_txpower_by_rate_base()
863 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][4]; _rtl8821ae_phy_get_txpower_by_rate_base()
866 value = rtlphy->txpwr_by_rate_base_24g[path][txnum][5]; _rtl8821ae_phy_get_txpower_by_rate_base()
871 rate_section, path, txnum); _rtl8821ae_phy_get_txpower_by_rate_base()
877 value = rtlphy->txpwr_by_rate_base_5g[path][txnum][0]; _rtl8821ae_phy_get_txpower_by_rate_base()
880 value = rtlphy->txpwr_by_rate_base_5g[path][txnum][1]; _rtl8821ae_phy_get_txpower_by_rate_base()
883 value = rtlphy->txpwr_by_rate_base_5g[path][txnum][2]; _rtl8821ae_phy_get_txpower_by_rate_base()
886 value = rtlphy->txpwr_by_rate_base_5g[path][txnum][3]; _rtl8821ae_phy_get_txpower_by_rate_base()
889 value = rtlphy->txpwr_by_rate_base_5g[path][txnum][4]; _rtl8821ae_phy_get_txpower_by_rate_base()
894 rate_section, path, txnum); _rtl8821ae_phy_get_txpower_by_rate_base()
910 u8 base = 0, path = 0; _rtl8821ae_phy_store_txpower_by_rate_base() local
912 for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) { _rtl8821ae_phy_store_txpower_by_rate_base()
913 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
915 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, CCK, RF_1TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
917 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
919 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
921 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
923 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
925 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
927 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
929 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
931 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
933 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
935 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
937 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
939 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, OFDM, RF_1TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
941 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
943 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
945 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
947 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
949 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
951 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
953 rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF; _rtl8821ae_phy_store_txpower_by_rate_base()
955 _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base); _rtl8821ae_phy_store_txpower_by_rate_base()
1000 "No power limit table of the specified band %d, bandwidth %d, ratesection %d, channel %d, rf path %d\n", _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit()
2269 static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate) _rtl8821ae_phy_get_ratesection_intxpower_byrate() argument
2535 u8 band, u8 path, u8 rate) _rtl8821ae_phy_get_txpower_by_rate()
2543 rate_section = _rtl8821ae_phy_get_ratesection_intxpower_byrate(path, rate); _rtl8821ae_phy_get_txpower_by_rate()
2616 tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][path] _rtl8821ae_phy_get_txpower_by_rate()
2622 rtlphy->current_chan_bw, path, rate, _rtl8821ae_phy_get_txpower_by_rate()
2645 static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path, _rtl8821ae_get_txpower_index() argument
2667 txpower = rtlefuse->txpwrlevel_cck[path][index]; _rtl8821ae_get_txpower_index()
2669 txpower = rtlefuse->txpwrlevel_ht40_1s[path][index]; _rtl8821ae_get_txpower_index()
2675 txpower += rtlefuse->txpwr_legacyhtdiff[path][TX_1S]; _rtl8821ae_get_txpower_index()
2680 txpower += rtlefuse->txpwr_ht20diff[path][TX_1S]; _rtl8821ae_get_txpower_index()
2683 txpower += rtlefuse->txpwr_ht20diff[path][TX_2S]; _rtl8821ae_get_txpower_index()
2687 txpower += rtlefuse->txpwr_ht40diff[path][TX_1S]; _rtl8821ae_get_txpower_index()
2690 txpower += rtlefuse->txpwr_ht40diff[path][TX_2S]; _rtl8821ae_get_txpower_index()
2695 txpower += rtlefuse->txpwr_ht40diff[path][TX_1S]; _rtl8821ae_get_txpower_index()
2699 txpower += rtlefuse->txpwr_ht40diff[path][TX_2S]; _rtl8821ae_get_txpower_index()
2703 txpower = rtlefuse->txpwr_5g_bw40base[path][index]; _rtl8821ae_get_txpower_index()
2710 txpower += rtlefuse->txpwr_5g_ofdmdiff[path][TX_1S]; _rtl8821ae_get_txpower_index()
2716 txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_1S]; _rtl8821ae_get_txpower_index()
2720 txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_2S]; _rtl8821ae_get_txpower_index()
2725 txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_1S]; _rtl8821ae_get_txpower_index()
2729 txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_2S]; _rtl8821ae_get_txpower_index()
2743 txpower = rtlefuse->txpwr_5g_bw80base[path][index] _rtl8821ae_get_txpower_index()
2744 + rtlefuse->txpwr_5g_bw80diff[path][TX_1S]; _rtl8821ae_get_txpower_index()
2748 txpower = rtlefuse->txpwr_5g_bw80base[path][index] _rtl8821ae_get_txpower_index()
2749 + rtlefuse->txpwr_5g_bw80diff[path][TX_1S] _rtl8821ae_get_txpower_index()
2750 + rtlefuse->txpwr_5g_bw80diff[path][TX_2S]; _rtl8821ae_get_txpower_index()
2756 path, rate); _rtl8821ae_get_txpower_index()
2765 txpower += rtlpriv->dm.remnant_ofdm_swing_idx[path]; _rtl8821ae_get_txpower_index()
2776 u8 power_index, u8 path, u8 rate) _rtl8821ae_phy_set_txpower_index()
2780 if (path == RF90_PATH_A) { _rtl8821ae_phy_set_txpower_index()
2979 } else if (path == RF90_PATH_B) { _rtl8821ae_phy_set_txpower_index()
3185 u8 *array, u8 path, _rtl8821ae_phy_set_txpower_level_by_path()
3195 _rtl8821ae_get_txpower_index(hw, path, array[i], _rtl8821ae_phy_set_txpower_level_by_path()
3198 _rtl8821ae_phy_set_txpower_index(hw, power_index, path, _rtl8821ae_phy_set_txpower_level_by_path()
3204 u8 bw, u8 channel, u8 path) _rtl8821ae_phy_txpower_training_by_path()
3212 if (path >= rtlphy->num_total_rfpath) _rtl8821ae_phy_txpower_training_by_path()
3216 if (path == RF90_PATH_A) { _rtl8821ae_phy_txpower_training_by_path()
3242 u8 channel, u8 path) rtl8821ae_phy_set_txpower_level_by_path()
3277 _rtl8821ae_phy_set_txpower_level_by_path(hw, cck_rates, path, channel, rtl8821ae_phy_set_txpower_level_by_path()
3280 _rtl8821ae_phy_set_txpower_level_by_path(hw, ofdm_rates, path, channel, rtl8821ae_phy_set_txpower_level_by_path()
3282 _rtl8821ae_phy_set_txpower_level_by_path(hw, ht_rates_1t, path, channel, rtl8821ae_phy_set_txpower_level_by_path()
3284 _rtl8821ae_phy_set_txpower_level_by_path(hw, vht_rates_1t, path, channel, rtl8821ae_phy_set_txpower_level_by_path()
3288 _rtl8821ae_phy_set_txpower_level_by_path(hw, ht_rates_2t, path, rtl8821ae_phy_set_txpower_level_by_path()
3291 _rtl8821ae_phy_set_txpower_level_by_path(hw, vht_rates_2t, path, rtl8821ae_phy_set_txpower_level_by_path()
3297 channel, path); rtl8821ae_phy_set_txpower_level_by_path()
3305 u8 path = 0; rtl8821ae_phy_set_txpower_level() local
3307 for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; ++path) rtl8821ae_phy_set_txpower_level()
3308 rtl8821ae_phy_set_txpower_level_by_path(hw, channel, path); rtl8821ae_phy_set_txpower_level()
3546 u8 path; rtl8821ae_phy_sw_chnl_callback() local
3566 for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; path++) { rtl8821ae_phy_sw_chnl_callback()
3575 rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW, rtl8821ae_phy_sw_chnl_callback()
3578 rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW, rtl8821ae_phy_sw_chnl_callback()
3589 rtl8821ae_phy_set_rf_reg(hw, path, RF_APK, rtl8821ae_phy_sw_chnl_callback()
3727 enum radio_path path, u32 tx_x, u32 tx_y) _rtl8821ae_iqk_tx_fill_iqc()
3730 switch (path) { _rtl8821ae_iqk_tx_fill_iqc()
3753 enum radio_path path, u32 rx_x, u32 rx_y) _rtl8821ae_iqk_rx_fill_iqc()
3756 switch (path) { _rtl8821ae_iqk_rx_fill_iqc()
3775 static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path) _rtl8821ae_iqk_tx() argument
3797 switch (path) { _rtl8821ae_iqk_tx()
3799 temp_reg65 = rtl_get_rfreg(hw, path, 0x65, 0xffffffff); _rtl8821ae_iqk_tx()
3823 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80002); _rtl8821ae_iqk_tx()
3824 rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x3); /* BW 20M */ _rtl8821ae_iqk_tx()
3825 rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000); _rtl8821ae_iqk_tx()
3826 rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f); _rtl8821ae_iqk_tx()
3827 rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3); _rtl8821ae_iqk_tx()
3828 rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5); _rtl8821ae_iqk_tx()
3829 rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); _rtl8821ae_iqk_tx()
3856 rtl_set_rfreg(hw, path, 0x58, 0x7fe00, rtl_get_rfreg(hw, path, 0x8, 0xffc00)); /* Load LOK */ _rtl8821ae_iqk_tx()
3860 rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x1); _rtl8821ae_iqk_tx()
3863 rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x0); _rtl8821ae_iqk_tx()
3873 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); _rtl8821ae_iqk_tx()
3874 rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000); _rtl8821ae_iqk_tx()
3875 rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f); _rtl8821ae_iqk_tx()
3876 rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3); _rtl8821ae_iqk_tx()
3877 rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5); _rtl8821ae_iqk_tx()
3878 rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); _rtl8821ae_iqk_tx()
3879 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); _rtl8821ae_iqk_tx()
4037 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); _rtl8821ae_iqk_tx()
4038 rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); _rtl8821ae_iqk_tx()
4039 rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029); _rtl8821ae_iqk_tx()
4040 rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb); _rtl8821ae_iqk_tx()
4041 rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65); _rtl8821ae_iqk_tx()
4042 rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); _rtl8821ae_iqk_tx()
4043 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); _rtl8821ae_iqk_tx()
4147 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); _rtl8821ae_iqk_tx()
4148 rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); _rtl8821ae_iqk_tx()
4149 rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f); _rtl8821ae_iqk_tx()
4150 rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb); _rtl8821ae_iqk_tx()
4151 rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001); _rtl8821ae_iqk_tx()
4152 rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8); _rtl8821ae_iqk_tx()
4153 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); _rtl8821ae_iqk_tx()
4232 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); _rtl8821ae_iqk_tx()
4233 rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); _rtl8821ae_iqk_tx()
4234 rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029); _rtl8821ae_iqk_tx()
4235 rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb); _rtl8821ae_iqk_tx()
4236 rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65); _rtl8821ae_iqk_tx()
4237 rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); _rtl8821ae_iqk_tx()
4238 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); _rtl8821ae_iqk_tx()
4304 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); _rtl8821ae_iqk_tx()
4305 rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); _rtl8821ae_iqk_tx()
4306 rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f); _rtl8821ae_iqk_tx()
4307 rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb); _rtl8821ae_iqk_tx()
4308 rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001); _rtl8821ae_iqk_tx()
4309 rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8); _rtl8821ae_iqk_tx()
4310 rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); _rtl8821ae_iqk_tx()
4381 rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65); _rtl8821ae_iqk_tx()
4390 switch (path) { _rtl8821ae_iqk_tx()
4425 _rtl8821ae_iqk_tx_fill_iqc(hw, path, tx_x, tx_y); /* ? */ _rtl8821ae_iqk_tx()
4427 _rtl8821ae_iqk_tx_fill_iqc(hw, path, 0x200, 0x0); _rtl8821ae_iqk_tx()
4455 _rtl8821ae_iqk_rx_fill_iqc(hw, path, rx_x, rx_y); _rtl8821ae_iqk_tx()
4457 _rtl8821ae_iqk_rx_fill_iqc(hw, path, 0x200, 0x0); _rtl8821ae_iqk_tx()
4465 enum radio_path path, _rtl8821ae_iqk_restore_rf()
4474 rtl_set_rfreg(hw, path, backup_rf_reg[i], RFREG_OFFSET_MASK, _rtl8821ae_iqk_restore_rf()
4477 switch (path) { _rtl8821ae_iqk_restore_rf()
764 _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path, u8 rate_section, u8 txnum, u8 value) _rtl8821ae_phy_set_txpower_by_rate_base() argument
833 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path, u8 txnum, u8 rate_section) _rtl8821ae_phy_get_txpower_by_rate_base() argument
2534 _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw, u8 band, u8 path, u8 rate) _rtl8821ae_phy_get_txpower_by_rate() argument
2775 _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw, u8 power_index, u8 path, u8 rate) _rtl8821ae_phy_set_txpower_index() argument
3184 _rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, u8 *array, u8 path, u8 channel, u8 size) _rtl8821ae_phy_set_txpower_level_by_path() argument
3203 _rtl8821ae_phy_txpower_training_by_path(struct ieee80211_hw *hw, u8 bw, u8 channel, u8 path) _rtl8821ae_phy_txpower_training_by_path() argument
3241 rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, u8 channel, u8 path) rtl8821ae_phy_set_txpower_level_by_path() argument
3726 _rtl8821ae_iqk_tx_fill_iqc(struct ieee80211_hw *hw, enum radio_path path, u32 tx_x, u32 tx_y) _rtl8821ae_iqk_tx_fill_iqc() argument
3752 _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw, enum radio_path path, u32 rx_x, u32 rx_y) _rtl8821ae_iqk_rx_fill_iqc() argument
4464 _rtl8821ae_iqk_restore_rf(struct ieee80211_hw *hw, enum radio_path path, u32 *backup_rf_reg, u32 *rf_backup, u32 rf_reg_num) _rtl8821ae_iqk_restore_rf() argument
/linux-4.4.14/tools/usb/usbip/libsrc/
H A Dvhci_driver.c169 char path[PATH_MAX+1]; read_record() local
179 snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport); read_record()
181 file = fopen(path, "r"); read_record()
306 const char *path; usbip_vhci_attach_device2() local
313 path = udev_device_get_syspath(vhci_driver->hc_device); usbip_vhci_attach_device2()
315 path, attr_attach); usbip_vhci_attach_device2()
316 dbg("attach attribute path: %s", attach_attr_path); usbip_vhci_attach_device2()
348 const char *path; usbip_vhci_detach_device() local
354 path = udev_device_get_syspath(vhci_driver->hc_device); usbip_vhci_detach_device()
356 path, attr_detach); usbip_vhci_detach_device()
357 dbg("detach attribute path: %s", detach_attr_path); usbip_vhci_detach_device()
H A Dusbip_common.c90 dbg("%-20s = %s", "path", udev->path); dump_usb_device()
185 const char *path, *name; read_usb_device() local
202 path = udev_device_get_syspath(sdev); read_usb_device()
205 strncpy(udev->path, path, SYSFS_PATH_MAX); read_usb_device()
H A Dusbip_host_driver.c48 udev->path); read_attr_usbip_status()
121 const char *path; refresh_exported_devices() local
131 path = udev_list_entry_get_name(dev_list_entry); udev_list_entry_foreach()
132 dev = udev_device_new_from_syspath(udev_context, path); udev_list_entry_foreach()
139 edev = usbip_exported_device_new(path); udev_list_entry_foreach()
248 edev->udev.path, attr_name); usbip_host_export_device()
/linux-4.4.14/lib/
H A Dearlycpio.c52 * @path: The directory to search for, including a slash at the end
58 * to find all files inside of a directory path.
61 * filename (with the directory path cut off) of the found file.
63 * pass the absolute path of the filename in the cpio and make sure
67 struct cpio_data find_cpio_data(const char *path, void *data, find_cpio_data() argument
75 size_t mypathsize = strlen(path); find_cpio_data()
127 !memcmp(p, path, mypathsize)) { find_cpio_data()
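
The find_cpio_data() doc comment above describes scanning an in-memory, uncompressed cpio archive (such as the early initramfs) for entries under a directory prefix, using @nextoff to resume the scan after each hit. A kernel-context sketch of that iteration, assuming the signature and struct cpio_data fields from include/linux/earlycpio.h in this tree; the directory name and function name are illustrative:

    #include <linux/earlycpio.h>
    #include <linux/printk.h>

    /* Enumerate every file under one directory of an in-memory cpio
     * image, restarting at nextoff after each match as the comment
     * above suggests. */
    static void scan_cpio_dir(void *cpio, size_t len)
    {
        struct cpio_data cd;
        long offset = 0;

        while (len > 0) {
            cd = find_cpio_data("kernel/fw/", cpio, len, &offset);
            if (!cd.data)                   /* no (more) matches */
                break;
            pr_info("found %s (%zu bytes)\n", cd.name, cd.size);
            cpio = (char *)cpio + offset;   /* skip past this member */
            len -= offset;
        }
    }
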
/linux-4.4.14/drivers/acpi/
H A Dutils.c472 * acpi_handle_path: Return the object path of handle
490 * acpi_handle_printk: Print message with ACPI prefix and object path
493 * a message with ACPI prefix and object path. This function acquires
494 * the global namespace mutex to obtain an object path. In interrupt
495 * context, it shows the object path as <n/a>.
502 const char *path; acpi_handle_printk() local
508 path = acpi_handle_path(handle); acpi_handle_printk()
509 printk("%sACPI: %s: %pV", level, path ? path : "<n/a>" , &vaf); acpi_handle_printk()
512 kfree(path); acpi_handle_printk()
518 * __acpi_handle_debug: pr_debug with ACPI prefix and object path
521 * prints a message with ACPI prefix and object path. This function
522 * acquires the global namespace mutex to obtain an object path. In
523 * interrupt context, it shows the object path as <n/a>.
531 const char *path; __acpi_handle_debug() local
537 path = acpi_handle_path(handle); __acpi_handle_debug()
538 __dynamic_pr_debug(descriptor, "ACPI: %s: %pV", path ? path : "<n/a>", &vaf); __acpi_handle_debug()
541 kfree(path); __acpi_handle_debug()
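
The utils.c comments above describe helpers that prefix log messages with an ACPI object path: acpi_handle_printk() looks the path up under the global namespace mutex and prints "<n/a>" when called in interrupt context. A one-line kernel-context usage sketch (the handle and message are illustrative):

    #include <linux/acpi.h>

    /* Logs e.g. "ACPI: \_SB_.PCI0.LPCB: probed"; the path falls back
     * to "<n/a>" in interrupt context, per the comment above. */
    static void report_probe(acpi_handle handle)
    {
        acpi_handle_printk(KERN_INFO, handle, "probed\n");
    }
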
/linux-4.4.14/fs/nfs/
H A Dnfs4namespace.c28 * Convert the NFSv4 pathname components into a standard posix path.
57 * return the path component of "<server>:<path>"
58 * nfspath - the "<server>:<path>" string
81 * Determine the mount path as a string
86 char *path = nfs_path(&limit, dentry, buffer, buflen, nfs4_path() local
88 if (!IS_ERR(path)) { nfs4_path()
89 char *path_component = nfs_path_component(path, limit); nfs4_path()
93 return path; nfs4_path()
98 * believe to be the server path to this dentry
104 const char *path, *fs_path; nfs4_validate_fspath() local
106 path = nfs4_path(dentry, page, PAGE_SIZE); nfs4_validate_fspath()
107 if (IS_ERR(path)) nfs4_validate_fspath()
108 return PTR_ERR(path); nfs4_validate_fspath()
114 if (strncmp(path, fs_path, strlen(fs_path)) != 0) { nfs4_validate_fspath()
115 dprintk("%s: path %s does not begin with fsroot %s\n", nfs4_validate_fspath()
116 __func__, path, fs_path); nfs4_validate_fspath()
321 /* Ensure fs path is a prefix of current dentry path */ nfs_follow_referral()
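
The nfs4namespace.c hits reduce to two small string operations: taking the path component of a "<server>:<path>" device name, and checking that the mount path begins with the server-side fs root (the strncmp() at line 114 above). A standalone sketch of both, with hypothetical helper names:

    #include <stdio.h>
    #include <string.h>

    /* Return the path component of "<server>:<path>", or NULL if the
     * devname has no colon. */
    static const char *path_component(const char *devname)
    {
        const char *colon = strchr(devname, ':');

        return colon ? colon + 1 : NULL;
    }

    /* Prefix check mirroring nfs4_validate_fspath(): the dentry path
     * must begin with the server's fs root. */
    static int path_under_fsroot(const char *path, const char *fs_path)
    {
        return strncmp(path, fs_path, strlen(fs_path)) == 0;
    }

    int main(void)
    {
        printf("%s\n", path_component("srv:/export/home"));       /* /export/home */
        printf("%d\n", path_under_fsroot("/export/home", "/export")); /* 1 */
        return 0;
    }
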
H A Dnfsroot.c41 * Gero Kuhlmann : Fixed a bug which prevented BOOTP path name
44 * without giving a path name
46 * without giving a path name. Fix BOOTP request
57 * Martin Mares : Default path now contains host name instead of
86 /* Default path we try to mount. "%s" gets replaced by our IP address */
104 /* server:export path string passed to super.c */
180 * Parse out root export path and mount options from
183 * Copy the export path into @exppath.
191 * Set the NFS remote path root_nfs_parse_options()
210 * Decode the export directory path name and NFS options from
213 * root directory path.
255 * server:/path root_nfs_data()
H A Dnamespace.c31 * nfs_path - reconstruct the path given an arbitrary dentry
32 * @base - used to return pointer to the end of devname part of path
41 * This is mainly for use in figuring out the path on the
129 * @path - The mountpoint
139 struct vfsmount *nfs_d_automount(struct path *path) nfs_d_automount() argument
142 struct nfs_server *server = NFS_SERVER(d_inode(path->dentry)); nfs_d_automount()
149 if (IS_ROOT(path->dentry)) nfs_d_automount()
160 mnt = server->nfs_client->rpc_ops->submount(server, path->dentry, fh, fattr); nfs_d_automount()
/linux-4.4.14/fs/xfs/libxfs/
H A Dxfs_attr.c309 * NOTE: this is also the error path (EEXIST, etc). xfs_attr_set()
901 blk = &state->path.blk[ state->path.active-1 ]; xfs_attr_node_addname()
931 if (state->path.active == 1) { xfs_attr_node_addname()
1001 xfs_da3_fixhashpath(state, &state->path); xfs_attr_node_addname()
1078 blk = &state->path.blk[ state->path.active-1 ]; xfs_attr_node_addname()
1081 xfs_da3_fixhashpath(state, &state->path); xfs_attr_node_addname()
1086 if (retval && (state->path.active > 1)) { xfs_attr_node_addname()
1176 blk = &state->path.blk[ state->path.active-1 ]; xfs_attr_node_removename()
1212 blk = &state->path.blk[ state->path.active-1 ]; xfs_attr_node_removename()
1215 xfs_da3_fixhashpath(state, &state->path); xfs_attr_node_removename()
1220 if (retval && (state->path.active > 1)) { xfs_attr_node_removename()
1256 ASSERT(state->path.active == 1); xfs_attr_node_removename()
1257 ASSERT(state->path.blk[0].bp); xfs_attr_node_removename()
1258 state->path.blk[0].bp = NULL; xfs_attr_node_removename()
1306 xfs_da_state_path_t *path; xfs_attr_fillstate() local
1313 * Roll down the "path" in the state structure, storing the on-disk xfs_attr_fillstate()
1314 * block number for those buffers in the "path". xfs_attr_fillstate()
1316 path = &state->path; xfs_attr_fillstate()
1317 ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); xfs_attr_fillstate()
1318 for (blk = path->blk, level = 0; level < path->active; blk++, level++) { xfs_attr_fillstate()
1331 path = &state->altpath; xfs_attr_fillstate()
1332 ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); xfs_attr_fillstate()
1333 for (blk = path->blk, level = 0; level < path->active; blk++, level++) { xfs_attr_fillstate()
1354 xfs_da_state_path_t *path; xfs_attr_refillstate() local
1361 * Roll down the "path" in the state structure, storing the on-disk xfs_attr_refillstate()
1362 * block number for those buffers in the "path". xfs_attr_refillstate()
1364 path = &state->path; xfs_attr_refillstate()
1365 ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); xfs_attr_refillstate()
1366 for (blk = path->blk, level = 0; level < path->active; blk++, level++) { xfs_attr_refillstate()
1383 path = &state->altpath; xfs_attr_refillstate()
1384 ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); xfs_attr_refillstate()
1385 for (blk = path->blk, level = 0; level < path->active; blk++, level++) { xfs_attr_refillstate()
1429 blk = &state->path.blk[ state->path.active-1 ]; xfs_attr_node_get()
1446 for (i = 0; i < state->path.active; i++) { xfs_attr_node_get()
1447 xfs_trans_brelse(args->trans, state->path.blk[i].bp); xfs_attr_node_get()
1448 state->path.blk[i].bp = NULL; xfs_attr_node_get()
/linux-4.4.14/fs/cachefiles/
H A Ddaemon.c555 struct path path; cachefiles_daemon_cull() local
575 get_fs_pwd(current->fs, &path); cachefiles_daemon_cull()
577 if (!d_can_lookup(path.dentry)) cachefiles_daemon_cull()
581 ret = cachefiles_cull(cache, path.dentry, args); cachefiles_daemon_cull()
584 path_put(&path); cachefiles_daemon_cull()
589 path_put(&path); cachefiles_daemon_cull()
627 struct path path; cachefiles_daemon_inuse() local
647 get_fs_pwd(current->fs, &path); cachefiles_daemon_inuse()
649 if (!d_can_lookup(path.dentry)) cachefiles_daemon_inuse()
653 ret = cachefiles_check_in_use(cache, path.dentry, args); cachefiles_daemon_inuse()
656 path_put(&path); cachefiles_daemon_inuse()
661 path_put(&path); cachefiles_daemon_inuse()
678 struct path path = { cachefiles_has_space() local
696 ret = vfs_statfs(&path, &stats); cachefiles_has_space()
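
The daemon.c hits above all follow one reference-counting pattern: get_fs_pwd() pins the caller's working directory, d_can_lookup() verifies it is a directory, and path_put() drops the reference on every exit path. A kernel-context sketch of that pattern (with_cwd_statfs() is a hypothetical name):

    #include <linux/fs_struct.h>
    #include <linux/dcache.h>
    #include <linux/path.h>
    #include <linux/statfs.h>
    #include <linux/sched.h>

    static int with_cwd_statfs(struct kstatfs *stats)
    {
        struct path path;
        int ret;

        get_fs_pwd(current->fs, &path);     /* takes a reference */

        if (!d_can_lookup(path.dentry)) {   /* must be a directory */
            ret = -ENOTDIR;
            goto out;
        }
        ret = vfs_statfs(&path, stats);
    out:
        path_put(&path);                    /* pairs with get_fs_pwd() */
        return ret;
    }
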
/linux-4.4.14/drivers/atm/
H A DuPD98402.h47 #define uPD98402_CMR_PFRF 0x01 /* Send path FERF */
49 #define uPD98402_CMR_PAIS 0x04 /* Send path AIS */
74 #define uPD98402_ALM_PFRF 0x01 /* path FERF */
76 #define uPD98402_ALM_PAIS 0x04 /* path AIS */
83 #define uPD98402_PFM_PFEB 0x01 /* path FEBE */
91 #define uPD98402_IACM_PFRF 0x01 /* don't generate path FERF */
/linux-4.4.14/fs/ecryptfs/
H A Dkthread.c33 struct path path; member in struct:ecryptfs_open_req
77 *req->lower_file = dentry_open(&req->path, ecryptfs_threadfn()
142 req.path.dentry = lower_dentry; ecryptfs_privileged_open()
143 req.path.mnt = lower_mnt; ecryptfs_privileged_open()
149 (*lower_file) = dentry_open(&req.path, flags, cred); ecryptfs_privileged_open()
/linux-4.4.14/drivers/of/
H A Dunittest.c80 np = of_find_node_by_path("/testcase-data/missing-path"); of_unittest_find_node_by_name()
81 unittest(!np, "non-existent path returned node %s\n", np->full_name); of_unittest_find_node_by_name()
88 np = of_find_node_by_path("testcase-alias/missing-path"); of_unittest_find_node_by_name()
89 unittest(!np, "non-existent alias with relative path returned node %s\n", np->full_name); of_unittest_find_node_by_name()
94 "option path test failed\n"); of_unittest_find_node_by_name()
99 "option path test, subcase #1 failed\n"); of_unittest_find_node_by_name()
104 "option path test, subcase #2 failed\n"); of_unittest_find_node_by_name()
108 unittest(np, "NULL option path test failed\n"); of_unittest_find_node_by_name()
114 "option alias path test failed\n"); of_unittest_find_node_by_name()
120 "option alias path test, subcase #1 failed\n"); of_unittest_find_node_by_name()
124 unittest(np, "NULL option alias path test failed\n"); of_unittest_find_node_by_name()
712 const char *path; member in struct:__anon8751
715 { .path = "/testcase-data/match-node/name0", .data = "A", },
716 { .path = "/testcase-data/match-node/name1", .data = "B", },
717 { .path = "/testcase-data/match-node/a/name2", .data = "Ca", },
718 { .path = "/testcase-data/match-node/b/name2", .data = "Cb", },
719 { .path = "/testcase-data/match-node/c/name2", .data = "Cc", },
720 { .path = "/testcase-data/match-node/name3", .data = "E", },
721 { .path = "/testcase-data/match-node/name4", .data = "G", },
722 { .path = "/testcase-data/match-node/name5", .data = "H", },
723 { .path = "/testcase-data/match-node/name6", .data = "G", },
724 { .path = "/testcase-data/match-node/name7", .data = "I", },
725 { .path = "/testcase-data/match-node/name8", .data = "J", },
726 { .path = "/testcase-data/match-node/name9", .data = "K", },
736 np = of_find_node_by_path(match_node_tests[i].path); of_unittest_match_node()
739 match_node_tests[i].path); of_unittest_match_node()
746 match_node_tests[i].path); of_unittest_match_node()
752 match_node_tests[i].path, match_node_tests[i].data, of_unittest_match_node()
990 /* get the platform device instantiated at the path */ of_path_to_platform_device()
991 static struct platform_device *of_path_to_platform_device(const char *path) of_path_to_platform_device() argument
996 np = of_find_node_by_path(path); of_path_to_platform_device()
1006 /* find out if a platform device exists at that path */ of_path_platform_device_exists()
1007 static int of_path_platform_device_exists(const char *path) of_path_platform_device_exists() argument
1011 pdev = of_path_to_platform_device(path); of_path_platform_device_exists()
1018 /* get the i2c client device instantiated at the path */ of_path_to_i2c_client()
1019 static struct i2c_client *of_path_to_i2c_client(const char *path) of_path_to_i2c_client() argument
1024 np = of_find_node_by_path(path); of_path_to_i2c_client()
1034 /* find out if a i2c client device exists at that path */ of_path_i2c_client_exists()
1035 static int of_path_i2c_client_exists(const char *path) of_path_i2c_client_exists() argument
1039 client = of_path_to_i2c_client(path); of_path_i2c_client_exists()
1045 static int of_path_i2c_client_exists(const char *path) of_path_i2c_client_exists() argument
1056 static int of_path_device_type_exists(const char *path, of_path_device_type_exists() argument
1061 return of_path_platform_device_exists(path); of_path_device_type_exists()
1063 return of_path_i2c_client_exists(path); of_path_device_type_exists()
1091 const char *path; of_unittest_device_exists() local
1093 path = unittest_path(unittest_nr, ovtype); of_unittest_device_exists()
1097 return of_path_platform_device_exists(path); of_unittest_device_exists()
1099 return of_path_i2c_client_exists(path); of_unittest_device_exists()
/linux-4.4.14/drivers/net/ethernet/qlogic/qed/
H A Dqed_cxt.h105 * @brief qed_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path. qed_cxt_hw_init_common
114 * @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path. qed_cxt_hw_init_pf
123 * @brief qed_qm_init_pf - Initialize the QM PF phase, per path qed_qm_init_pf
/linux-4.4.14/include/uapi/rdma/
H A Drdma_netlink.h135 * RESOLVE - The client requests the local service to resolve a path.
156 * Local service path use:
157 * Specify how the path(s) will be used.
/linux-4.4.14/tools/usb/usbip/src/
H A Dusbip_detach.c48 char path[PATH_MAX+1]; detach_port() local
64 snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", portnum); detach_port()
66 remove(path); detach_port()
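
detach_port() above tears a port down and then deletes the per-port record file the usbip tools keep under VHCI_STATE_PATH. A standalone sketch of just that cleanup step; VHCI_STATE_PATH comes from the usbip sources, and the fallback value here is only an assumption:

    #include <stdio.h>
    #include <limits.h>

    #ifndef VHCI_STATE_PATH
    #define VHCI_STATE_PATH "/var/run/vhci_hcd"   /* assumed default */
    #endif

    /* Delete the record file for one vhci port, as detach_port()
     * does after a successful detach. */
    static int remove_port_record(int portnum)
    {
        char path[PATH_MAX + 1];

        snprintf(path, PATH_MAX, VHCI_STATE_PATH "/port%d", portnum);
        return remove(path);
    }
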
/linux-4.4.14/drivers/net/fddi/skfp/
H A Dsmtdef.c169 int path ; smt_init_mib() local
237 for (path = 0 ; path < NUMPATHS ; path++) { smt_init_mib()
238 mib->a[path].fddiPATHIndex = INDEX_PATH + path ; smt_init_mib()
240 mib->a[path].fddiPATHTVXLowerBound = smt_init_mib()
242 mib->a[path].fddiPATHT_MaxLowerBound = smt_init_mib()
244 mib->a[path].fddiPATHMaxT_Req = smt_init_mib()
/linux-4.4.14/arch/tile/mm/
H A Delf.c44 char *buf, *path; notify_exec() local
59 path = file_path(exe_file, buf, PAGE_SIZE); notify_exec()
60 if (IS_ERR(path)) notify_exec()
96 sim_notify_exec(path); notify_exec()
/linux-4.4.14/arch/x86/lib/
H A Dusercopy.c14 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
/linux-4.4.14/fs/hostfs/
H A Dhostfs.h65 extern int stat_file(const char *path, struct hostfs_stat *p, int fd);
66 extern int access_file(char *path, int r, int w, int x);
67 extern int open_file(char *path, int r, int w, int append);
68 extern void *open_dir(char *path, int *err_out);
/linux-4.4.14/net/ax25/
H A Dsysctl_net_ax25.c153 char path[sizeof("net/ax25/") + IFNAMSIZ]; ax25_register_dev_sysctl() local
164 snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name); ax25_register_dev_sysctl()
165 ax25_dev->sysheader = register_net_sysctl(&init_net, path, table); ax25_register_dev_sysctl()
/linux-4.4.14/samples/bpf/
H A Dbpf_load.h23 int load_bpf_file(char *path);
H A Dtracex5_user.c10 /* install fake seccomp program to enable seccomp code path inside the kernel,
/linux-4.4.14/include/trace/
H A Ddefine_trace.h12 * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace
13 * then this macro can define the path to use. Note, the path is relative to
14 * define_trace.h, not the file including it. Full path names for out of tree
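
The define_trace.h comment above is the contract behind the usual tail of a tracepoint header: TRACE_INCLUDE_PATH is resolved relative to define_trace.h unless overridden, so out-of-tree headers set it (typically to "." together with a -I$(src) in the Makefile) before re-including define_trace.h. A sketch of that tail; the file name is illustrative:

    /* Tail of a typical trace header (my_subsys_trace.h). */
    #undef TRACE_INCLUDE_PATH
    #define TRACE_INCLUDE_PATH .
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_FILE my_subsys_trace

    /* This part must be outside protection */
    #include <trace/define_trace.h>
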

Completed in 7700 milliseconds
