Searched refs:device (Results 1 - 200 of 13530) sorted by relevance

/linux-4.4.14/drivers/base/power/
power.h
3 static inline void device_pm_init_common(struct device *dev) device_pm_init_common()
14 static inline void pm_runtime_early_init(struct device *dev) pm_runtime_early_init()
20 extern void pm_runtime_init(struct device *dev);
21 extern void pm_runtime_remove(struct device *dev);
24 struct device *dev;
34 extern int device_wakeup_attach_irq(struct device *dev,
36 extern void device_wakeup_detach_irq(struct device *dev);
43 device_wakeup_attach_irq(struct device *dev, device_wakeup_attach_irq()
49 static inline void device_wakeup_detach_irq(struct device *dev) device_wakeup_detach_irq()
67 extern int dpm_sysfs_add(struct device *dev);
68 extern void dpm_sysfs_remove(struct device *dev);
69 extern void rpm_sysfs_remove(struct device *dev);
70 extern int wakeup_sysfs_add(struct device *dev);
71 extern void wakeup_sysfs_remove(struct device *dev);
72 extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
73 extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
74 extern int pm_qos_sysfs_add_flags(struct device *dev);
75 extern void pm_qos_sysfs_remove_flags(struct device *dev);
76 extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
77 extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
81 static inline void pm_runtime_early_init(struct device *dev) pm_runtime_early_init()
86 static inline void pm_runtime_init(struct device *dev) {} pm_runtime_remove()
87 static inline void pm_runtime_remove(struct device *dev) {} pm_runtime_remove()
89 static inline int dpm_sysfs_add(struct device *dev) { return 0; } dpm_sysfs_remove()
90 static inline void dpm_sysfs_remove(struct device *dev) {} rpm_sysfs_remove()
91 static inline void rpm_sysfs_remove(struct device *dev) {} wakeup_sysfs_add()
92 static inline int wakeup_sysfs_add(struct device *dev) { return 0; } wakeup_sysfs_remove()
93 static inline void wakeup_sysfs_remove(struct device *dev) {} pm_qos_sysfs_add()
94 static inline int pm_qos_sysfs_add(struct device *dev) { return 0; } pm_qos_sysfs_remove()
95 static inline void pm_qos_sysfs_remove(struct device *dev) {} pm_qos_sysfs_remove()
113 extern struct list_head dpm_list; /* The active device list */
115 static inline struct device *to_device(struct list_head *entry) to_device()
117 return container_of(entry, struct device, power.entry); to_device()
120 extern void device_pm_sleep_init(struct device *dev);
121 extern void device_pm_add(struct device *);
122 extern void device_pm_remove(struct device *);
123 extern void device_pm_move_before(struct device *, struct device *);
124 extern void device_pm_move_after(struct device *, struct device *);
125 extern void device_pm_move_last(struct device *);
129 static inline void device_pm_sleep_init(struct device *dev) {} device_pm_sleep_init()
131 static inline void device_pm_add(struct device *dev) {} device_pm_add()
133 static inline void device_pm_remove(struct device *dev) device_pm_remove()
138 static inline void device_pm_move_before(struct device *deva, device_pm_move_before()
139 struct device *devb) {} device_pm_move_after()
140 static inline void device_pm_move_after(struct device *deva, device_pm_move_after()
141 struct device *devb) {} device_pm_move_last()
142 static inline void device_pm_move_last(struct device *dev) {} device_pm_move_last()
146 static inline void device_pm_init(struct device *dev) device_pm_init()
/linux-4.4.14/include/linux/
component.h
4 struct device;
7 int (*bind)(struct device *, struct device *, void *);
8 void (*unbind)(struct device *, struct device *, void *);
11 int component_add(struct device *, const struct component_ops *);
12 void component_del(struct device *, const struct component_ops *);
14 int component_bind_all(struct device *, void *);
15 void component_unbind_all(struct device *, void *);
20 int (*add_components)(struct device *, struct master *);
21 int (*bind)(struct device *);
22 void (*unbind)(struct device *);
25 int component_master_add(struct device *, const struct component_master_ops *);
26 void component_master_del(struct device *,
30 int (*compare)(struct device *, void *), void *compare_data);
34 int component_master_add_with_match(struct device *,
36 void component_match_add(struct device *, struct component_match **,
37 int (*compare)(struct device *, void *), void *compare_data);
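
The component framework above aggregates several sub-devices into one logical device, binding the master only once every registered component has matched. A minimal master-side sketch, assuming a hypothetical foo driver whose single child device was located by other means:

#include <linux/component.h>
#include <linux/device.h>

/* Hypothetical compare callback: match a component by device pointer. */
static int foo_compare(struct device *dev, void *data)
{
        return dev == data;
}

static int foo_master_bind(struct device *dev)
{
        /* Bind every matched component, in match-list order. */
        return component_bind_all(dev, NULL);
}

static void foo_master_unbind(struct device *dev)
{
        component_unbind_all(dev, NULL);
}

static const struct component_master_ops foo_master_ops = {
        .bind   = foo_master_bind,
        .unbind = foo_master_unbind,
};

/* Called from the master's probe; child_dev is assumed to exist. */
static int foo_register_master(struct device *dev, struct device *child_dev)
{
        struct component_match *match = NULL;

        component_match_add(dev, &match, foo_compare, child_dev);
        return component_master_add_with_match(dev, &foo_master_ops, match);
}

Each sub-device driver would pair this with component_add() from its own probe.
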
attribute_container.h
15 struct device;
23 int (*match)(struct attribute_container *, struct device *);
42 void attribute_container_create_device(struct device *dev,
44 struct device *,
45 struct device *));
46 void attribute_container_add_device(struct device *dev,
48 struct device *,
49 struct device *));
50 void attribute_container_remove_device(struct device *dev,
52 struct device *,
53 struct device *));
54 void attribute_container_device_trigger(struct device *dev,
56 struct device *,
57 struct device *));
58 void attribute_container_trigger(struct device *dev,
60 struct device *));
61 int attribute_container_add_attrs(struct device *classdev);
62 int attribute_container_add_class_device(struct device *classdev);
64 struct device *dev,
65 struct device *classdev);
66 void attribute_container_remove_attrs(struct device *classdev);
67 void attribute_container_class_device_del(struct device *classdev);
68 struct attribute_container *attribute_container_classdev_to_container(struct device *);
69 struct device *attribute_container_find_class_device(struct attribute_container *, struct device *);
70 struct device_attribute **attribute_container_classdev_to_attrs(const struct device *classdev);
bsg.h
9 struct device *class_dev;
10 struct device *parent;
14 void (*release)(struct device *);
18 struct device *parent, const char *name,
19 void (*release)(struct device *));
23 struct device *parent, const char *name, bsg_register_queue()
24 void (*release)(struct device *)) bsg_register_queue()
pm_runtime.h
12 #include <linux/device.h>
34 extern int pm_generic_runtime_suspend(struct device *dev);
35 extern int pm_generic_runtime_resume(struct device *dev);
36 extern int pm_runtime_force_suspend(struct device *dev);
37 extern int pm_runtime_force_resume(struct device *dev);
39 extern int __pm_runtime_idle(struct device *dev, int rpmflags);
40 extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
41 extern int __pm_runtime_resume(struct device *dev, int rpmflags);
42 extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
43 extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
44 extern int pm_runtime_barrier(struct device *dev);
45 extern void pm_runtime_enable(struct device *dev);
46 extern void __pm_runtime_disable(struct device *dev, bool check_resume);
47 extern void pm_runtime_allow(struct device *dev);
48 extern void pm_runtime_forbid(struct device *dev);
49 extern void pm_runtime_no_callbacks(struct device *dev);
50 extern void pm_runtime_irq_safe(struct device *dev);
51 extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
52 extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
53 extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
54 extern void pm_runtime_update_max_time_suspended(struct device *dev,
56 extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
58 static inline bool pm_children_suspended(struct device *dev) pm_children_suspended()
64 static inline void pm_runtime_get_noresume(struct device *dev) pm_runtime_get_noresume()
69 static inline void pm_runtime_put_noidle(struct device *dev) pm_runtime_put_noidle()
74 static inline bool device_run_wake(struct device *dev) device_run_wake()
79 static inline void device_set_run_wake(struct device *dev, bool enable) device_set_run_wake()
84 static inline bool pm_runtime_suspended(struct device *dev) pm_runtime_suspended()
90 static inline bool pm_runtime_active(struct device *dev) pm_runtime_active()
96 static inline bool pm_runtime_status_suspended(struct device *dev) pm_runtime_status_suspended()
101 static inline bool pm_runtime_enabled(struct device *dev) pm_runtime_enabled()
106 static inline bool pm_runtime_callbacks_present(struct device *dev) pm_runtime_callbacks_present()
111 static inline void pm_runtime_mark_last_busy(struct device *dev) pm_runtime_mark_last_busy()
116 static inline bool pm_runtime_is_irq_safe(struct device *dev) pm_runtime_is_irq_safe()
125 static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } pm_generic_runtime_resume()
126 static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } pm_runtime_force_suspend()
127 static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } pm_runtime_force_resume()
128 static inline int pm_runtime_force_resume(struct device *dev) { return 0; } pm_runtime_force_resume()
130 static inline int __pm_runtime_idle(struct device *dev, int rpmflags) __pm_runtime_idle()
134 static inline int __pm_runtime_suspend(struct device *dev, int rpmflags) __pm_runtime_suspend()
138 static inline int __pm_runtime_resume(struct device *dev, int rpmflags) __pm_runtime_resume()
142 static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) pm_schedule_suspend()
146 static inline int __pm_runtime_set_status(struct device *dev, __pm_runtime_set_status()
148 static inline int pm_runtime_barrier(struct device *dev) { return 0; } pm_runtime_enable()
149 static inline void pm_runtime_enable(struct device *dev) {} __pm_runtime_disable()
150 static inline void __pm_runtime_disable(struct device *dev, bool c) {} pm_runtime_allow()
151 static inline void pm_runtime_allow(struct device *dev) {} pm_runtime_forbid()
152 static inline void pm_runtime_forbid(struct device *dev) {} pm_runtime_forbid()
154 static inline bool pm_children_suspended(struct device *dev) { return false; } pm_runtime_get_noresume()
155 static inline void pm_runtime_get_noresume(struct device *dev) {} pm_runtime_put_noidle()
156 static inline void pm_runtime_put_noidle(struct device *dev) {} device_run_wake()
157 static inline bool device_run_wake(struct device *dev) { return false; } device_set_run_wake()
158 static inline void device_set_run_wake(struct device *dev, bool enable) {} pm_runtime_suspended()
159 static inline bool pm_runtime_suspended(struct device *dev) { return false; } pm_runtime_active()
160 static inline bool pm_runtime_active(struct device *dev) { return true; } pm_runtime_status_suspended()
161 static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } pm_runtime_enabled()
162 static inline bool pm_runtime_enabled(struct device *dev) { return false; } pm_runtime_enabled()
164 static inline void pm_runtime_no_callbacks(struct device *dev) {} pm_runtime_irq_safe()
165 static inline void pm_runtime_irq_safe(struct device *dev) {} pm_runtime_is_irq_safe()
166 static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } pm_runtime_is_irq_safe()
168 static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } pm_runtime_mark_last_busy()
169 static inline void pm_runtime_mark_last_busy(struct device *dev) {} __pm_runtime_use_autosuspend()
170 static inline void __pm_runtime_use_autosuspend(struct device *dev, __pm_runtime_use_autosuspend()
172 static inline void pm_runtime_set_autosuspend_delay(struct device *dev, pm_runtime_set_autosuspend_delay()
175 struct device *dev) { return 0; } pm_runtime_set_memalloc_noio()
176 static inline void pm_runtime_set_memalloc_noio(struct device *dev, pm_runtime_set_memalloc_noio()
181 static inline int pm_runtime_idle(struct device *dev) pm_runtime_idle()
186 static inline int pm_runtime_suspend(struct device *dev) pm_runtime_suspend()
191 static inline int pm_runtime_autosuspend(struct device *dev) pm_runtime_autosuspend()
196 static inline int pm_runtime_resume(struct device *dev) pm_runtime_resume()
201 static inline int pm_request_idle(struct device *dev) pm_request_idle()
206 static inline int pm_request_resume(struct device *dev) pm_request_resume()
211 static inline int pm_request_autosuspend(struct device *dev) pm_request_autosuspend()
216 static inline int pm_runtime_get(struct device *dev) pm_runtime_get()
221 static inline int pm_runtime_get_sync(struct device *dev) pm_runtime_get_sync()
226 static inline int pm_runtime_put(struct device *dev) pm_runtime_put()
231 static inline int pm_runtime_put_autosuspend(struct device *dev) pm_runtime_put_autosuspend()
237 static inline int pm_runtime_put_sync(struct device *dev) pm_runtime_put_sync()
242 static inline int pm_runtime_put_sync_suspend(struct device *dev) pm_runtime_put_sync_suspend()
247 static inline int pm_runtime_put_sync_autosuspend(struct device *dev) pm_runtime_put_sync_autosuspend()
252 static inline int pm_runtime_set_active(struct device *dev) pm_runtime_set_active()
257 static inline void pm_runtime_set_suspended(struct device *dev) pm_runtime_set_suspended()
262 static inline void pm_runtime_disable(struct device *dev) pm_runtime_disable()
267 static inline void pm_runtime_use_autosuspend(struct device *dev) pm_runtime_use_autosuspend()
272 static inline void pm_runtime_dont_use_autosuspend(struct device *dev) pm_runtime_dont_use_autosuspend()
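
Typical driver-side use of the runtime-PM calls listed above is to bracket hardware access with a get/put pair. A minimal sketch; foo and its I/O are placeholders:

#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)
{
        pm_runtime_set_active(dev);     /* hardware powers up active */
        pm_runtime_enable(dev);         /* allow runtime suspend from now on */
        return 0;
}

static int foo_do_io(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev); /* resume synchronously, usage count++ */
        if (ret < 0) {
                pm_runtime_put_noidle(dev); /* drop the count without idling */
                return ret;
        }
        /* ... access the hardware here ... */
        pm_runtime_put(dev);            /* count--, idle request when it hits 0 */
        return 0;
}
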
pm_clock.h
2 * pm_clock.h - Definitions and headers related to device clocks.
12 #include <linux/device.h>
24 extern int pm_clk_runtime_suspend(struct device *dev);
25 extern int pm_clk_runtime_resume(struct device *dev);
34 static inline bool pm_clk_no_clocks(struct device *dev) pm_clk_no_clocks()
40 extern void pm_clk_init(struct device *dev);
41 extern int pm_clk_create(struct device *dev);
42 extern void pm_clk_destroy(struct device *dev);
43 extern int pm_clk_add(struct device *dev, const char *con_id);
44 extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
45 extern void pm_clk_remove(struct device *dev, const char *con_id);
46 extern int pm_clk_suspend(struct device *dev);
47 extern int pm_clk_resume(struct device *dev);
49 static inline bool pm_clk_no_clocks(struct device *dev) pm_clk_no_clocks()
53 static inline void pm_clk_init(struct device *dev) pm_clk_init()
56 static inline int pm_clk_create(struct device *dev) pm_clk_create()
60 static inline void pm_clk_destroy(struct device *dev) pm_clk_destroy()
63 static inline int pm_clk_add(struct device *dev, const char *con_id) pm_clk_add()
68 static inline int pm_clk_add_clk(struct device *dev, struct clk *clk) pm_clk_add_clk()
72 static inline void pm_clk_remove(struct device *dev, const char *con_id) pm_clk_remove()
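
Drivers (or PM domains) hand their clocks to this layer so pm_clk_suspend()/pm_clk_resume() can gate them across runtime PM. A minimal sketch; the "bus" con_id is an assumption:

#include <linux/pm_clock.h>

static int foo_attach_clocks(struct device *dev)
{
        int ret;

        ret = pm_clk_create(dev);       /* allocate the per-device clock list */
        if (ret)
                return ret;

        ret = pm_clk_add(dev, "bus");   /* look up and track the "bus" clock */
        if (ret)
                pm_clk_destroy(dev);    /* releases the list and its clocks */
        return ret;
}
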
isa.h
8 #include <linux/device.h>
12 int (*match)(struct device *, unsigned int);
13 int (*probe)(struct device *, unsigned int);
14 int (*remove)(struct device *, unsigned int);
15 void (*shutdown)(struct device *, unsigned int);
16 int (*suspend)(struct device *, unsigned int, pm_message_t);
17 int (*resume)(struct device *, unsigned int);
20 struct device *devices;
pm_opp.h
21 struct device;
35 int dev_pm_opp_get_opp_count(struct device *dev);
36 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
37 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
39 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
43 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
46 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
49 int dev_pm_opp_add(struct device *dev, unsigned long freq,
51 void dev_pm_opp_remove(struct device *dev, unsigned long freq);
53 int dev_pm_opp_enable(struct device *dev, unsigned long freq);
55 int dev_pm_opp_disable(struct device *dev, unsigned long freq);
57 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
74 static inline int dev_pm_opp_get_opp_count(struct device *dev) dev_pm_opp_get_opp_count()
79 static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) dev_pm_opp_get_max_clock_latency()
84 static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) dev_pm_opp_get_suspend_opp()
89 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, dev_pm_opp_find_freq_exact()
95 static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, dev_pm_opp_find_freq_floor()
101 static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, dev_pm_opp_find_freq_ceil()
107 static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, dev_pm_opp_add()
113 static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) dev_pm_opp_remove()
117 static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) dev_pm_opp_enable()
122 static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq) dev_pm_opp_disable()
128 struct device *dev) dev_pm_opp_get_notifier()
135 int dev_pm_opp_of_add_table(struct device *dev);
136 void dev_pm_opp_of_remove_table(struct device *dev);
139 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
140 int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
142 static inline int dev_pm_opp_of_add_table(struct device *dev) dev_pm_opp_of_add_table()
147 static inline void dev_pm_opp_of_remove_table(struct device *dev) dev_pm_opp_of_remove_table()
160 static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) dev_pm_opp_of_get_sharing_cpus()
165 static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) dev_pm_opp_set_sharing_cpus()
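
Registering operating points and looking one up might look as follows; the frequencies and voltages are made up, and in this kernel the find helpers must be called under rcu_read_lock():

#include <linux/pm_opp.h>
#include <linux/rcupdate.h>
#include <linux/err.h>

static int foo_setup_opps(struct device *dev)
{
        struct dev_pm_opp *opp;
        unsigned long freq = 150000000;                 /* 150 MHz target */

        dev_pm_opp_add(dev, 100000000, 900000);         /* 100 MHz @ 0.90 V */
        dev_pm_opp_add(dev, 200000000, 1100000);        /* 200 MHz @ 1.10 V */

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, &freq);    /* rounds up: 200 MHz */
        rcu_read_unlock();

        return IS_ERR(opp) ? PTR_ERR(opp) : 0;
}
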
of_device.h
11 struct device;
15 const struct of_device_id *matches, const struct device *dev);
16 extern void of_device_make_bus_id(struct device *dev);
19 * of_driver_match_device - Tell if a driver's of_match_table matches a device.
21 * @dev: the device structure to match against
23 static inline int of_driver_match_device(struct device *dev, of_driver_match_device()
36 extern const void *of_device_get_match_data(const struct device *dev);
38 extern ssize_t of_device_get_modalias(struct device *dev,
41 extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
42 extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
44 static inline void of_device_node_put(struct device *dev) of_device_node_put()
51 struct device *cpu_dev; of_cpu_device_node_get()
58 void of_dma_configure(struct device *dev, struct device_node *np);
61 static inline int of_driver_match_device(struct device *dev, of_driver_match_device()
67 static inline void of_device_uevent(struct device *dev, of_device_uevent()
70 static inline const void *of_device_get_match_data(const struct device *dev) of_device_get_match_data()
75 static inline int of_device_get_modalias(struct device *dev, of_device_get_modalias()
81 static inline int of_device_uevent_modalias(struct device *dev, of_device_uevent_modalias()
87 static inline void of_device_node_put(struct device *dev) { } of_device_node_put()
90 const struct of_device_id *matches, const struct device *dev) __of_match_device()
101 static inline void of_dma_configure(struct device *dev, struct device_node *np) of_dma_configure()
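
of_device_get_match_data() gives a probe routine the .data of the matched of_device_id entry. A sketch with a hypothetical match table (which would be hooked up via the driver's of_match_table field):

#include <linux/of_device.h>

static const struct of_device_id foo_of_match[] = {
        { .compatible = "acme,foo-v1", .data = (void *)1 },
        { .compatible = "acme,foo-v2", .data = (void *)2 },
        { /* sentinel */ }
};

static int foo_probe(struct device *dev)
{
        const void *variant = of_device_get_match_data(dev);

        if (!variant)
                return -ENODEV; /* no OF match, or matched entry had no .data */
        /* ... configure for hardware revision (long)variant ... */
        return 0;
}
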
sys_soc.h
9 #include <linux/device.h>
19 * soc_device_register - register SoC as a device
26 * soc_device_unregister - unregister SoC device
27 * @dev: SoC device to be unregistered
32 * soc_device_to_device - helper function to fetch struct device
33 * @soc: Previously registered SoC device container
35 struct device *soc_device_to_device(struct soc_device *soc);
pm_wakeirq.h
19 extern int dev_pm_set_wake_irq(struct device *dev, int irq);
20 extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
22 extern void dev_pm_clear_wake_irq(struct device *dev);
23 extern void dev_pm_enable_wake_irq(struct device *dev);
24 extern void dev_pm_disable_wake_irq(struct device *dev);
28 static inline int dev_pm_set_wake_irq(struct device *dev, int irq) dev_pm_set_wake_irq()
33 static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) dev_pm_set_dedicated_wake_irq()
38 static inline void dev_pm_clear_wake_irq(struct device *dev) dev_pm_clear_wake_irq()
42 static inline void dev_pm_enable_wake_irq(struct device *dev) dev_pm_enable_wake_irq()
46 static inline void dev_pm_disable_wake_irq(struct device *dev) dev_pm_disable_wake_irq()
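
A driver with a dedicated wake-up interrupt can hand it to the PM core, which requests the IRQ and enables it only while the device is suspended. A sketch; device_init_wakeup() comes from pm_wakeup.h:

#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_init_wakeup(struct device *dev, int irq)
{
        int ret;

        ret = device_init_wakeup(dev, true);    /* mark device wakeup-capable */
        if (ret)
                return ret;

        /* The PM core arms this IRQ across suspend/resume transitions. */
        ret = dev_pm_set_dedicated_wake_irq(dev, irq);
        if (ret)
                device_init_wakeup(dev, false);
        return ret;
}
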
pwm_backlight.h
18 int (*init)(struct device *dev);
19 int (*notify)(struct device *dev, int brightness);
20 void (*notify_after)(struct device *dev, int brightness);
21 void (*exit)(struct device *dev);
22 int (*check_fb)(struct device *dev, struct fb_info *info);
raid_class.h
16 int (*is_raid)(struct device *);
17 void (*get_resync)(struct device *);
18 void (*get_state)(struct device *);
56 raid_set_##attr(struct raid_template *r, struct device *dev, type value) { \
57 struct device *device = \
60 BUG_ON(!device); \
61 rd = dev_get_drvdata(device); \
65 raid_get_##attr(struct raid_template *r, struct device *dev) { \
66 struct device *device = \
69 BUG_ON(!device); \
70 rd = dev_get_drvdata(device); \
81 int __must_check raid_component_add(struct raid_template *, struct device *,
82 struct device *);
hwmon.h
17 struct device;
20 struct device *hwmon_device_register(struct device *dev);
21 struct device *
22 hwmon_device_register_with_groups(struct device *dev, const char *name,
25 struct device *
26 devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
30 void hwmon_device_unregister(struct device *dev);
31 void devm_hwmon_device_unregister(struct device *dev);
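
The devm_ variant ties the hwmon class device's lifetime to the caller. A sketch exposing one fixed temperature attribute; the name "foo" and the reading are placeholders:

#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/err.h>

static ssize_t foo_temp_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42000);     /* millidegrees Celsius */
}
static DEVICE_ATTR(temp1_input, S_IRUGO, foo_temp_show, NULL);

static struct attribute *foo_attrs[] = {
        &dev_attr_temp1_input.attr,
        NULL
};
ATTRIBUTE_GROUPS(foo);

static int foo_probe(struct device *dev)
{
        struct device *hwmon;

        /* Managed: the hwmon device is unregistered when dev unbinds. */
        hwmon = devm_hwmon_device_register_with_groups(dev, "foo",
                                                       NULL, foo_groups);
        return PTR_ERR_OR_ZERO(hwmon);
}
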
transport_class.h
12 #include <linux/device.h>
20 int (*setup)(struct transport_container *, struct device *,
21 struct device *);
22 int (*configure)(struct transport_container *, struct device *,
23 struct device *);
24 int (*remove)(struct transport_container *, struct device *,
25 struct device *);
65 void transport_remove_device(struct device *);
66 void transport_add_device(struct device *);
67 void transport_setup_device(struct device *);
68 void transport_configure_device(struct device *);
69 void transport_destroy_device(struct device *);
72 transport_register_device(struct device *dev) transport_register_device()
79 transport_unregister_device(struct device *dev) transport_unregister_device()
acpi_dma.h
18 #include <linux/device.h>
23 * struct acpi_dma_spec - slave device DMA resources
26 * @dev: struct device of the DMA controller to be used in the filter
32 struct device *dev;
38 * @dev: struct device of this controller
46 struct device *dev;
62 int acpi_dma_controller_register(struct device *dev,
66 int acpi_dma_controller_free(struct device *dev);
67 int devm_acpi_dma_controller_register(struct device *dev,
71 void devm_acpi_dma_controller_free(struct device *dev);
73 struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
75 struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
82 static inline int acpi_dma_controller_register(struct device *dev, acpi_dma_controller_register()
89 static inline int acpi_dma_controller_free(struct device *dev) acpi_dma_controller_free()
93 static inline int devm_acpi_dma_controller_register(struct device *dev, devm_acpi_dma_controller_register()
100 static inline void devm_acpi_dma_controller_free(struct device *dev) devm_acpi_dma_controller_free()
105 struct device *dev, size_t index) acpi_dma_request_slave_chan_by_index()
110 struct device *dev, const char *name) acpi_dma_request_slave_chan_by_name()
devfreq-event.h
15 #include <linux/device.h>
18 * struct devfreq_event_dev - the devfreq-event device
20 * @node : Contain the devfreq-event device that have been registered.
21 * @dev : the device registered by devfreq-event class. dev.parent is
22 * the device using devfreq-event.
25 * @desc : the description for devfreq-event device.
27 * This structure contains devfreq-event device information.
32 struct device dev;
42 * @load_count : load count of devfreq-event device for the given period.
43 * @total_count : total count of devfreq-event device for the given period.
45 * (ns/us/...), or anything the device driver wants.
48 * This structure contains the data of a devfreq-event device for a polling period.
56 * struct devfreq_event_ops - the operations of devfreq-event device
58 * @enable : Enable the devfreq-event device.
59 * @disable : Disable the devfreq-event device.
60 * @reset : Reset all setting of the devfreq-event device.
61 * @set_event : Set the specific event type for the devfreq-event device.
65 * This structure contains devfreq-event device operations which can be
66 * implemented by devfreq-event device drivers.
81 * struct devfreq_event_desc - the descriptor of devfreq-event device
83 * @name : the name of devfreq-event device.
85 * @ops : the operation to control devfreq-event device.
87 * Each devfreq-event device is described with this structure.
88 * This structure contains the various data for devfreq-event device.
106 struct device *dev, int index);
107 extern int devfreq_event_get_edev_count(struct device *dev);
108 extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
111 extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
113 extern void devm_devfreq_event_remove_edev(struct device *dev,
157 struct device *dev, int index) devfreq_event_get_edev_by_phandle()
162 static inline int devfreq_event_get_edev_count(struct device *dev) devfreq_event_get_edev_count()
167 static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev, devfreq_event_add_edev()
179 struct device *dev, devm_devfreq_event_add_edev()
185 static inline void devm_devfreq_event_remove_edev(struct device *dev, devm_devfreq_event_remove_edev()
firmware.h
21 struct device;
43 struct device *device);
46 const char *name, struct device *device, gfp_t gfp, void *context,
49 struct device *device);
55 struct device *device) request_firmware()
61 const char *name, struct device *device, gfp_t gfp, void *context, request_firmware_nowait()
73 struct device *device) request_firmware_direct()
53 request_firmware(const struct firmware **fw, const char *name, struct device *device) request_firmware() argument
59 request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) request_firmware_nowait() argument
71 request_firmware_direct(const struct firmware **fw, const char *name, struct device *device) request_firmware_direct() argument
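
Synchronous loading with the API above; the firmware file name is a placeholder:

#include <linux/firmware.h>

static int foo_load_firmware(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        /* Blocks until the direct loader (or userspace) provides the file. */
        ret = request_firmware(&fw, "foo/firmware.bin", dev);
        if (ret)
                return ret;

        /* ... download fw->data (fw->size bytes) into the device ... */

        release_firmware(fw);
        return 0;
}
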
dma-mapping.h
6 #include <linux/device.h>
14 * It can be given to a device to use as a DMA source or target. A CPU cannot
19 void* (*alloc)(struct device *dev, size_t size,
22 void (*free)(struct device *dev, size_t size,
25 int (*mmap)(struct device *, struct vm_area_struct *,
28 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
31 dma_addr_t (*map_page)(struct device *dev, struct page *page,
35 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
42 int (*map_sg)(struct device *dev, struct scatterlist *sg,
45 void (*unmap_sg)(struct device *dev,
49 void (*sync_single_for_cpu)(struct device *dev,
52 void (*sync_single_for_device)(struct device *dev,
55 void (*sync_sg_for_cpu)(struct device *dev,
58 void (*sync_sg_for_device)(struct device *dev,
61 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
62 int (*dma_supported)(struct device *dev, u64 mask);
63 int (*set_dma_mask)(struct device *dev, u64 mask);
65 u64 (*get_required_mask)(struct device *dev);
81 static inline int is_device_dma_capable(struct device *dev) is_device_dma_capable()
92 static inline u64 dma_get_mask(struct device *dev) dma_get_mask()
100 int dma_set_coherent_mask(struct device *dev, u64 mask);
102 static inline int dma_set_coherent_mask(struct device *dev, u64 mask) dma_set_coherent_mask()
117 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) dma_set_mask_and_coherent()
126 * Similar to the above, except it deals with the case where the device
129 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) dma_coerce_mask_and_coherent()
135 extern u64 dma_get_required_mask(struct device *dev);
138 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, arch_setup_dma_ops()
144 static inline void arch_teardown_dma_ops(struct device *dev) { } arch_teardown_dma_ops()
147 static inline unsigned int dma_get_max_seg_size(struct device *dev) dma_get_max_seg_size()
154 static inline unsigned int dma_set_max_seg_size(struct device *dev, dma_set_max_seg_size()
164 static inline unsigned long dma_get_seg_boundary(struct device *dev) dma_get_seg_boundary()
171 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) dma_set_seg_boundary()
181 static inline unsigned long dma_max_pfn(struct device *dev) dma_max_pfn()
187 static inline void *dma_zalloc_coherent(struct device *dev, size_t size, dma_zalloc_coherent()
213 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_declare_coherent_memory()
220 dma_release_declared_memory(struct device *dev) dma_release_declared_memory()
225 dma_mark_declared_memory_occupied(struct device *dev, dma_mark_declared_memory_occupied()
235 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
237 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
239 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
241 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
244 extern int dmam_declare_coherent_memory(struct device *dev,
248 extern void dmam_release_declared_memory(struct device *dev);
250 static inline int dmam_declare_coherent_memory(struct device *dev, dmam_declare_coherent_memory()
257 static inline void dmam_release_declared_memory(struct device *dev) dmam_release_declared_memory()
278 static inline void *dma_alloc_writecombine(struct device *dev, size_t size, dma_alloc_writecombine()
286 static inline void dma_free_writecombine(struct device *dev, size_t size, dma_free_writecombine()
294 static inline int dma_mmap_writecombine(struct device *dev, dma_mmap_writecombine()
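
A common streaming-DMA pattern with these helpers: declare the device's addressing capability, map a kernel buffer, hand the bus address to the hardware, and unmap when the transfer completes. A minimal sketch:

#include <linux/dma-mapping.h>

static int foo_dma_write(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;
        int ret;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;     /* platform cannot do 32-bit DMA for us */

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... program `handle' into the device and run the transfer ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}
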
reset.h
4 struct device;
15 struct reset_control *reset_control_get(struct device *dev, const char *id);
17 struct reset_control *devm_reset_control_get(struct device *dev, const char *id);
19 int __must_check device_reset(struct device *dev);
21 static inline int device_reset_optional(struct device *dev) device_reset_optional()
27 struct device *dev, const char *id) reset_control_get_optional()
33 struct device *dev, const char *id) devm_reset_control_get_optional()
72 static inline int device_reset_optional(struct device *dev) device_reset_optional()
78 struct device *dev, const char *id) reset_control_get()
85 struct device *dev, const char *id) devm_reset_control_get()
92 struct device *dev, const char *id) reset_control_get_optional()
98 struct device *dev, const char *id) devm_reset_control_get_optional()
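
device_reset() pulses the device's default reset line, while a named control can be held explicitly; reset_control_deassert() belongs to the same header. A sketch where the "ahb" id is an assumption:

#include <linux/reset.h>
#include <linux/err.h>

static int foo_reset(struct device *dev)
{
        struct reset_control *rst;
        int ret;

        ret = device_reset(dev);        /* assert + deassert the default line */
        if (ret)
                return ret;

        rst = devm_reset_control_get(dev, "ahb");
        if (IS_ERR(rst))
                return PTR_ERR(rst);

        return reset_control_deassert(rst); /* take the block out of reset */
}
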
dma-debug.h
25 struct device;
37 extern void debug_dma_map_page(struct device *dev, struct page *page,
42 extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
44 extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
47 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
50 extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
53 extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
56 extern void debug_dma_free_coherent(struct device *dev, size_t size,
59 extern void debug_dma_sync_single_for_cpu(struct device *dev,
63 extern void debug_dma_sync_single_for_device(struct device *dev,
67 extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
73 extern void debug_dma_sync_single_range_for_device(struct device *dev,
78 extern void debug_dma_sync_sg_for_cpu(struct device *dev,
82 extern void debug_dma_sync_sg_for_device(struct device *dev,
86 extern void debug_dma_dump_mappings(struct device *dev);
105 static inline void debug_dma_map_page(struct device *dev, struct page *page, debug_dma_map_page()
112 static inline void debug_dma_mapping_error(struct device *dev, debug_dma_mapping_error()
117 static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, debug_dma_unmap_page()
123 static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, debug_dma_map_sg()
128 static inline void debug_dma_unmap_sg(struct device *dev, debug_dma_unmap_sg()
134 static inline void debug_dma_alloc_coherent(struct device *dev, size_t size, debug_dma_alloc_coherent()
139 static inline void debug_dma_free_coherent(struct device *dev, size_t size, debug_dma_free_coherent()
144 static inline void debug_dma_sync_single_for_cpu(struct device *dev, debug_dma_sync_single_for_cpu()
150 static inline void debug_dma_sync_single_for_device(struct device *dev, debug_dma_sync_single_for_device()
156 static inline void debug_dma_sync_single_range_for_cpu(struct device *dev, debug_dma_sync_single_range_for_cpu()
164 static inline void debug_dma_sync_single_range_for_device(struct device *dev, debug_dma_sync_single_range_for_device()
172 static inline void debug_dma_sync_sg_for_cpu(struct device *dev, debug_dma_sync_sg_for_cpu()
178 static inline void debug_dma_sync_sg_for_device(struct device *dev, debug_dma_sync_sg_for_device()
184 static inline void debug_dma_dump_mappings(struct device *dev) debug_dma_dump_mappings()
pm_qos.h
10 #include <linux/device.h>
68 struct device *dev;
138 enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
139 enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
140 s32 __dev_pm_qos_read_value(struct device *dev);
141 s32 dev_pm_qos_read_value(struct device *dev);
142 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
146 int dev_pm_qos_add_notifier(struct device *dev,
148 int dev_pm_qos_remove_notifier(struct device *dev,
152 void dev_pm_qos_constraints_init(struct device *dev);
153 void dev_pm_qos_constraints_destroy(struct device *dev);
154 int dev_pm_qos_add_ancestor_request(struct device *dev,
157 int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
158 void dev_pm_qos_hide_latency_limit(struct device *dev);
159 int dev_pm_qos_expose_flags(struct device *dev, s32 value);
160 void dev_pm_qos_hide_flags(struct device *dev);
161 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
162 s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
163 int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
164 int dev_pm_qos_expose_latency_tolerance(struct device *dev);
165 void dev_pm_qos_hide_latency_tolerance(struct device *dev);
167 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) dev_pm_qos_requested_resume_latency()
172 static inline s32 dev_pm_qos_requested_flags(struct device *dev) dev_pm_qos_requested_flags()
177 static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, __dev_pm_qos_flags()
180 static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, dev_pm_qos_flags()
183 static inline s32 __dev_pm_qos_read_value(struct device *dev) __dev_pm_qos_read_value()
185 static inline s32 dev_pm_qos_read_value(struct device *dev) dev_pm_qos_read_value()
187 static inline int dev_pm_qos_add_request(struct device *dev, dev_pm_qos_add_request()
197 static inline int dev_pm_qos_add_notifier(struct device *dev, dev_pm_qos_add_notifier()
200 static inline int dev_pm_qos_remove_notifier(struct device *dev, dev_pm_qos_remove_notifier()
209 static inline void dev_pm_qos_constraints_init(struct device *dev) dev_pm_qos_constraints_init()
213 static inline void dev_pm_qos_constraints_destroy(struct device *dev) dev_pm_qos_constraints_destroy()
217 static inline int dev_pm_qos_add_ancestor_request(struct device *dev, dev_pm_qos_add_ancestor_request()
222 static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) dev_pm_qos_expose_latency_limit()
224 static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} dev_pm_qos_expose_flags()
225 static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) dev_pm_qos_expose_flags()
227 static inline void dev_pm_qos_hide_flags(struct device *dev) {} dev_pm_qos_update_flags()
228 static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) dev_pm_qos_update_flags()
230 static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) dev_pm_qos_get_user_latency_tolerance()
232 static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) dev_pm_qos_update_user_latency_tolerance()
234 static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev) dev_pm_qos_expose_latency_tolerance()
236 static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} dev_pm_qos_hide_latency_tolerance()
238 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } dev_pm_qos_requested_flags()
239 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
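
A driver can pin a per-device QoS constraint while it is busy; dev_pm_qos_remove_request() is the matching teardown from the same header. A minimal sketch:

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_req;

static int foo_start_traffic(struct device *dev)
{
        /* Ask the PM core to keep this device's resume latency <= 100 us. */
        return dev_pm_qos_add_request(dev, &foo_req,
                                      DEV_PM_QOS_RESUME_LATENCY, 100);
}

static void foo_stop_traffic(void)
{
        dev_pm_qos_remove_request(&foo_req);    /* drop the constraint */
}
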
of_reserved_mem.h
4 struct device;
20 struct device *dev);
22 struct device *dev);
31 int of_reserved_mem_device_init(struct device *dev);
32 void of_reserved_mem_device_release(struct device *dev);
38 static inline int of_reserved_mem_device_init(struct device *dev) of_reserved_mem_device_init()
42 static inline void of_reserved_mem_device_release(struct device *pdev) { } of_reserved_mem_device_release()
device.h
2 * device.h - generic, centralized driver model
30 #include <asm/device.h>
32 struct device;
63 * struct bus_type - The bus type of the device
67 * @dev_root: Default device to use as the parent.
71 * @drv_groups: Default attributes of the device drivers on the bus.
72 * @match: Called, perhaps multiple times, whenever a new device or driver
74 * given device can be handled by the given driver.
75 * @uevent: Called when a device is added, removed, or a few other things
77 * @probe: Called when a new device or driver is added to this bus, and calls
78 * the specific driver's probe to initialize the matched device.
79 * @remove: Called when a device is removed from this bus.
80 * @shutdown: Called at shut-down time to quiesce the device.
82 * @online: Called to put the device back online (after offlining it).
83 * @offline: Called to put the device offline for hot-removal. May fail.
85 * @suspend: Called when a device on this bus wants to go to sleep mode.
86 * @resume: Called to bring a device on this bus out of sleep mode.
88 * device driver's pm-ops.
97 * purposes of the device model, all devices are connected via a bus, even if
99 * A USB controller is usually a PCI device, for example. The device model
108 struct device *dev_root;
114 int (*match)(struct device *dev, struct device_driver *drv);
115 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
116 int (*probe)(struct device *dev);
117 int (*remove)(struct device *dev);
118 void (*shutdown)(struct device *dev);
120 int (*online)(struct device *dev);
121 int (*offline)(struct device *dev);
123 int (*suspend)(struct device *dev, pm_message_t state);
124 int (*resume)(struct device *dev);
147 struct device *start,
149 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
152 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
153 int (*fn)(struct device *dev, void *data));
154 struct device *bus_find_device(struct bus_type *bus, struct device *start,
156 int (*match)(struct device *dev, void *data));
157 struct device *bus_find_device_by_name(struct bus_type *bus,
158 struct device *start,
160 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
161 struct device *hint);
165 int (*compare)(const struct device *a,
166 const struct device *b));
180 /* All 4 notifiers below get called with the target struct device *
182 * with the device lock held in the core, so be careful.
184 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
185 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
186 #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
189 #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
193 from the device */
199 * enum probe_type - device driver probe type to try
211 * device registration (with the exception of -EPROBE_DEFER
227 * struct device_driver - The basic device driver structure
228 * @name: Name of the device driver.
229 * @bus: The bus which the device of this driver belongs to.
236 * @probe: Called to query the existence of a specific device,
238 * to a specific device.
239 * @remove: Called when the device is removed from the system to
240 * unbind a device from this driver.
241 * @shutdown: Called at shut-down time to quiesce the device.
242 * @suspend: Called to put the device to sleep mode. Usually to a
244 * @resume: Called to bring a device out of sleep mode.
247 * @pm: Power management operations of the device which matched
252 * The device driver-model tracks all of the drivers known to the system.
257 * of any specific device.
272 int (*probe) (struct device *dev);
273 int (*remove) (struct device *dev);
274 void (*shutdown) (struct device *dev);
275 int (*suspend) (struct device *dev, pm_message_t state);
276 int (*resume) (struct device *dev);
318 struct device *start,
320 int (*fn)(struct device *dev,
322 struct device *driver_find_device(struct device_driver *drv,
323 struct device *start, void *data,
324 int (*match)(struct device *dev, void *data));
327 * struct subsys_interface - interfaces to device functions
328 * @name: name of the device function
331 * @add_dev: device hookup to device function handler
332 * @remove_dev: device hookup to device function handler
343 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
344 void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
356 * struct class - device classes
362 * @dev_uevent: Called when a device is added, removed from this class, or a
367 * @dev_release: Called to release the device.
368 * @suspend: Used to put the device into sleep mode, usually to a low power
370 * @resume: Used to bring the device out of sleep mode.
372 * @namespace: Namespace of the device that belongs to this class.
373 * @pm: The default device power management operations of this class.
377 * A class is a higher-level view of a device that abstracts out low-level
391 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
392 char *(*devnode)(struct device *dev, umode_t *mode);
395 void (*dev_release)(struct device *dev);
397 int (*suspend)(struct device *dev, pm_message_t state);
398 int (*resume)(struct device *dev);
401 const void *(*namespace)(struct device *dev);
430 int class_compat_create_link(struct class_compat *cls, struct device *dev,
431 struct device *device_link);
432 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
433 struct device *device_link);
437 struct device *start,
439 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
442 extern int class_for_each_device(struct class *class, struct device *start,
444 int (*fn)(struct device *dev, void *data));
445 extern struct device *class_find_device(struct class *class,
446 struct device *start, const void *data,
447 int (*match)(struct device *, const void *));
503 int (*add_dev) (struct device *, struct class_interface *);
504 void (*remove_dev) (struct device *, struct class_interface *);
524 * The type of device, "struct device" is embedded in. A class
527 * This identifies the device type and carries type-specific
535 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
536 char *(*devnode)(struct device *dev, umode_t *mode,
538 void (*release)(struct device *dev);
543 /* interface for exporting device attributes */
546 ssize_t (*show)(struct device *dev, struct device_attribute *attr,
548 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
557 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
559 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
561 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
563 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
565 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
567 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
591 extern int device_create_file(struct device *device,
593 extern void device_remove_file(struct device *dev,
595 extern bool device_remove_file_self(struct device *dev,
597 extern int __must_check device_create_bin_file(struct device *dev,
599 extern void device_remove_bin_file(struct device *dev,
602 /* device resource management */
603 typedef void (*dr_release_t)(struct device *dev, void *res);
604 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
622 extern void devres_for_each_res(struct device *dev, dr_release_t release,
624 void (*fn)(struct device *, void *, void *),
627 extern void devres_add(struct device *dev, void *res);
628 extern void *devres_find(struct device *dev, dr_release_t release,
630 extern void *devres_get(struct device *dev, void *new_res,
632 extern void *devres_remove(struct device *dev, dr_release_t release,
634 extern int devres_destroy(struct device *dev, dr_release_t release,
636 extern int devres_release(struct device *dev, dr_release_t release,
640 extern void * __must_check devres_open_group(struct device *dev, void *id,
642 extern void devres_close_group(struct device *dev, void *id);
643 extern void devres_remove_group(struct device *dev, void *id);
644 extern int devres_release_group(struct device *dev, void *id);
646 /* managed devm_k.alloc/kfree for device drivers */
647 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
649 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
652 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); devm_kzalloc()
653 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) devm_kzalloc()
657 static inline void *devm_kmalloc_array(struct device *dev, devm_kmalloc_array()
664 static inline void *devm_kcalloc(struct device *dev, devm_kcalloc()
669 extern void devm_kfree(struct device *dev, void *p);
670 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
671 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
674 extern unsigned long devm_get_free_pages(struct device *dev,
676 extern void devm_free_pages(struct device *dev, unsigned long addr);
678 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
681 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
682 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
694 * struct device - The basic device structure
695 * @parent: The device's "parent" device, the device to which it is attached.
696 * In most cases, a parent device is some sort of bus or host
697 * controller. If parent is NULL, the device is a top-level device,
699 * @p: Holds the private data of the driver core portions of the device.
702 * @init_name: Initial name of the device.
703 * @type: The type of device.
704 * This identifies the device type and carries type-specific
707 * @bus: Type of bus device is on.
709 * @platform_data: Platform data specific to the device.
718 * @power: For device power management.
723 * @pins: For device pin management.
726 * @msi_domain: The generic MSI domain this device is using.
727 * @numa_node: NUMA node this device is close to.
728 * @dma_mask: Dma mask (if dma'ble device).
735 * @dma_pools: Dma pools (if dma'ble device).
739 * @of_node: Associated device tree node.
740 * @fwnode: Associated device node supplied by platform firmware.
742 * @id: device instance
743 * @devres_lock: Spinlock to protect the resource of the device.
744 * @devres_head: The resources list of the device.
745 * @knode_class: The node used to add the device to the class list.
746 * @class: The class of the device.
748 * @release: Callback to free the device after all references have
750 * device (i.e. the bus driver that discovered the device).
751 * @iommu_group: IOMMU group the device belongs to.
753 * @offline_disabled: If set, the device is permanently online.
756 * At the lowest level, every device in a Linux system is represented by an
757 * instance of struct device. The device structure contains the information
758 * that the device model core needs to model the system. Most subsystems,
760 * result, it is rare for devices to be represented by bare device structures;
762 * a higher-level representation of the device.
764 struct device { struct
765 struct device *parent;
770 const char *init_name; /* initial name of the device */
777 struct bus_type *bus; /* type of bus device is on */
779 device */
780 void *platform_data; /* Platform specific data, device
798 int numa_node; /* NUMA node this device is close to */
800 u64 *dma_mask; /* dma mask (if dma'able device) */
821 struct device_node *of_node; /* associated device tree node */
822 struct fwnode_handle *fwnode; /* firmware device node */
825 u32 id; /* device instance */
834 void (*release)(struct device *dev);
841 static inline struct device *kobj_to_dev(struct kobject *kobj) kobj_to_dev()
843 return container_of(kobj, struct device, kobj); kobj_to_dev()
846 /* Get the wakeup routines, which depend on struct device */
849 static inline const char *dev_name(const struct device *dev) dev_name()
859 int dev_set_name(struct device *dev, const char *name, ...);
862 static inline int dev_to_node(struct device *dev) dev_to_node()
866 static inline void set_dev_node(struct device *dev, int node) set_dev_node()
871 static inline int dev_to_node(struct device *dev) dev_to_node()
875 static inline void set_dev_node(struct device *dev, int node) set_dev_node()
880 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) dev_get_msi_domain()
889 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) dev_set_msi_domain()
896 static inline void *dev_get_drvdata(const struct device *dev) dev_get_drvdata()
901 static inline void dev_set_drvdata(struct device *dev, void *data) dev_set_drvdata()
906 static inline struct pm_subsys_data *dev_to_psd(struct device *dev) dev_to_psd()
911 static inline unsigned int dev_get_uevent_suppress(const struct device *dev) dev_get_uevent_suppress()
916 static inline void dev_set_uevent_suppress(struct device *dev, int val) dev_set_uevent_suppress()
921 static inline int device_is_registered(struct device *dev) device_is_registered()
926 static inline void device_enable_async_suspend(struct device *dev) device_enable_async_suspend()
932 static inline void device_disable_async_suspend(struct device *dev) device_disable_async_suspend()
938 static inline bool device_async_suspend_enabled(struct device *dev) device_async_suspend_enabled()
943 static inline void pm_suspend_ignore_children(struct device *dev, bool enable) pm_suspend_ignore_children()
948 static inline void dev_pm_syscore_device(struct device *dev, bool val) dev_pm_syscore_device()
955 static inline void device_lock(struct device *dev) device_lock()
960 static inline int device_trylock(struct device *dev) device_trylock()
965 static inline void device_unlock(struct device *dev) device_unlock()
970 static inline void device_lock_assert(struct device *dev) device_lock_assert()
975 static inline struct device_node *dev_of_node(struct device *dev) dev_of_node()
987 extern int __must_check device_register(struct device *dev);
988 extern void device_unregister(struct device *dev);
989 extern void device_initialize(struct device *dev);
990 extern int __must_check device_add(struct device *dev);
991 extern void device_del(struct device *dev);
992 extern int device_for_each_child(struct device *dev, void *data,
993 int (*fn)(struct device *dev, void *data));
994 extern int device_for_each_child_reverse(struct device *dev, void *data,
995 int (*fn)(struct device *dev, void *data));
996 extern struct device *device_find_child(struct device *dev, void *data,
997 int (*match)(struct device *dev, void *data));
998 extern int device_rename(struct device *dev, const char *new_name);
999 extern int device_move(struct device *dev, struct device *new_parent,
1001 extern const char *device_get_devnode(struct device *dev,
1005 static inline bool device_supports_offline(struct device *dev) device_supports_offline()
1013 extern int device_offline(struct device *dev);
1014 extern int device_online(struct device *dev);
1015 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1016 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1019 * Root device objects for grouping under /sys/devices
1021 extern struct device *__root_device_register(const char *name,
1028 extern void root_device_unregister(struct device *root);
1030 static inline void *dev_get_platdata(const struct device *dev) dev_get_platdata()
1036 * Manual binding of a device to driver. See drivers/base/bus.c
1039 extern int __must_check device_bind_driver(struct device *dev);
1040 extern void device_release_driver(struct device *dev);
1041 extern int __must_check device_attach(struct device *dev);
1043 extern void device_initial_probe(struct device *dev);
1044 extern int __must_check device_reprobe(struct device *dev);
1050 struct device *device_create_vargs(struct class *cls, struct device *parent,
1054 struct device *device_create(struct class *cls, struct device *parent,
1058 struct device *device_create_with_groups(struct class *cls,
1059 struct device *parent, dev_t devt, void *drvdata,
1066 * about devices and actions that the general device layer doesn't
1069 /* Notify platform of device discovery */
1070 extern int (*platform_notify)(struct device *dev);
1072 extern int (*platform_notify_remove)(struct device *dev);
1076 * get_device - atomically increment the reference count for the device.
1079 extern struct device *get_device(struct device *dev);
1080 extern void put_device(struct device *dev);
1083 extern int devtmpfs_create_node(struct device *dev);
1084 extern int devtmpfs_delete_node(struct device *dev);
1087 static inline int devtmpfs_create_node(struct device *dev) { return 0; } devtmpfs_delete_node()
1088 static inline int devtmpfs_delete_node(struct device *dev) { return 0; } devtmpfs_mount()
1096 extern const char *dev_driver_string(const struct device *dev); devtmpfs_mount()
1102 int dev_vprintk_emit(int level, const struct device *dev, devtmpfs_mount()
1105 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); devtmpfs_mount()
1108 void dev_printk(const char *level, const struct device *dev, devtmpfs_mount()
1111 void dev_emerg(const struct device *dev, const char *fmt, ...); devtmpfs_mount()
1113 void dev_alert(const struct device *dev, const char *fmt, ...); devtmpfs_mount()
1115 void dev_crit(const struct device *dev, const char *fmt, ...); devtmpfs_mount()
1117 void dev_err(const struct device *dev, const char *fmt, ...); devtmpfs_mount()
1119 void dev_warn(const struct device *dev, const char *fmt, ...); devtmpfs_mount()
1121 void dev_notice(const struct device *dev, const char *fmt, ...); devtmpfs_mount()
1123 void _dev_info(const struct device *dev, const char *fmt, ...); devtmpfs_mount()
1128 int dev_vprintk_emit(int level, const struct device *dev, dev_vprintk_emit()
1132 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) dev_printk_emit()
1135 static inline void __dev_printk(const char *level, const struct device *dev, __dev_printk()
1139 void dev_printk(const char *level, const struct device *dev, dev_printk()
1144 void dev_emerg(const struct device *dev, const char *fmt, ...) dev_emerg()
1147 void dev_crit(const struct device *dev, const char *fmt, ...) dev_crit()
1150 void dev_alert(const struct device *dev, const char *fmt, ...) dev_alert()
1153 void dev_err(const struct device *dev, const char *fmt, ...) dev_err()
1156 void dev_warn(const struct device *dev, const char *fmt, ...) dev_warn()
1159 void dev_notice(const struct device *dev, const char *fmt, ...) dev_notice()
1162 void _dev_info(const struct device *dev, const char *fmt, ...) _dev_info()
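
Two idioms from this header dominate driver code: devres-managed allocation and per-device driver data. A minimal sketch with a hypothetical foo driver:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_priv {
        int value;
};

static int foo_probe(struct device *dev)
{
        struct foo_priv *priv;

        /* Managed allocation: freed automatically when the device unbinds. */
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        dev_set_drvdata(dev, priv);     /* fetch later via dev_get_drvdata() */
        dev_info(dev, "probed\n");
        return 0;
}
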
swiotlb.h
6 struct device;
42 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
47 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
51 extern void swiotlb_tbl_sync_single(struct device *hwdev,
58 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
62 swiotlb_free_coherent(struct device *hwdev, size_t size,
65 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
69 extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
74 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
78 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
82 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
86 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
91 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
95 swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
99 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
103 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
107 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
110 swiotlb_dma_supported(struct device *hwdev, u64 mask);
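The swiotlb entry points above implement bounce buffering for streaming DMA. Drivers normally reach them indirectly through an architecture's dma_map_ops rather than calling them directly, so the sketch below is illustrative only; everything except the swiotlb_* calls is hypothetical:

	#include <linux/swiotlb.h>
	#include <linux/dma-mapping.h>

	static dma_addr_t demo_map_for_device(struct device *hwdev,
					      struct page *page, size_t size)
	{
		/* Bounce the page into the swiotlb aperture if needed. */
		dma_addr_t addr = swiotlb_map_page(hwdev, page, 0, size,
						   DMA_TO_DEVICE, NULL);

		if (swiotlb_dma_mapping_error(hwdev, addr))
			return 0;

		/* Flush CPU writes into the bounce buffer before starting DMA. */
		swiotlb_sync_single_for_device(hwdev, addr, size, DMA_TO_DEVICE);
		return addr;
	}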
H A Dsunxi-rsb.h15 #include <linux/device.h>
22 * struct sunxi_rsb_device - Basic representation of an RSB device
23 * @dev: Driver model representation of the device.
24 * @ctrl: RSB controller managing the bus hosting this device.
25 * @rtaddr: This device's runtime address
26 * @hwaddr: This device's hardware address
29 struct device dev;
36 static inline struct sunxi_rsb_device *to_sunxi_rsb_device(struct device *d) to_sunxi_rsb_device()
53 * struct sunxi_rsb_driver - RSB slave device driver
54 * @driver: RSB device drivers should initialize the name and owner fields of
56 * @probe: binds this driver to an RSB device.
57 * @remove: unbinds this driver from the RSB device.
99 * device management code.
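A minimal sketch of a slave driver for the RSB bus described above, assuming the sunxi_rsb_driver_register() helper exported by this header; the driver itself is hypothetical:

	#include <linux/sunxi-rsb.h>

	static int demo_rsb_probe(struct sunxi_rsb_device *rdev)
	{
		dev_info(&rdev->dev, "RSB slave at runtime address 0x%02x\n",
			 rdev->rtaddr);
		return 0;
	}

	static struct sunxi_rsb_driver demo_rsb_driver = {
		.driver = {
			.name = "demo-rsb",
		},
		.probe = demo_rsb_probe,
	};

	/* Registered from module init with sunxi_rsb_driver_register(). */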
H A Dproperty.h2 * property.h - Unified device property interface.
19 struct device;
36 bool device_property_present(struct device *dev, const char *propname);
37 int device_property_read_u8_array(struct device *dev, const char *propname,
39 int device_property_read_u16_array(struct device *dev, const char *propname,
41 int device_property_read_u32_array(struct device *dev, const char *propname,
43 int device_property_read_u64_array(struct device *dev, const char *propname,
45 int device_property_read_string_array(struct device *dev, const char *propname,
47 int device_property_read_string(struct device *dev, const char *propname,
49 int device_property_match_string(struct device *dev,
73 struct fwnode_handle *device_get_next_child_node(struct device *dev,
82 unsigned int device_get_child_node_count(struct device *dev);
84 static inline bool device_property_read_bool(struct device *dev, device_property_read_bool()
90 static inline int device_property_read_u8(struct device *dev, device_property_read_u8()
96 static inline int device_property_read_u16(struct device *dev, device_property_read_u16()
102 static inline int device_property_read_u32(struct device *dev, device_property_read_u32()
108 static inline int device_property_read_u64(struct device *dev, device_property_read_u64()
145 * struct property_entry - "Built-in" device property representation.
166 * struct property_set - Collection of "built-in" device properties.
167 * @fwnode: Handle to be pointed to by the fwnode field of struct device.
175 void device_add_property_set(struct device *dev, struct property_set *pset);
177 bool device_dma_supported(struct device *dev);
179 enum dev_dma_attr device_get_dma_attr(struct device *dev);
181 int device_get_phy_mode(struct device *dev);
183 void *device_get_mac_address(struct device *dev, char *addr, int alen);
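The unified property API above reads the same named properties whether they come from device tree, ACPI, or a built-in property_set. A minimal sketch, with a hypothetical property name:

	#include <linux/property.h>
	#include <linux/errno.h>

	static int demo_get_clock(struct device *dev, u32 *freq)
	{
		if (!device_property_present(dev, "clock-frequency"))
			return -ENODEV;

		/* Identical call path for DT, ACPI and built-in properties. */
		return device_property_read_u32(dev, "clock-frequency", freq);
	}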
H A Dof_platform.h14 #include <linux/device.h>
21 * struct of_dev_auxdata - lookup table entry for device names & platform_data
28 * the names of devices when creating devices from the device tree. The table
33 * the device name to look up a specific device, but the Linux-specific names
34 * are not encoded into the device tree, so the kernel needs to provide specific
39 * device name will not matter, and drivers should obtain data from the device
59 struct device *parent);
65 struct device *parent);
69 struct device *parent);
74 struct device *parent);
77 struct device *parent);
78 extern void of_platform_depopulate(struct device *parent);
83 struct device *parent) of_platform_populate()
89 struct device *parent) of_platform_default_populate()
93 static inline void of_platform_depopulate(struct device *parent) { } of_platform_depopulate()
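of_platform_populate()/of_platform_depopulate() above are the usual way a bus- or MFD-style driver instantiates its device-tree children. A minimal sketch; the demo functions are hypothetical:

	#include <linux/of_platform.h>
	#include <linux/platform_device.h>

	static int demo_bus_probe(struct platform_device *pdev)
	{
		/* Create platform devices for every child node of this device. */
		return of_platform_populate(pdev->dev.of_node, NULL, NULL,
					    &pdev->dev);
	}

	static int demo_bus_remove(struct platform_device *pdev)
	{
		of_platform_depopulate(&pdev->dev);
		return 0;
	}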
/linux-4.4.14/arch/score/include/asm/
H A Ddevice.h4 #include <asm-generic/device.h>
/linux-4.4.14/include/linux/platform_data/
H A Domap1_bl.h4 #include <linux/device.h>
8 int (*set_power)(struct device *dev, int state);
H A Dirda-pxaficp.h12 void (*transceiver_mode)(struct device *dev, int mode);
13 int (*startup)(struct device *dev);
14 void (*shutdown)(struct device *dev);
22 void pxa2xx_transceiver_mode(struct device *dev, int mode);
H A Dirda-sa11x0.h14 int (*startup)(struct device *);
15 void (*shutdown)(struct device *);
16 int (*set_power)(struct device *, unsigned int state);
17 void (*set_speed)(struct device *, unsigned int speed);
H A Dmmc-pxamci.h7 struct device;
13 int (*init)(struct device *, irq_handler_t , void *);
14 int (*get_ro)(struct device *);
15 int (*setpower)(struct device *, unsigned int);
16 void (*exit)(struct device *, void *);
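Board-support code fills these platform_data callbacks in before registering the device. A minimal sketch against the pxamci header above; the struct name pxamci_platform_data matches this header, the demo names are hypothetical:

	#include <linux/platform_data/mmc-pxamci.h>

	static int demo_mci_get_ro(struct device *dev)
	{
		return 0;	/* write-protect switch never set */
	}

	static struct pxamci_platform_data demo_mci_data = {
		.get_ro = demo_mci_get_ro,
	};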
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/gr/
H A Dnv20.c33 struct nvkm_device *device = gr->base.engine.subdev.device; nv20_gr_chan_fini() local
37 nvkm_mask(device, 0x400720, 0x00000001, 0x00000000); nv20_gr_chan_fini()
38 if (nvkm_rd32(device, 0x400144) & 0x00010000) nv20_gr_chan_fini()
39 chid = (nvkm_rd32(device, 0x400148) & 0x1f000000) >> 24; nv20_gr_chan_fini()
41 nvkm_wr32(device, 0x400784, inst >> 4); nv20_gr_chan_fini()
42 nvkm_wr32(device, 0x400788, 0x00000002); nv20_gr_chan_fini()
43 nvkm_msec(device, 2000, nv20_gr_chan_fini()
44 if (!nvkm_rd32(device, 0x400700)) nv20_gr_chan_fini()
47 nvkm_wr32(device, 0x400144, 0x10000000); nv20_gr_chan_fini()
48 nvkm_mask(device, 0x400148, 0xff000000, 0x1f000000); nv20_gr_chan_fini()
50 nvkm_mask(device, 0x400720, 0x00000001, 0x00000001); nv20_gr_chan_fini()
88 ret = nvkm_memory_new(gr->base.engine.subdev.device, nv20_gr_chan_new()
151 struct nvkm_device *device = gr->base.engine.subdev.device; nv20_gr_tile() local
152 struct nvkm_fifo *fifo = device->fifo; nv20_gr_tile()
158 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit); nv20_gr_tile()
159 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch); nv20_gr_tile()
160 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr); nv20_gr_tile()
162 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); nv20_gr_tile()
163 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->limit); nv20_gr_tile()
164 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); nv20_gr_tile()
165 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->pitch); nv20_gr_tile()
166 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); nv20_gr_tile()
167 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->addr); nv20_gr_tile()
169 if (device->chipset != 0x34) { nv20_gr_tile()
170 nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp); nv20_gr_tile()
171 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); nv20_gr_tile()
172 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->zcomp); nv20_gr_tile()
183 struct nvkm_device *device = subdev->device; nv20_gr_intr() local
185 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR); nv20_gr_intr()
186 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE); nv20_gr_intr()
187 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS); nv20_gr_intr()
188 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR); nv20_gr_intr()
192 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA); nv20_gr_intr()
193 u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff; nv20_gr_intr()
198 chan = nvkm_fifo_chan_chid(device->fifo, chid, &flags); nv20_gr_intr()
200 nvkm_wr32(device, NV03_PGRAPH_INTR, stat); nv20_gr_intr()
201 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001); nv20_gr_intr()
215 nvkm_fifo_chan_put(device->fifo, flags, &chan); nv20_gr_intr()
222 return nvkm_memory_new(gr->base.engine.subdev.device, nv20_gr_oneinit()
231 struct nvkm_device *device = gr->base.engine.subdev.device; nv20_gr_init() local
235 nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE, nv20_gr_init()
238 if (device->chipset == 0x20) { nv20_gr_init()
239 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000); nv20_gr_init()
241 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000); nv20_gr_init()
242 nvkm_msec(device, 2000, nv20_gr_init()
243 if (!nvkm_rd32(device, 0x400700)) nv20_gr_init()
247 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x02c80000); nv20_gr_init()
249 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000); nv20_gr_init()
250 nvkm_msec(device, 2000, nv20_gr_init()
251 if (!nvkm_rd32(device, 0x400700)) nv20_gr_init()
256 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv20_gr_init()
257 nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); nv20_gr_init()
259 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); nv20_gr_init()
260 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000); nv20_gr_init()
261 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700); nv20_gr_init()
262 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */ nv20_gr_init()
263 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000); nv20_gr_init()
264 nvkm_wr32(device, 0x40009C , 0x00000040); nv20_gr_init()
266 if (device->chipset >= 0x25) { nv20_gr_init()
267 nvkm_wr32(device, 0x400890, 0x00a8cfff); nv20_gr_init()
268 nvkm_wr32(device, 0x400610, 0x304B1FB6); nv20_gr_init()
269 nvkm_wr32(device, 0x400B80, 0x1cbd3883); nv20_gr_init()
270 nvkm_wr32(device, 0x400B84, 0x44000000); nv20_gr_init()
271 nvkm_wr32(device, 0x400098, 0x40000080); nv20_gr_init()
272 nvkm_wr32(device, 0x400B88, 0x000000ff); nv20_gr_init()
275 nvkm_wr32(device, 0x400880, 0x0008c7df); nv20_gr_init()
276 nvkm_wr32(device, 0x400094, 0x00000005); nv20_gr_init()
277 nvkm_wr32(device, 0x400B80, 0x45eae20e); nv20_gr_init()
278 nvkm_wr32(device, 0x400B84, 0x24000000); nv20_gr_init()
279 nvkm_wr32(device, 0x400098, 0x00000040); nv20_gr_init()
280 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00038); nv20_gr_init()
281 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030); nv20_gr_init()
282 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E10038); nv20_gr_init()
283 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030); nv20_gr_init()
286 nvkm_wr32(device, 0x4009a0, nvkm_rd32(device, 0x100324)); nv20_gr_init()
287 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); nv20_gr_init()
288 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, nvkm_rd32(device, 0x100324)); nv20_gr_init()
290 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100); nv20_gr_init()
291 nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF); nv20_gr_init()
293 tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) & 0x0007ff00; nv20_gr_init()
294 nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp); nv20_gr_init()
295 tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) | 0x00020100; nv20_gr_init()
296 nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp); nv20_gr_init()
299 vramsz = device->func->resource_size(device, 1) - 1; nv20_gr_init()
300 nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200)); nv20_gr_init()
301 nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204)); nv20_gr_init()
302 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); nv20_gr_init()
303 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100200)); nv20_gr_init()
304 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004); nv20_gr_init()
305 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100204)); nv20_gr_init()
306 nvkm_wr32(device, 0x400820, 0); nv20_gr_init()
307 nvkm_wr32(device, 0x400824, 0); nv20_gr_init()
308 nvkm_wr32(device, 0x400864, vramsz - 1); nv20_gr_init()
309 nvkm_wr32(device, 0x400868, vramsz - 1); nv20_gr_init()
312 nvkm_wr32(device, 0x400B20, 0x00000000); nv20_gr_init()
313 nvkm_wr32(device, 0x400B04, 0xFFFFFFFF); nv20_gr_init()
315 nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMIN, 0); nv20_gr_init()
316 nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMIN, 0); nv20_gr_init()
317 nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); nv20_gr_init()
318 nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); nv20_gr_init()
331 nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, nv20_gr_new_() argument
340 return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base); nv20_gr_new_()
372 nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) nv20_gr_new() argument
374 return nv20_gr_new_(&nv20_gr, device, index, pgr); nv20_gr_new()
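Nearly every line of these nvkm graphics files uses the same register-access idiom: nvkm_rd32()/nvkm_wr32() for raw access, nvkm_mask() for read-modify-write, and the nvkm_msec() macro to poll with a timeout, as in the wait-for-idle loops on 0x400700 above. A condensed sketch of the polling pattern (demo_gr_wait_idle() is hypothetical):

	static int demo_gr_wait_idle(struct nvkm_device *device)
	{
		/* Poll PGRAPH status (0x400700) for up to two seconds,
		 * mirroring the loops in nv20_gr_init() above. */
		if (nvkm_msec(device, 2000,
			if (!nvkm_rd32(device, 0x400700))
				break;
		) < 0)
			return -EBUSY;

		return 0;
	}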
H A Dnv30.c35 ret = nvkm_memory_new(gr->base.engine.subdev.device, nv30_gr_chan_new()
106 struct nvkm_device *device = gr->base.engine.subdev.device; nv30_gr_init() local
108 nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE, nv30_gr_init()
111 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv30_gr_init()
112 nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); nv30_gr_init()
114 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); nv30_gr_init()
115 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000); nv30_gr_init()
116 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0); nv30_gr_init()
117 nvkm_wr32(device, 0x400890, 0x01b463ff); nv30_gr_init()
118 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf2de0475); nv30_gr_init()
119 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000); nv30_gr_init()
120 nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); nv30_gr_init()
121 nvkm_wr32(device, 0x400B80, 0x1003d888); nv30_gr_init()
122 nvkm_wr32(device, 0x400B84, 0x0c000000); nv30_gr_init()
123 nvkm_wr32(device, 0x400098, 0x00000000); nv30_gr_init()
124 nvkm_wr32(device, 0x40009C, 0x0005ad00); nv30_gr_init()
125 nvkm_wr32(device, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */ nv30_gr_init()
126 nvkm_wr32(device, 0x4000a0, 0x00000000); nv30_gr_init()
127 nvkm_wr32(device, 0x4000a4, 0x00000008); nv30_gr_init()
128 nvkm_wr32(device, 0x4008a8, 0xb784a400); nv30_gr_init()
129 nvkm_wr32(device, 0x400ba0, 0x002f8685); nv30_gr_init()
130 nvkm_wr32(device, 0x400ba4, 0x00231f3f); nv30_gr_init()
131 nvkm_wr32(device, 0x4008a4, 0x40000020); nv30_gr_init()
133 if (device->chipset == 0x34) { nv30_gr_init()
134 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004); nv30_gr_init()
135 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00200201); nv30_gr_init()
136 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0008); nv30_gr_init()
137 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000008); nv30_gr_init()
138 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); nv30_gr_init()
139 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000032); nv30_gr_init()
140 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00004); nv30_gr_init()
141 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000002); nv30_gr_init()
144 nvkm_wr32(device, 0x4000c0, 0x00000016); nv30_gr_init()
146 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100); nv30_gr_init()
147 nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF); nv30_gr_init()
148 nvkm_wr32(device, 0x0040075c , 0x00000001); nv30_gr_init()
152 nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200)); nv30_gr_init()
153 nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204)); nv30_gr_init()
154 if (device->chipset != 0x34) { nv30_gr_init()
155 nvkm_wr32(device, 0x400750, 0x00EA0000); nv30_gr_init()
156 nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100200)); nv30_gr_init()
157 nvkm_wr32(device, 0x400750, 0x00EA0004); nv30_gr_init()
158 nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100204)); nv30_gr_init()
195 nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) nv30_gr_new() argument
197 return nv20_gr_new_(&nv30_gr, device, index, pgr); nv30_gr_new()
H A Dnv40.c36 return nvkm_rd32(gr->engine.subdev.device, 0x1540); nv40_gr_units()
47 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align, nv40_gr_object_bind()
79 int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size, nv40_gr_chan_bind()
84 nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj); nv40_gr_chan_bind()
97 struct nvkm_device *device = subdev->device; nv40_gr_chan_fini() local
101 nvkm_mask(device, 0x400720, 0x00000001, 0x00000000); nv40_gr_chan_fini()
103 if (nvkm_rd32(device, 0x40032c) == inst) { nv40_gr_chan_fini()
105 nvkm_wr32(device, 0x400720, 0x00000000); nv40_gr_chan_fini()
106 nvkm_wr32(device, 0x400784, inst); nv40_gr_chan_fini()
107 nvkm_mask(device, 0x400310, 0x00000020, 0x00000020); nv40_gr_chan_fini()
108 nvkm_mask(device, 0x400304, 0x00000001, 0x00000001); nv40_gr_chan_fini()
109 if (nvkm_msec(device, 2000, nv40_gr_chan_fini()
110 if (!(nvkm_rd32(device, 0x400300) & 0x00000001)) nv40_gr_chan_fini()
113 u32 insn = nvkm_rd32(device, 0x400308); nv40_gr_chan_fini()
119 nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000); nv40_gr_chan_fini()
122 if (nvkm_rd32(device, 0x400330) == inst) nv40_gr_chan_fini()
123 nvkm_mask(device, 0x400330, 0x01000000, 0x00000000); nv40_gr_chan_fini()
125 nvkm_mask(device, 0x400720, 0x00000001, 0x00000001); nv40_gr_chan_fini()
176 struct nvkm_device *device = gr->base.engine.subdev.device; nv40_gr_tile() local
177 struct nvkm_fifo *fifo = device->fifo; nv40_gr_tile()
183 switch (device->chipset) { nv40_gr_tile()
189 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch); nv40_gr_tile()
190 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit); nv40_gr_tile()
191 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr); nv40_gr_tile()
192 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch); nv40_gr_tile()
193 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit); nv40_gr_tile()
194 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr); nv40_gr_tile()
195 switch (device->chipset) { nv40_gr_tile()
198 nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp); nv40_gr_tile()
199 nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp); nv40_gr_tile()
204 nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp); nv40_gr_tile()
205 nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp); nv40_gr_tile()
214 nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch); nv40_gr_tile()
215 nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit); nv40_gr_tile()
216 nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr); nv40_gr_tile()
217 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch); nv40_gr_tile()
218 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit); nv40_gr_tile()
219 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr); nv40_gr_tile()
220 nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp); nv40_gr_tile()
221 nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp); nv40_gr_tile()
237 struct nvkm_device *device = subdev->device; nv40_gr_intr() local
238 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR); nv40_gr_intr()
239 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE); nv40_gr_intr()
240 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS); nv40_gr_intr()
241 u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff; nv40_gr_intr()
242 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR); nv40_gr_intr()
245 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA); nv40_gr_intr()
246 u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff; nv40_gr_intr()
263 nvkm_mask(device, 0x402000, 0, 0); nv40_gr_intr()
267 nvkm_wr32(device, NV03_PGRAPH_INTR, stat); nv40_gr_intr()
268 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001); nv40_gr_intr()
290 struct nvkm_device *device = gr->base.engine.subdev.device; nv40_gr_init() local
295 ret = nv40_grctx_init(device, &gr->size); nv40_gr_init()
300 nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); nv40_gr_init()
302 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv40_gr_init()
303 nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); nv40_gr_init()
305 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); nv40_gr_init()
306 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000); nv40_gr_init()
307 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0); nv40_gr_init()
308 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055); nv40_gr_init()
309 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000); nv40_gr_init()
310 nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f); nv40_gr_init()
312 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100); nv40_gr_init()
313 nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF); nv40_gr_init()
315 j = nvkm_rd32(device, 0x1540) & 0xff; nv40_gr_init()
319 nvkm_wr32(device, 0x405000, i); nv40_gr_init()
322 if (device->chipset == 0x40) { nv40_gr_init()
323 nvkm_wr32(device, 0x4009b0, 0x83280fff); nv40_gr_init()
324 nvkm_wr32(device, 0x4009b4, 0x000000a0); nv40_gr_init()
326 nvkm_wr32(device, 0x400820, 0x83280eff); nv40_gr_init()
327 nvkm_wr32(device, 0x400824, 0x000000a0); nv40_gr_init()
330 switch (device->chipset) { nv40_gr_init()
333 nvkm_wr32(device, 0x4009b8, 0x0078e366); nv40_gr_init()
334 nvkm_wr32(device, 0x4009bc, 0x0000014c); nv40_gr_init()
339 nvkm_wr32(device, 0x400828, 0x007596ff); nv40_gr_init()
340 nvkm_wr32(device, 0x40082c, 0x00000108); nv40_gr_init()
343 nvkm_wr32(device, 0x400828, 0x0072cb77); nv40_gr_init()
344 nvkm_wr32(device, 0x40082c, 0x00000108); nv40_gr_init()
351 nvkm_wr32(device, 0x400860, 0); nv40_gr_init()
352 nvkm_wr32(device, 0x400864, 0); nv40_gr_init()
357 nvkm_wr32(device, 0x400828, 0x07830610); nv40_gr_init()
358 nvkm_wr32(device, 0x40082c, 0x0000016A); nv40_gr_init()
364 nvkm_wr32(device, 0x400b38, 0x2ffff800); nv40_gr_init()
365 nvkm_wr32(device, 0x400b3c, 0x00006000); nv40_gr_init()
368 switch (device->chipset) { nv40_gr_init()
371 nvkm_wr32(device, 0x400bc4, 0x1003d888); nv40_gr_init()
372 nvkm_wr32(device, 0x400bbc, 0xb7a7b500); nv40_gr_init()
375 nvkm_wr32(device, 0x400bc4, 0x0000e024); nv40_gr_init()
376 nvkm_wr32(device, 0x400bbc, 0xb7a7b520); nv40_gr_init()
381 nvkm_wr32(device, 0x400bc4, 0x1003d888); nv40_gr_init()
382 nvkm_wr32(device, 0x400bbc, 0xb7a7b540); nv40_gr_init()
389 vramsz = device->func->resource_size(device, 1) - 1; nv40_gr_init()
390 switch (device->chipset) { nv40_gr_init()
392 nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200)); nv40_gr_init()
393 nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204)); nv40_gr_init()
394 nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200)); nv40_gr_init()
395 nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204)); nv40_gr_init()
396 nvkm_wr32(device, 0x400820, 0); nv40_gr_init()
397 nvkm_wr32(device, 0x400824, 0); nv40_gr_init()
398 nvkm_wr32(device, 0x400864, vramsz); nv40_gr_init()
399 nvkm_wr32(device, 0x400868, vramsz); nv40_gr_init()
402 switch (device->chipset) { nv40_gr_init()
410 nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200)); nv40_gr_init()
411 nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204)); nv40_gr_init()
414 nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200)); nv40_gr_init()
415 nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204)); nv40_gr_init()
418 nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200)); nv40_gr_init()
419 nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204)); nv40_gr_init()
420 nvkm_wr32(device, 0x400840, 0); nv40_gr_init()
421 nvkm_wr32(device, 0x400844, 0); nv40_gr_init()
422 nvkm_wr32(device, 0x4008A0, vramsz); nv40_gr_init()
423 nvkm_wr32(device, 0x4008A4, vramsz); nv40_gr_init()
431 nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, nv40_gr_new_() argument
441 return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base); nv40_gr_new_()
473 nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) nv40_gr_new() argument
475 return nv40_gr_new_(&nv40_gr, device, index, pgr); nv40_gr_new()
H A Dgm20b.c32 struct nvkm_device *device = gr->base.engine.subdev.device; gm20b_gr_init_gpc_mmu() local
37 nvkm_wr32(device, 0x100ce4, 0xffffffff); gm20b_gr_init_gpc_mmu()
41 val = nvkm_rd32(device, 0x100c80); gm20b_gr_init_gpc_mmu()
43 nvkm_wr32(device, 0x418880, val); gm20b_gr_init_gpc_mmu()
44 nvkm_wr32(device, 0x418890, 0); gm20b_gr_init_gpc_mmu()
45 nvkm_wr32(device, 0x418894, 0); gm20b_gr_init_gpc_mmu()
47 nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4)); gm20b_gr_init_gpc_mmu()
48 nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8)); gm20b_gr_init_gpc_mmu()
49 nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc)); gm20b_gr_init_gpc_mmu()
51 nvkm_wr32(device, 0x4188ac, nvkm_rd32(device, 0x100800)); gm20b_gr_init_gpc_mmu()
57 struct nvkm_device *device = gr->base.engine.subdev.device; gm20b_gr_set_hww_esr_report_mask() local
58 nvkm_wr32(device, 0x419e44, 0xdffffe); gm20b_gr_set_hww_esr_report_mask()
59 nvkm_wr32(device, 0x419e4c, 0x5); gm20b_gr_set_hww_esr_report_mask()
80 gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) gm20b_gr_new() argument
82 return gk20a_gr_new_(&gm20b_gr, device, index, pgr); gm20b_gr_new()
H A Dgk104.c183 struct nvkm_device *device = gr->base.engine.subdev.device; gk104_gr_init() local
190 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000); gk104_gr_init()
191 nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000); gk104_gr_init()
192 nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000); gk104_gr_init()
193 nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000); gk104_gr_init()
194 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000); gk104_gr_init()
195 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000); gk104_gr_init()
196 nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8); gk104_gr_init()
197 nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8); gk104_gr_init()
201 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001); gk104_gr_init()
214 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]); gk104_gr_init()
215 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]); gk104_gr_init()
216 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]); gk104_gr_init()
217 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]); gk104_gr_init()
220 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), gk104_gr_init()
222 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | gk104_gr_init()
224 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); gk104_gr_init()
227 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); gk104_gr_init()
228 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); gk104_gr_init()
230 nvkm_wr32(device, 0x400500, 0x00010001); gk104_gr_init()
232 nvkm_wr32(device, 0x400100, 0xffffffff); gk104_gr_init()
233 nvkm_wr32(device, 0x40013c, 0xffffffff); gk104_gr_init()
235 nvkm_wr32(device, 0x409ffc, 0x00000000); gk104_gr_init()
236 nvkm_wr32(device, 0x409c14, 0x00003e3e); gk104_gr_init()
237 nvkm_wr32(device, 0x409c24, 0x000f0001); gk104_gr_init()
238 nvkm_wr32(device, 0x404000, 0xc0000000); gk104_gr_init()
239 nvkm_wr32(device, 0x404600, 0xc0000000); gk104_gr_init()
240 nvkm_wr32(device, 0x408030, 0xc0000000); gk104_gr_init()
241 nvkm_wr32(device, 0x404490, 0xc0000000); gk104_gr_init()
242 nvkm_wr32(device, 0x406018, 0xc0000000); gk104_gr_init()
243 nvkm_wr32(device, 0x407020, 0x40000000); gk104_gr_init()
244 nvkm_wr32(device, 0x405840, 0xc0000000); gk104_gr_init()
245 nvkm_wr32(device, 0x405844, 0x00ffffff); gk104_gr_init()
246 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); gk104_gr_init()
247 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000); gk104_gr_init()
250 nvkm_wr32(device, GPC_UNIT(gpc, 0x3038), 0xc0000000); gk104_gr_init()
251 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); gk104_gr_init()
252 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); gk104_gr_init()
253 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); gk104_gr_init()
254 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); gk104_gr_init()
256 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); gk104_gr_init()
257 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); gk104_gr_init()
258 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); gk104_gr_init()
259 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); gk104_gr_init()
260 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); gk104_gr_init()
261 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); gk104_gr_init()
262 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); gk104_gr_init()
264 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); gk104_gr_init()
265 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); gk104_gr_init()
269 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000); gk104_gr_init()
270 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000); gk104_gr_init()
271 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); gk104_gr_init()
272 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); gk104_gr_init()
275 nvkm_wr32(device, 0x400108, 0xffffffff); gk104_gr_init()
276 nvkm_wr32(device, 0x400138, 0xffffffff); gk104_gr_init()
277 nvkm_wr32(device, 0x400118, 0xffffffff); gk104_gr_init()
278 nvkm_wr32(device, 0x400130, 0xffffffff); gk104_gr_init()
279 nvkm_wr32(device, 0x40011c, 0xffffffff); gk104_gr_init()
280 nvkm_wr32(device, 0x400134, 0xffffffff); gk104_gr_init()
282 nvkm_wr32(device, 0x400054, 0x34ce3464); gk104_gr_init()
327 gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) gk104_gr_new() argument
329 return gf100_gr_new_(&gk104_gr, device, index, pgr); gk104_gr_new()
H A Dgf100.c46 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_zbc_clear_color() local
48 nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]); gf100_gr_zbc_clear_color()
49 nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]); gf100_gr_zbc_clear_color()
50 nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]); gf100_gr_zbc_clear_color()
51 nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]); gf100_gr_zbc_clear_color()
53 nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format); gf100_gr_zbc_clear_color()
54 nvkm_wr32(device, 0x405820, zbc); gf100_gr_zbc_clear_color()
55 nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */ gf100_gr_zbc_clear_color()
62 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; gf100_gr_zbc_color_get()
97 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_zbc_clear_depth() local
99 nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds); gf100_gr_zbc_clear_depth()
100 nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format); gf100_gr_zbc_clear_depth()
101 nvkm_wr32(device, 0x405820, zbc); gf100_gr_zbc_clear_depth()
102 nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */ gf100_gr_zbc_clear_depth()
109 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; gf100_gr_zbc_depth_get()
233 gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data) gf100_gr_mthd_set_shader_exceptions() argument
235 nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000); gf100_gr_mthd_set_shader_exceptions()
236 nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000); gf100_gr_mthd_set_shader_exceptions()
240 gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data) gf100_gr_mthd_sw() argument
247 gf100_gr_mthd_set_shader_exceptions(device, data); gf100_gr_mthd_sw()
287 ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size, gf100_gr_chan_bind()
351 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_chan_new() local
364 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100, gf100_gr_chan_new()
378 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gf100_gr_chan_new()
684 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; gf100_gr_zbc_init()
711 struct nvkm_device *device = subdev->device; gf100_gr_wait_idle() local
720 nvkm_rd32(device, 0x400700); gf100_gr_wait_idle()
722 gr_enabled = nvkm_rd32(device, 0x200) & 0x1000; gf100_gr_wait_idle()
723 ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000; gf100_gr_wait_idle()
724 gr_busy = nvkm_rd32(device, 0x40060c) & 0x1; gf100_gr_wait_idle()
739 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_mmio() local
747 nvkm_wr32(device, addr, init->data); pack_for_each_init()
756 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_icmd() local
761 nvkm_wr32(device, 0x400208, 0x80000000); gf100_gr_icmd()
768 nvkm_wr32(device, 0x400204, init->data); pack_for_each_init()
773 nvkm_wr32(device, 0x400200, addr); pack_for_each_init()
780 nvkm_msec(device, 2000, pack_for_each_init()
781 if (!(nvkm_rd32(device, 0x400700) & 0x00000004)) pack_for_each_init()
788 nvkm_wr32(device, 0x400208, 0x00000000);
794 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_mthd() local
805 nvkm_wr32(device, 0x40448c, init->data); pack_for_each_init()
810 nvkm_wr32(device, 0x404488, ctrl | (addr << 14)); pack_for_each_init()
858 struct nvkm_device *device = subdev->device; gf100_gr_trap_gpc_rop() local
862 trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff; gf100_gr_trap_gpc_rop()
863 trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434)); gf100_gr_trap_gpc_rop()
864 trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438)); gf100_gr_trap_gpc_rop()
865 trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c)); gf100_gr_trap_gpc_rop()
873 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); gf100_gr_trap_gpc_rop()
900 struct nvkm_device *device = subdev->device; gf100_gr_trap_mp() local
901 u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648)); gf100_gr_trap_mp()
902 u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650)); gf100_gr_trap_mp()
913 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000); gf100_gr_trap_mp()
914 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr); gf100_gr_trap_mp()
921 struct nvkm_device *device = subdev->device; gf100_gr_trap_tpc() local
922 u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508)); gf100_gr_trap_tpc()
925 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224)); gf100_gr_trap_tpc()
927 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000); gf100_gr_trap_tpc()
937 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084)); gf100_gr_trap_tpc()
939 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000); gf100_gr_trap_tpc()
944 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c)); gf100_gr_trap_tpc()
946 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000); gf100_gr_trap_tpc()
959 struct nvkm_device *device = subdev->device; gf100_gr_trap_gpc() local
960 u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90)); gf100_gr_trap_gpc()
969 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900)); gf100_gr_trap_gpc()
971 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); gf100_gr_trap_gpc()
976 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028)); gf100_gr_trap_gpc()
978 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); gf100_gr_trap_gpc()
983 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824)); gf100_gr_trap_gpc()
985 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); gf100_gr_trap_gpc()
993 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask); gf100_gr_trap_gpc()
1007 struct nvkm_device *device = subdev->device; gf100_gr_trap_intr() local
1008 u32 trap = nvkm_rd32(device, 0x400108); gf100_gr_trap_intr()
1012 u32 stat = nvkm_rd32(device, 0x404000); gf100_gr_trap_intr()
1014 nvkm_wr32(device, 0x404000, 0xc0000000); gf100_gr_trap_intr()
1015 nvkm_wr32(device, 0x400108, 0x00000001); gf100_gr_trap_intr()
1020 u32 stat = nvkm_rd32(device, 0x404600); gf100_gr_trap_intr()
1022 nvkm_wr32(device, 0x404600, 0xc0000000); gf100_gr_trap_intr()
1023 nvkm_wr32(device, 0x400108, 0x00000002); gf100_gr_trap_intr()
1028 u32 stat = nvkm_rd32(device, 0x408030); gf100_gr_trap_intr()
1030 nvkm_wr32(device, 0x408030, 0xc0000000); gf100_gr_trap_intr()
1031 nvkm_wr32(device, 0x400108, 0x00000008); gf100_gr_trap_intr()
1036 u32 stat = nvkm_rd32(device, 0x405840); gf100_gr_trap_intr()
1038 nvkm_wr32(device, 0x405840, 0xc0000000); gf100_gr_trap_intr()
1039 nvkm_wr32(device, 0x400108, 0x00000010); gf100_gr_trap_intr()
1044 u32 stat = nvkm_rd32(device, 0x40601c); gf100_gr_trap_intr()
1046 nvkm_wr32(device, 0x40601c, 0xc0000000); gf100_gr_trap_intr()
1047 nvkm_wr32(device, 0x400108, 0x00000040); gf100_gr_trap_intr()
1052 u32 stat = nvkm_rd32(device, 0x404490); gf100_gr_trap_intr()
1054 nvkm_wr32(device, 0x404490, 0xc0000000); gf100_gr_trap_intr()
1055 nvkm_wr32(device, 0x400108, 0x00000080); gf100_gr_trap_intr()
1060 u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff; gf100_gr_trap_intr()
1067 nvkm_wr32(device, 0x407020, 0x40000000); gf100_gr_trap_intr()
1068 nvkm_wr32(device, 0x400108, 0x00000100); gf100_gr_trap_intr()
1073 u32 stat = nvkm_rd32(device, 0x400118); gf100_gr_trap_intr()
1078 nvkm_wr32(device, 0x400118, mask); gf100_gr_trap_intr()
1082 nvkm_wr32(device, 0x400108, 0x01000000); gf100_gr_trap_intr()
1088 u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070)); gf100_gr_trap_intr()
1089 u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144)); gf100_gr_trap_intr()
1092 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000); gf100_gr_trap_intr()
1093 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000); gf100_gr_trap_intr()
1095 nvkm_wr32(device, 0x400108, 0x02000000); gf100_gr_trap_intr()
1101 nvkm_wr32(device, 0x400108, trap); gf100_gr_trap_intr()
1109 struct nvkm_device *device = subdev->device; gf100_gr_ctxctl_debug_unit() local
1111 nvkm_rd32(device, base + 0x400)); gf100_gr_ctxctl_debug_unit()
1113 nvkm_rd32(device, base + 0x800), gf100_gr_ctxctl_debug_unit()
1114 nvkm_rd32(device, base + 0x804), gf100_gr_ctxctl_debug_unit()
1115 nvkm_rd32(device, base + 0x808), gf100_gr_ctxctl_debug_unit()
1116 nvkm_rd32(device, base + 0x80c)); gf100_gr_ctxctl_debug_unit()
1118 nvkm_rd32(device, base + 0x810), gf100_gr_ctxctl_debug_unit()
1119 nvkm_rd32(device, base + 0x814), gf100_gr_ctxctl_debug_unit()
1120 nvkm_rd32(device, base + 0x818), gf100_gr_ctxctl_debug_unit()
1121 nvkm_rd32(device, base + 0x81c)); gf100_gr_ctxctl_debug_unit()
1127 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_ctxctl_debug() local
1128 u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff; gf100_gr_ctxctl_debug()
1140 struct nvkm_device *device = subdev->device; gf100_gr_ctxctl_isr() local
1141 u32 stat = nvkm_rd32(device, 0x409c18); gf100_gr_ctxctl_isr()
1144 u32 code = nvkm_rd32(device, 0x409814); gf100_gr_ctxctl_isr()
1146 u32 class = nvkm_rd32(device, 0x409808); gf100_gr_ctxctl_isr()
1147 u32 addr = nvkm_rd32(device, 0x40980c); gf100_gr_ctxctl_isr()
1150 u32 data = nvkm_rd32(device, 0x409810); gf100_gr_ctxctl_isr()
1156 nvkm_wr32(device, 0x409c20, 0x00000001); gf100_gr_ctxctl_isr()
1166 nvkm_wr32(device, 0x409c20, 0x00080000); gf100_gr_ctxctl_isr()
1173 nvkm_wr32(device, 0x409c20, stat); gf100_gr_ctxctl_isr()
1182 struct nvkm_device *device = subdev->device; gf100_gr_intr() local
1185 u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff; gf100_gr_intr()
1186 u32 stat = nvkm_rd32(device, 0x400100); gf100_gr_intr()
1187 u32 addr = nvkm_rd32(device, 0x400704); gf100_gr_intr()
1190 u32 data = nvkm_rd32(device, 0x400708); gf100_gr_intr()
1191 u32 code = nvkm_rd32(device, 0x400110); gf100_gr_intr()
1196 chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags); gf100_gr_intr()
1202 if (device->card_type < NV_E0 || subc < 4) gf100_gr_intr()
1203 class = nvkm_rd32(device, 0x404200 + (subc * 4)); gf100_gr_intr()
1212 nvkm_wr32(device, 0x400100, 0x00000001); gf100_gr_intr()
1217 if (!gf100_gr_mthd_sw(device, class, mthd, data)) { gf100_gr_intr()
1223 nvkm_wr32(device, 0x400100, 0x00000010); gf100_gr_intr()
1231 nvkm_wr32(device, 0x400100, 0x00000020); gf100_gr_intr()
1242 nvkm_wr32(device, 0x400100, 0x00100000); gf100_gr_intr()
1250 nvkm_wr32(device, 0x400100, 0x00200000); gf100_gr_intr()
1256 nvkm_wr32(device, 0x400100, 0x00080000); gf100_gr_intr()
1262 nvkm_wr32(device, 0x400100, stat); gf100_gr_intr()
1265 nvkm_wr32(device, 0x400500, 0x00010001); gf100_gr_intr()
1266 nvkm_fifo_chan_put(device->fifo, flags, &chan); gf100_gr_intr()
1273 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_init_fw() local
1276 nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000); gf100_gr_init_fw()
1278 nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]); gf100_gr_init_fw()
1280 nvkm_wr32(device, fuc_base + 0x0180, 0x01000000); gf100_gr_init_fw()
1283 nvkm_wr32(device, fuc_base + 0x0188, i >> 6); gf100_gr_init_fw()
1284 nvkm_wr32(device, fuc_base + 0x0184, code->data[i]); gf100_gr_init_fw()
1289 nvkm_wr32(device, fuc_base + 0x0184, 0); gf100_gr_init_fw()
1297 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_init_csdata() local
1303 nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar); gf100_gr_init_csdata()
1304 star = nvkm_rd32(device, falcon + 0x01c4); gf100_gr_init_csdata()
1305 temp = nvkm_rd32(device, falcon + 0x01c4); gf100_gr_init_csdata()
1308 nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star); gf100_gr_init_csdata()
1317 nvkm_wr32(device, falcon + 0x01c4, data); pack_for_each_init()
1329 nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr);
1330 nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar);
1331 nvkm_wr32(device, falcon + 0x01c4, star + 4);
1339 struct nvkm_device *device = subdev->device; gf100_gr_init_ctxctl() local
1344 nvkm_mc_unk260(device->mc, 0); gf100_gr_init_ctxctl()
1347 nvkm_mc_unk260(device->mc, 1); gf100_gr_init_ctxctl()
1350 nvkm_wr32(device, 0x409840, 0xffffffff); gf100_gr_init_ctxctl()
1351 nvkm_wr32(device, 0x41a10c, 0x00000000); gf100_gr_init_ctxctl()
1352 nvkm_wr32(device, 0x40910c, 0x00000000); gf100_gr_init_ctxctl()
1353 nvkm_wr32(device, 0x41a100, 0x00000002); gf100_gr_init_ctxctl()
1354 nvkm_wr32(device, 0x409100, 0x00000002); gf100_gr_init_ctxctl()
1355 if (nvkm_msec(device, 2000, gf100_gr_init_ctxctl()
1356 if (nvkm_rd32(device, 0x409800) & 0x00000001) gf100_gr_init_ctxctl()
1361 nvkm_wr32(device, 0x409840, 0xffffffff); gf100_gr_init_ctxctl()
1362 nvkm_wr32(device, 0x409500, 0x7fffffff); gf100_gr_init_ctxctl()
1363 nvkm_wr32(device, 0x409504, 0x00000021); gf100_gr_init_ctxctl()
1365 nvkm_wr32(device, 0x409840, 0xffffffff); gf100_gr_init_ctxctl()
1366 nvkm_wr32(device, 0x409500, 0x00000000); gf100_gr_init_ctxctl()
1367 nvkm_wr32(device, 0x409504, 0x00000010); gf100_gr_init_ctxctl()
1368 if (nvkm_msec(device, 2000, gf100_gr_init_ctxctl()
1369 if ((gr->size = nvkm_rd32(device, 0x409800))) gf100_gr_init_ctxctl()
1374 nvkm_wr32(device, 0x409840, 0xffffffff); gf100_gr_init_ctxctl()
1375 nvkm_wr32(device, 0x409500, 0x00000000); gf100_gr_init_ctxctl()
1376 nvkm_wr32(device, 0x409504, 0x00000016); gf100_gr_init_ctxctl()
1377 if (nvkm_msec(device, 2000, gf100_gr_init_ctxctl()
1378 if (nvkm_rd32(device, 0x409800)) gf100_gr_init_ctxctl()
1383 nvkm_wr32(device, 0x409840, 0xffffffff); gf100_gr_init_ctxctl()
1384 nvkm_wr32(device, 0x409500, 0x00000000); gf100_gr_init_ctxctl()
1385 nvkm_wr32(device, 0x409504, 0x00000025); gf100_gr_init_ctxctl()
1386 if (nvkm_msec(device, 2000, gf100_gr_init_ctxctl()
1387 if (nvkm_rd32(device, 0x409800)) gf100_gr_init_ctxctl()
1392 if (device->chipset >= 0xe0) { gf100_gr_init_ctxctl()
1393 nvkm_wr32(device, 0x409800, 0x00000000); gf100_gr_init_ctxctl()
1394 nvkm_wr32(device, 0x409500, 0x00000001); gf100_gr_init_ctxctl()
1395 nvkm_wr32(device, 0x409504, 0x00000030); gf100_gr_init_ctxctl()
1396 if (nvkm_msec(device, 2000, gf100_gr_init_ctxctl()
1397 if (nvkm_rd32(device, 0x409800)) gf100_gr_init_ctxctl()
1402 nvkm_wr32(device, 0x409810, 0xb00095c8); gf100_gr_init_ctxctl()
1403 nvkm_wr32(device, 0x409800, 0x00000000); gf100_gr_init_ctxctl()
1404 nvkm_wr32(device, 0x409500, 0x00000001); gf100_gr_init_ctxctl()
1405 nvkm_wr32(device, 0x409504, 0x00000031); gf100_gr_init_ctxctl()
1406 if (nvkm_msec(device, 2000, gf100_gr_init_ctxctl()
1407 if (nvkm_rd32(device, 0x409800)) gf100_gr_init_ctxctl()
1412 nvkm_wr32(device, 0x409810, 0x00080420); gf100_gr_init_ctxctl()
1413 nvkm_wr32(device, 0x409800, 0x00000000); gf100_gr_init_ctxctl()
1414 nvkm_wr32(device, 0x409500, 0x00000001); gf100_gr_init_ctxctl()
1415 nvkm_wr32(device, 0x409504, 0x00000032); gf100_gr_init_ctxctl()
1416 if (nvkm_msec(device, 2000, gf100_gr_init_ctxctl()
1417 if (nvkm_rd32(device, 0x409800)) gf100_gr_init_ctxctl()
1422 nvkm_wr32(device, 0x409614, 0x00000070); gf100_gr_init_ctxctl()
1423 nvkm_wr32(device, 0x409614, 0x00000770); gf100_gr_init_ctxctl()
1424 nvkm_wr32(device, 0x40802c, 0x00000001); gf100_gr_init_ctxctl()
1442 nvkm_mc_unk260(device->mc, 0); gf100_gr_init_ctxctl()
1443 nvkm_wr32(device, 0x4091c0, 0x01000000); gf100_gr_init_ctxctl()
1445 nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]); gf100_gr_init_ctxctl()
1447 nvkm_wr32(device, 0x409180, 0x01000000); gf100_gr_init_ctxctl()
1450 nvkm_wr32(device, 0x409188, i >> 6); gf100_gr_init_ctxctl()
1451 nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]); gf100_gr_init_ctxctl()
1455 nvkm_wr32(device, 0x41a1c0, 0x01000000); gf100_gr_init_ctxctl()
1457 nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]); gf100_gr_init_ctxctl()
1459 nvkm_wr32(device, 0x41a180, 0x01000000); gf100_gr_init_ctxctl()
1462 nvkm_wr32(device, 0x41a188, i >> 6); gf100_gr_init_ctxctl()
1463 nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]); gf100_gr_init_ctxctl()
1465 nvkm_mc_unk260(device->mc, 1); gf100_gr_init_ctxctl()
1474 nvkm_wr32(device, 0x40910c, 0x00000000); gf100_gr_init_ctxctl()
1475 nvkm_wr32(device, 0x409100, 0x00000002); gf100_gr_init_ctxctl()
1476 if (nvkm_msec(device, 2000, gf100_gr_init_ctxctl()
1477 if (nvkm_rd32(device, 0x409800) & 0x80000000) gf100_gr_init_ctxctl()
1484 gr->size = nvkm_rd32(device, 0x409804); gf100_gr_init_ctxctl()
1500 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_oneinit() local
1503 nvkm_pmu_pgob(device->pmu, false); gf100_gr_oneinit()
1505 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false, gf100_gr_oneinit()
1510 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false, gf100_gr_oneinit()
1525 gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16; gf100_gr_oneinit()
1526 gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f; gf100_gr_oneinit()
1528 gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608)); gf100_gr_oneinit()
1532 u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); gf100_gr_oneinit()
1540 switch (device->chipset) { gf100_gr_oneinit()
1585 nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); gf100_gr_init_()
1631 struct nvkm_device *device = subdev->device; gf100_gr_ctor_fw() local
1638 /* Convert device name to lowercase */ gf100_gr_ctor_fw()
1639 strncpy(cname, device->chip->name, sizeof(cname)); gf100_gr_ctor_fw()
1648 ret = request_firmware(&fw, f, device->dev); gf100_gr_ctor_fw()
1661 gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device, gf100_gr_ctor() argument
1667 gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW", gf100_gr_ctor()
1670 ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000, gf100_gr_ctor()
1689 gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, gf100_gr_new_() argument
1696 return gf100_gr_ctor(func, device, index, gr); gf100_gr_new_()
1702 struct nvkm_device *device = gr->base.engine.subdev.device; gf100_gr_init() local
1709 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000); gf100_gr_init()
1710 nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000); gf100_gr_init()
1711 nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000); gf100_gr_init()
1712 nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000); gf100_gr_init()
1713 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000); gf100_gr_init()
1714 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000); gf100_gr_init()
1715 nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8); gf100_gr_init()
1716 nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8); gf100_gr_init()
1720 nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001); gf100_gr_init()
1732 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]); gf100_gr_init()
1733 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]); gf100_gr_init()
1734 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]); gf100_gr_init()
1735 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]); gf100_gr_init()
1738 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), gf100_gr_init()
1740 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | gf100_gr_init()
1742 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); gf100_gr_init()
1745 if (device->chipset != 0xd7) gf100_gr_init()
1746 nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918); gf100_gr_init()
1748 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); gf100_gr_init()
1750 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); gf100_gr_init()
1752 nvkm_wr32(device, 0x400500, 0x00010001); gf100_gr_init()
1754 nvkm_wr32(device, 0x400100, 0xffffffff); gf100_gr_init()
1755 nvkm_wr32(device, 0x40013c, 0xffffffff); gf100_gr_init()
1757 nvkm_wr32(device, 0x409c24, 0x000f0000); gf100_gr_init()
1758 nvkm_wr32(device, 0x404000, 0xc0000000); gf100_gr_init()
1759 nvkm_wr32(device, 0x404600, 0xc0000000); gf100_gr_init()
1760 nvkm_wr32(device, 0x408030, 0xc0000000); gf100_gr_init()
1761 nvkm_wr32(device, 0x40601c, 0xc0000000); gf100_gr_init()
1762 nvkm_wr32(device, 0x404490, 0xc0000000); gf100_gr_init()
1763 nvkm_wr32(device, 0x406018, 0xc0000000); gf100_gr_init()
1764 nvkm_wr32(device, 0x405840, 0xc0000000); gf100_gr_init()
1765 nvkm_wr32(device, 0x405844, 0x00ffffff); gf100_gr_init()
1766 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); gf100_gr_init()
1767 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000); gf100_gr_init()
1770 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); gf100_gr_init()
1771 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); gf100_gr_init()
1772 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); gf100_gr_init()
1773 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); gf100_gr_init()
1775 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); gf100_gr_init()
1776 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); gf100_gr_init()
1777 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); gf100_gr_init()
1778 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); gf100_gr_init()
1779 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); gf100_gr_init()
1780 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); gf100_gr_init()
1781 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); gf100_gr_init()
1783 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); gf100_gr_init()
1784 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); gf100_gr_init()
1788 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000); gf100_gr_init()
1789 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000); gf100_gr_init()
1790 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); gf100_gr_init()
1791 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); gf100_gr_init()
1794 nvkm_wr32(device, 0x400108, 0xffffffff); gf100_gr_init()
1795 nvkm_wr32(device, 0x400138, 0xffffffff); gf100_gr_init()
1796 nvkm_wr32(device, 0x400118, 0xffffffff); gf100_gr_init()
1797 nvkm_wr32(device, 0x400130, 0xffffffff); gf100_gr_init()
1798 nvkm_wr32(device, 0x40011c, 0xffffffff); gf100_gr_init()
1799 nvkm_wr32(device, 0x400134, 0xffffffff); gf100_gr_init()
1801 nvkm_wr32(device, 0x400054, 0x34ce3464); gf100_gr_init()
1845 gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) gf100_gr_new() argument
1847 return gf100_gr_new_(&gf100_gr, device, index, pgr); gf100_gr_new()
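gf100_gr_init_ctxctl() above repeats one handshake with the FECS falcon: write an argument to 0x409500, a method number to 0x409504, then poll 0x409800 for the result. Factored out as a sketch (demo_fecs_mthd() is hypothetical):

	static int demo_fecs_mthd(struct nvkm_device *device, u32 mthd, u32 data)
	{
		nvkm_wr32(device, 0x409840, 0xffffffff);	/* ack stale status */
		nvkm_wr32(device, 0x409500, data);		/* method argument */
		nvkm_wr32(device, 0x409504, mthd);		/* method number */

		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800))
				break;
		) < 0)
			return -EBUSY;

		return 0;
	}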
H A Dnv50.c33 return nvkm_rd32(gr->engine.subdev.device, 0x1540); nv50_gr_units()
44 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, nv50_gr_object_bind()
71 int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size, nv50_gr_chan_bind()
75 nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj); nv50_gr_chan_bind()
241 struct nvkm_device *device = subdev->device; nv50_gr_prop_trap() local
242 u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04); nv50_gr_prop_trap()
243 u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08); nv50_gr_prop_trap()
244 u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c); nv50_gr_prop_trap()
245 u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10); nv50_gr_prop_trap()
246 u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14); nv50_gr_prop_trap()
247 u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18); nv50_gr_prop_trap()
248 u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c); nv50_gr_prop_trap()
283 struct nvkm_device *device = subdev->device; nv50_gr_mp_trap() local
284 u32 units = nvkm_rd32(device, 0x1540); nv50_gr_mp_trap()
292 if (device->chipset < 0xa0) nv50_gr_mp_trap()
296 mp10 = nvkm_rd32(device, addr + 0x10); nv50_gr_mp_trap()
297 status = nvkm_rd32(device, addr + 0x14); nv50_gr_mp_trap()
301 nvkm_rd32(device, addr + 0x20); nv50_gr_mp_trap()
302 pc = nvkm_rd32(device, addr + 0x24); nv50_gr_mp_trap()
303 oplow = nvkm_rd32(device, addr + 0x70); nv50_gr_mp_trap()
304 ophigh = nvkm_rd32(device, addr + 0x74); nv50_gr_mp_trap()
313 nvkm_wr32(device, addr + 0x10, mp10); nv50_gr_mp_trap()
314 nvkm_wr32(device, addr + 0x14, 0); nv50_gr_mp_trap()
327 struct nvkm_device *device = subdev->device; nv50_gr_tp_trap() local
328 u32 units = nvkm_rd32(device, 0x1540); nv50_gr_tp_trap()
336 if (device->chipset < 0xa0) nv50_gr_tp_trap()
340 ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff; nv50_gr_tp_trap()
350 nvkm_rd32(device, r)); nv50_gr_tp_trap()
385 nvkm_wr32(device, ustatus_addr, 0xc0000000); nv50_gr_tp_trap()
397 struct nvkm_device *device = subdev->device; nv50_gr_trap_handler() local
398 u32 status = nvkm_rd32(device, 0x400108); nv50_gr_trap_handler()
411 ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff; nv50_gr_trap_handler()
416 nvkm_wr32(device, 0x400500, 0x00000000); nv50_gr_trap_handler()
420 u32 addr = nvkm_rd32(device, 0x400808); nv50_gr_trap_handler()
423 u32 datal = nvkm_rd32(device, 0x40080c); nv50_gr_trap_handler()
424 u32 datah = nvkm_rd32(device, 0x400810); nv50_gr_trap_handler()
425 u32 class = nvkm_rd32(device, 0x400814); nv50_gr_trap_handler()
426 u32 r848 = nvkm_rd32(device, 0x400848); nv50_gr_trap_handler()
441 nvkm_wr32(device, 0x400808, 0); nv50_gr_trap_handler()
442 nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3); nv50_gr_trap_handler()
443 nvkm_wr32(device, 0x400848, 0); nv50_gr_trap_handler()
448 u32 addr = nvkm_rd32(device, 0x40084c); nv50_gr_trap_handler()
451 u32 data = nvkm_rd32(device, 0x40085c); nv50_gr_trap_handler()
452 u32 class = nvkm_rd32(device, 0x400814); nv50_gr_trap_handler()
466 nvkm_wr32(device, 0x40084c, 0); nv50_gr_trap_handler()
475 nvkm_wr32(device, 0x400804, 0xc0000000); nv50_gr_trap_handler()
476 nvkm_wr32(device, 0x400108, 0x001); nv50_gr_trap_handler()
484 u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff; nv50_gr_trap_handler()
491 nvkm_rd32(device, 0x406804), nv50_gr_trap_handler()
492 nvkm_rd32(device, 0x406808), nv50_gr_trap_handler()
493 nvkm_rd32(device, 0x40680c), nv50_gr_trap_handler()
494 nvkm_rd32(device, 0x406810)); nv50_gr_trap_handler()
498 nvkm_wr32(device, 0x400040, 2); nv50_gr_trap_handler()
499 nvkm_wr32(device, 0x400040, 0); nv50_gr_trap_handler()
500 nvkm_wr32(device, 0x406800, 0xc0000000); nv50_gr_trap_handler()
501 nvkm_wr32(device, 0x400108, 0x002); nv50_gr_trap_handler()
507 u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff; nv50_gr_trap_handler()
514 nvkm_rd32(device, 0x400c00), nv50_gr_trap_handler()
515 nvkm_rd32(device, 0x400c08), nv50_gr_trap_handler()
516 nvkm_rd32(device, 0x400c0c), nv50_gr_trap_handler()
517 nvkm_rd32(device, 0x400c10)); nv50_gr_trap_handler()
520 nvkm_wr32(device, 0x400c04, 0xc0000000); nv50_gr_trap_handler()
521 nvkm_wr32(device, 0x400108, 0x004); nv50_gr_trap_handler()
527 ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff; nv50_gr_trap_handler()
534 nvkm_rd32(device, 0x401804), nv50_gr_trap_handler()
535 nvkm_rd32(device, 0x401808), nv50_gr_trap_handler()
536 nvkm_rd32(device, 0x40180c), nv50_gr_trap_handler()
537 nvkm_rd32(device, 0x401810)); nv50_gr_trap_handler()
541 nvkm_wr32(device, 0x400040, 0x80); nv50_gr_trap_handler()
542 nvkm_wr32(device, 0x400040, 0); nv50_gr_trap_handler()
543 nvkm_wr32(device, 0x401800, 0xc0000000); nv50_gr_trap_handler()
544 nvkm_wr32(device, 0x400108, 0x008); nv50_gr_trap_handler()
550 ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff; nv50_gr_trap_handler()
558 nvkm_rd32(device, 0x405000), nv50_gr_trap_handler()
559 nvkm_rd32(device, 0x405004), nv50_gr_trap_handler()
560 nvkm_rd32(device, 0x405008), nv50_gr_trap_handler()
561 nvkm_rd32(device, 0x40500c), nv50_gr_trap_handler()
562 nvkm_rd32(device, 0x405010), nv50_gr_trap_handler()
563 nvkm_rd32(device, 0x405014), nv50_gr_trap_handler()
564 nvkm_rd32(device, 0x40501c)); nv50_gr_trap_handler()
567 nvkm_wr32(device, 0x405018, 0xc0000000); nv50_gr_trap_handler()
568 nvkm_wr32(device, 0x400108, 0x010); nv50_gr_trap_handler()
576 ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff; nv50_gr_trap_handler()
579 nvkm_wr32(device, 0x402000, 0xc0000000); nv50_gr_trap_handler()
587 nvkm_wr32(device, 0x400108, 0x040); nv50_gr_trap_handler()
595 nvkm_wr32(device, 0x400108, 0x080); nv50_gr_trap_handler()
604 nvkm_wr32(device, 0x400108, 0x100); nv50_gr_trap_handler()
611 nvkm_wr32(device, 0x400108, status); nv50_gr_trap_handler()
622 struct nvkm_device *device = subdev->device; nv50_gr_intr() local
624 u32 stat = nvkm_rd32(device, 0x400100); nv50_gr_intr()
625 u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff; nv50_gr_intr()
626 u32 addr = nvkm_rd32(device, 0x400704); nv50_gr_intr()
629 u32 data = nvkm_rd32(device, 0x400708); nv50_gr_intr()
630 u32 class = nvkm_rd32(device, 0x400814); nv50_gr_intr()
638 chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags); nv50_gr_intr()
645 u32 ecode = nvkm_rd32(device, 0x400110); nv50_gr_intr()
658 nvkm_wr32(device, 0x400100, stat); nv50_gr_intr()
659 nvkm_wr32(device, 0x400500, 0x00010001); nv50_gr_intr()
670 if (nvkm_rd32(device, 0x400824) & (1 << 31)) nv50_gr_intr()
671 nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31)); nv50_gr_intr()
673 nvkm_fifo_chan_put(device->fifo, flags, &chan); nv50_gr_intr()
680 struct nvkm_device *device = gr->base.engine.subdev.device; nv50_gr_init() local
684 nvkm_wr32(device, 0x40008c, 0x00000004); nv50_gr_init()
687 nvkm_wr32(device, 0x400804, 0xc0000000); nv50_gr_init()
688 nvkm_wr32(device, 0x406800, 0xc0000000); nv50_gr_init()
689 nvkm_wr32(device, 0x400c04, 0xc0000000); nv50_gr_init()
690 nvkm_wr32(device, 0x401800, 0xc0000000); nv50_gr_init()
691 nvkm_wr32(device, 0x405018, 0xc0000000); nv50_gr_init()
692 nvkm_wr32(device, 0x402000, 0xc0000000); nv50_gr_init()
694 units = nvkm_rd32(device, 0x001540); nv50_gr_init()
699 if (device->chipset < 0xa0) { nv50_gr_init()
700 nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000); nv50_gr_init()
701 nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000); nv50_gr_init()
702 nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000); nv50_gr_init()
704 nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000); nv50_gr_init()
705 nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000); nv50_gr_init()
706 nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000); nv50_gr_init()
710 nvkm_wr32(device, 0x400108, 0xffffffff); nv50_gr_init()
711 nvkm_wr32(device, 0x400138, 0xffffffff); nv50_gr_init()
712 nvkm_wr32(device, 0x400100, 0xffffffff); nv50_gr_init()
713 nvkm_wr32(device, 0x40013c, 0xffffffff); nv50_gr_init()
714 nvkm_wr32(device, 0x400500, 0x00010001); nv50_gr_init()
717 ret = nv50_grctx_init(device, &gr->size); nv50_gr_init()
721 nvkm_wr32(device, 0x400824, 0x00000000); nv50_gr_init()
722 nvkm_wr32(device, 0x400828, 0x00000000); nv50_gr_init()
723 nvkm_wr32(device, 0x40082c, 0x00000000); nv50_gr_init()
724 nvkm_wr32(device, 0x400830, 0x00000000); nv50_gr_init()
725 nvkm_wr32(device, 0x40032c, 0x00000000); nv50_gr_init()
726 nvkm_wr32(device, 0x400330, 0x00000000); nv50_gr_init()
729 switch (device->chipset & 0xf0) { nv50_gr_init()
733 nvkm_wr32(device, 0x402ca8, 0x00000800); nv50_gr_init()
737 if (device->chipset == 0xa0 || nv50_gr_init()
738 device->chipset == 0xaa || nv50_gr_init()
739 device->chipset == 0xac) { nv50_gr_init()
740 nvkm_wr32(device, 0x402ca8, 0x00000802); nv50_gr_init()
742 nvkm_wr32(device, 0x402cc0, 0x00000000); nv50_gr_init()
743 nvkm_wr32(device, 0x402ca8, 0x00000002); nv50_gr_init()
751 nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000); nv50_gr_init()
752 nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000); nv50_gr_init()
753 nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000); nv50_gr_init()
754 nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000); nv50_gr_init()
761 nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, nv50_gr_new_() argument
771 return nvkm_gr_ctor(func, device, index, 0x00201000, true, &gr->base); nv50_gr_new_()
791 nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) nv50_gr_new() argument
793 return nv50_gr_new_(&nv50_gr, device, index, pgr); nv50_gr_new()
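Nearly every hit in the gr/ results above is one of three MMIO helpers: nvkm_rd32() reads a register, nvkm_wr32() writes one, and nvkm_mask() does a read/modify/write. A minimal sketch of that pattern, assuming a flat BAR0 mapping — the stand-in struct and function names below are illustrative, not the nouveau definitions:

/* stand-ins for the nvkm register helpers; the mmio mapping is assumed */
#include <stdint.h>

typedef uint32_t u32;

struct nvkm_device_sketch {
        volatile u32 *mmio;     /* assumed flat BAR0 mapping */
};

static u32 rd32(struct nvkm_device_sketch *device, u32 addr)
{
        return device->mmio[addr / 4];
}

static void wr32(struct nvkm_device_sketch *device, u32 addr, u32 data)
{
        device->mmio[addr / 4] = data;
}

/* read/modify/write: clear the bits in 'mask', or-in 'data', return old value */
static u32 mask32(struct nvkm_device_sketch *device, u32 addr, u32 mask, u32 data)
{
        u32 temp = rd32(device, addr);
        wr32(device, addr, (temp & ~mask) | data);
        return temp;
}

Lines such as "nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31))" in nv50_gr_intr() are the open-coded form of the same read/modify/write.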
gm204.c:241 struct nvkm_device *device = gr->base.engine.subdev.device; gm204_gr_init() local
248 tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */ gm204_gr_init()
249 nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff)); gm204_gr_init()
250 nvkm_wr32(device, 0x418890, 0x00000000); gm204_gr_init()
251 nvkm_wr32(device, 0x418894, 0x00000000); gm204_gr_init()
252 nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8); gm204_gr_init()
253 nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8); gm204_gr_init()
254 nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000); gm204_gr_init()
257 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8); gm204_gr_init()
258 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8); gm204_gr_init()
259 nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000); gm204_gr_init()
265 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001); gm204_gr_init()
278 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]); gm204_gr_init()
279 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]); gm204_gr_init()
280 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]); gm204_gr_init()
281 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]); gm204_gr_init()
284 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), gm204_gr_init()
286 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | gm204_gr_init()
288 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); gm204_gr_init()
291 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); gm204_gr_init()
292 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); gm204_gr_init()
293 nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804)); gm204_gr_init()
295 nvkm_wr32(device, 0x400500, 0x00010001); gm204_gr_init()
296 nvkm_wr32(device, 0x400100, 0xffffffff); gm204_gr_init()
297 nvkm_wr32(device, 0x40013c, 0xffffffff); gm204_gr_init()
298 nvkm_wr32(device, 0x400124, 0x00000002); gm204_gr_init()
299 nvkm_wr32(device, 0x409c24, 0x000e0000); gm204_gr_init()
300 nvkm_wr32(device, 0x405848, 0xc0000000); gm204_gr_init()
301 nvkm_wr32(device, 0x40584c, 0x00000001); gm204_gr_init()
302 nvkm_wr32(device, 0x404000, 0xc0000000); gm204_gr_init()
303 nvkm_wr32(device, 0x404600, 0xc0000000); gm204_gr_init()
304 nvkm_wr32(device, 0x408030, 0xc0000000); gm204_gr_init()
305 nvkm_wr32(device, 0x404490, 0xc0000000); gm204_gr_init()
306 nvkm_wr32(device, 0x406018, 0xc0000000); gm204_gr_init()
307 nvkm_wr32(device, 0x407020, 0x40000000); gm204_gr_init()
308 nvkm_wr32(device, 0x405840, 0xc0000000); gm204_gr_init()
309 nvkm_wr32(device, 0x405844, 0x00ffffff); gm204_gr_init()
310 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); gm204_gr_init()
314 nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); gm204_gr_init()
315 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); gm204_gr_init()
316 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); gm204_gr_init()
317 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); gm204_gr_init()
318 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); gm204_gr_init()
320 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); gm204_gr_init()
321 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); gm204_gr_init()
322 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); gm204_gr_init()
323 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); gm204_gr_init()
324 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); gm204_gr_init()
325 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000); gm204_gr_init()
326 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe); gm204_gr_init()
327 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005); gm204_gr_init()
329 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); gm204_gr_init()
330 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); gm204_gr_init()
334 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000); gm204_gr_init()
335 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000); gm204_gr_init()
336 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); gm204_gr_init()
337 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); gm204_gr_init()
340 nvkm_wr32(device, 0x400108, 0xffffffff); gm204_gr_init()
341 nvkm_wr32(device, 0x400138, 0xffffffff); gm204_gr_init()
342 nvkm_wr32(device, 0x400118, 0xffffffff); gm204_gr_init()
343 nvkm_wr32(device, 0x400130, 0xffffffff); gm204_gr_init()
344 nvkm_wr32(device, 0x40011c, 0xffffffff); gm204_gr_init()
345 nvkm_wr32(device, 0x400134, 0xffffffff); gm204_gr_init()
347 nvkm_wr32(device, 0x400054, 0x2c350f63); gm204_gr_init()
370 gm204_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) gm204_gr_new() argument
372 return gf100_gr_new_(&gm204_gr, device, index, pgr); gm204_gr_new()
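The GPC_UNIT/GPC_BCAST/TPC_UNIT macros in gm204_gr_init() compute per-unit register addresses from a base plus a per-unit stride, so one loop body can program every GPC and TPC. A sketch of that addressing, reusing the u32 stand-in above; the base and stride constants here are assumptions for illustration, not the values from the gf100 headers:

/* illustrative base/stride values; the real ones live in the gf100 headers */
#define GPC_BASE        0x500000u
#define GPC_STRIDE      0x008000u       /* one GPC's register block, assumed */
#define BCAST_BASE      0x418000u       /* broadcast alias reaching all GPCs, assumed */
#define TPC_BASE        0x004000u
#define TPC_STRIDE      0x000800u

static u32 gpc_unit(int gpc, u32 reg)
{
        return GPC_BASE + gpc * GPC_STRIDE + reg;       /* one specific GPC */
}

static u32 gpc_bcast(u32 reg)
{
        return BCAST_BASE + reg;                        /* every GPC at once */
}

static u32 tpc_unit(int gpc, int tpc, u32 reg)
{
        return gpc_unit(gpc, TPC_BASE + tpc * TPC_STRIDE + reg);
}

This is why the init loops above can set the same error-reporting registers for each GPC/TPC pair with nothing but the two loop indices.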
gm107.c:294 struct nvkm_device *device = gr->base.engine.subdev.device; gm107_gr_init_bios() local
295 struct nvkm_bios *bios = device->bios; gm107_gr_init_bios()
303 nvkm_wr32(device, regs[E].ctrl, infoE.data); gm107_gr_init_bios()
305 nvkm_wr32(device, regs[E].data, infoX.data); gm107_gr_init_bios()
313 struct nvkm_device *device = gr->base.engine.subdev.device; gm107_gr_init() local
320 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000); gm107_gr_init()
321 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000); gm107_gr_init()
322 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000); gm107_gr_init()
323 nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8); gm107_gr_init()
324 nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8); gm107_gr_init()
330 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001); gm107_gr_init()
343 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]); gm107_gr_init()
344 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]); gm107_gr_init()
345 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]); gm107_gr_init()
346 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]); gm107_gr_init()
349 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), gm107_gr_init()
351 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | gm107_gr_init()
353 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); gm107_gr_init()
356 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); gm107_gr_init()
357 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); gm107_gr_init()
359 nvkm_wr32(device, 0x400500, 0x00010001); gm107_gr_init()
361 nvkm_wr32(device, 0x400100, 0xffffffff); gm107_gr_init()
362 nvkm_wr32(device, 0x40013c, 0xffffffff); gm107_gr_init()
363 nvkm_wr32(device, 0x400124, 0x00000002); gm107_gr_init()
364 nvkm_wr32(device, 0x409c24, 0x000e0000); gm107_gr_init()
366 nvkm_wr32(device, 0x404000, 0xc0000000); gm107_gr_init()
367 nvkm_wr32(device, 0x404600, 0xc0000000); gm107_gr_init()
368 nvkm_wr32(device, 0x408030, 0xc0000000); gm107_gr_init()
369 nvkm_wr32(device, 0x404490, 0xc0000000); gm107_gr_init()
370 nvkm_wr32(device, 0x406018, 0xc0000000); gm107_gr_init()
371 nvkm_wr32(device, 0x407020, 0x40000000); gm107_gr_init()
372 nvkm_wr32(device, 0x405840, 0xc0000000); gm107_gr_init()
373 nvkm_wr32(device, 0x405844, 0x00ffffff); gm107_gr_init()
374 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); gm107_gr_init()
378 nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); gm107_gr_init()
379 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); gm107_gr_init()
380 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); gm107_gr_init()
381 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); gm107_gr_init()
382 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); gm107_gr_init()
384 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); gm107_gr_init()
385 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); gm107_gr_init()
386 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); gm107_gr_init()
387 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); gm107_gr_init()
388 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); gm107_gr_init()
389 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000); gm107_gr_init()
390 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe); gm107_gr_init()
391 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005); gm107_gr_init()
393 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); gm107_gr_init()
394 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); gm107_gr_init()
398 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000); gm107_gr_init()
399 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000); gm107_gr_init()
400 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); gm107_gr_init()
401 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); gm107_gr_init()
404 nvkm_wr32(device, 0x400108, 0xffffffff); gm107_gr_init()
405 nvkm_wr32(device, 0x400138, 0xffffffff); gm107_gr_init()
406 nvkm_wr32(device, 0x400118, 0xffffffff); gm107_gr_init()
407 nvkm_wr32(device, 0x400130, 0xffffffff); gm107_gr_init()
408 nvkm_wr32(device, 0x40011c, 0xffffffff); gm107_gr_init()
409 nvkm_wr32(device, 0x400134, 0xffffffff); gm107_gr_init()
411 nvkm_wr32(device, 0x400054, 0x2c350f63); gm107_gr_init()
456 gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) gm107_gr_new() argument
458 return gf100_gr_new_(&gm107_gr, device, index, pgr); gm107_gr_new()
nv44.c:34 struct nvkm_device *device = gr->base.engine.subdev.device; nv44_gr_tile() local
35 struct nvkm_fifo *fifo = device->fifo; nv44_gr_tile()
41 switch (device->chipset) { nv44_gr_tile()
44 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch); nv44_gr_tile()
45 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit); nv44_gr_tile()
46 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr); nv44_gr_tile()
53 nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch); nv44_gr_tile()
54 nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit); nv44_gr_tile()
55 nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr); nv44_gr_tile()
56 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch); nv44_gr_tile()
57 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit); nv44_gr_tile()
58 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr); nv44_gr_tile()
61 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch); nv44_gr_tile()
62 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit); nv44_gr_tile()
63 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr); nv44_gr_tile()
64 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch); nv44_gr_tile()
65 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit); nv44_gr_tile()
66 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr); nv44_gr_tile()
105 nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) nv44_gr_new() argument
107 return nv40_gr_new_(&nv44_gr, device, index, pgr); nv44_gr_new()
nv04.c:445 nv04_gr_set_ctx1(struct nvkm_device *device, u32 inst, u32 mask, u32 value) nv04_gr_set_ctx1() argument
447 int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; nv04_gr_set_ctx1()
450 tmp = nvkm_rd32(device, 0x700000 + inst); nv04_gr_set_ctx1()
453 nvkm_wr32(device, 0x700000 + inst, tmp); nv04_gr_set_ctx1()
455 nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp); nv04_gr_set_ctx1()
456 nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc << 2), tmp); nv04_gr_set_ctx1()
460 nv04_gr_set_ctx_val(struct nvkm_device *device, u32 inst, u32 mask, u32 value) nv04_gr_set_ctx_val() argument
465 ctx1 = nvkm_rd32(device, 0x700000 + inst); nv04_gr_set_ctx_val()
469 tmp = nvkm_rd32(device, 0x70000c + inst); nv04_gr_set_ctx_val()
472 nvkm_wr32(device, 0x70000c + inst, tmp); nv04_gr_set_ctx_val()
504 nv04_gr_set_ctx1(device, inst, 0x01000000, valid << 24); nv04_gr_set_ctx_val()
508 nv04_gr_mthd_set_operation(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_set_operation() argument
510 u8 class = nvkm_rd32(device, 0x700000) & 0x000000ff; nv04_gr_mthd_set_operation()
516 nv04_gr_set_ctx1(device, inst, 0x00038000, data << 15); nv04_gr_mthd_set_operation()
518 nv04_gr_set_ctx_val(device, inst, 0, 0); nv04_gr_mthd_set_operation()
523 nv04_gr_mthd_surf3d_clip_h(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_surf3d_clip_h() argument
535 nvkm_wr32(device, 0x40053c, min); nv04_gr_mthd_surf3d_clip_h()
536 nvkm_wr32(device, 0x400544, max); nv04_gr_mthd_surf3d_clip_h()
541 nv04_gr_mthd_surf3d_clip_v(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_surf3d_clip_v() argument
553 nvkm_wr32(device, 0x400540, min); nv04_gr_mthd_surf3d_clip_v()
554 nvkm_wr32(device, 0x400548, max); nv04_gr_mthd_surf3d_clip_v()
559 nv04_gr_mthd_bind_class(struct nvkm_device *device, u32 inst) nv04_gr_mthd_bind_class() argument
561 return nvkm_rd32(device, 0x700000 + (inst << 4)); nv04_gr_mthd_bind_class()
565 nv04_gr_mthd_bind_surf2d(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_surf2d() argument
567 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_surf2d()
569 nv04_gr_set_ctx1(device, inst, 0x00004000, 0); nv04_gr_mthd_bind_surf2d()
570 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0); nv04_gr_mthd_bind_surf2d()
573 nv04_gr_set_ctx1(device, inst, 0x00004000, 0); nv04_gr_mthd_bind_surf2d()
574 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000); nv04_gr_mthd_bind_surf2d()
581 nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_surf2d_swzsurf() argument
583 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_surf2d_swzsurf()
585 nv04_gr_set_ctx1(device, inst, 0x00004000, 0); nv04_gr_mthd_bind_surf2d_swzsurf()
586 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0); nv04_gr_mthd_bind_surf2d_swzsurf()
589 nv04_gr_set_ctx1(device, inst, 0x00004000, 0); nv04_gr_mthd_bind_surf2d_swzsurf()
590 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000); nv04_gr_mthd_bind_surf2d_swzsurf()
593 nv04_gr_set_ctx1(device, inst, 0x00004000, 0x00004000); nv04_gr_mthd_bind_surf2d_swzsurf()
594 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000); nv04_gr_mthd_bind_surf2d_swzsurf()
601 nv01_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data) nv01_gr_mthd_bind_patt() argument
603 switch (nv04_gr_mthd_bind_class(device, data)) { nv01_gr_mthd_bind_patt()
605 nv04_gr_set_ctx_val(device, inst, 0x08000000, 0); nv01_gr_mthd_bind_patt()
608 nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000); nv01_gr_mthd_bind_patt()
615 nv04_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_patt() argument
617 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_patt()
619 nv04_gr_set_ctx_val(device, inst, 0x08000000, 0); nv04_gr_mthd_bind_patt()
622 nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000); nv04_gr_mthd_bind_patt()
629 nv04_gr_mthd_bind_rop(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_rop() argument
631 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_rop()
633 nv04_gr_set_ctx_val(device, inst, 0x10000000, 0); nv04_gr_mthd_bind_rop()
636 nv04_gr_set_ctx_val(device, inst, 0x10000000, 0x10000000); nv04_gr_mthd_bind_rop()
643 nv04_gr_mthd_bind_beta1(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_beta1() argument
645 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_beta1()
647 nv04_gr_set_ctx_val(device, inst, 0x20000000, 0); nv04_gr_mthd_bind_beta1()
650 nv04_gr_set_ctx_val(device, inst, 0x20000000, 0x20000000); nv04_gr_mthd_bind_beta1()
657 nv04_gr_mthd_bind_beta4(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_beta4() argument
659 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_beta4()
661 nv04_gr_set_ctx_val(device, inst, 0x40000000, 0); nv04_gr_mthd_bind_beta4()
664 nv04_gr_set_ctx_val(device, inst, 0x40000000, 0x40000000); nv04_gr_mthd_bind_beta4()
671 nv04_gr_mthd_bind_surf_dst(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_surf_dst() argument
673 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_surf_dst()
675 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0); nv04_gr_mthd_bind_surf_dst()
678 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000); nv04_gr_mthd_bind_surf_dst()
685 nv04_gr_mthd_bind_surf_src(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_surf_src() argument
687 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_surf_src()
689 nv04_gr_set_ctx_val(device, inst, 0x04000000, 0); nv04_gr_mthd_bind_surf_src()
692 nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000); nv04_gr_mthd_bind_surf_src()
699 nv04_gr_mthd_bind_surf_color(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_surf_color() argument
701 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_surf_color()
703 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0); nv04_gr_mthd_bind_surf_color()
706 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000); nv04_gr_mthd_bind_surf_color()
713 nv04_gr_mthd_bind_surf_zeta(struct nvkm_device *device, u32 inst, u32 data) nv04_gr_mthd_bind_surf_zeta() argument
715 switch (nv04_gr_mthd_bind_class(device, data)) { nv04_gr_mthd_bind_surf_zeta()
717 nv04_gr_set_ctx_val(device, inst, 0x04000000, 0); nv04_gr_mthd_bind_surf_zeta()
720 nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000); nv04_gr_mthd_bind_surf_zeta()
727 nv01_gr_mthd_bind_clip(struct nvkm_device *device, u32 inst, u32 data) nv01_gr_mthd_bind_clip() argument
729 switch (nv04_gr_mthd_bind_class(device, data)) { nv01_gr_mthd_bind_clip()
731 nv04_gr_set_ctx1(device, inst, 0x2000, 0); nv01_gr_mthd_bind_clip()
734 nv04_gr_set_ctx1(device, inst, 0x2000, 0x2000); nv01_gr_mthd_bind_clip()
741 nv01_gr_mthd_bind_chroma(struct nvkm_device *device, u32 inst, u32 data) nv01_gr_mthd_bind_chroma() argument
743 switch (nv04_gr_mthd_bind_class(device, data)) { nv01_gr_mthd_bind_chroma()
745 nv04_gr_set_ctx1(device, inst, 0x1000, 0); nv01_gr_mthd_bind_chroma()
751 nv04_gr_set_ctx1(device, inst, 0x1000, 0x1000); nv01_gr_mthd_bind_chroma()
758 nv03_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv03_gr_mthd_gdi() argument
770 return func(device, inst, data); nv03_gr_mthd_gdi()
774 nv04_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv04_gr_mthd_gdi() argument
787 return func(device, inst, data); nv04_gr_mthd_gdi()
791 nv01_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv01_gr_mthd_blit() argument
806 return func(device, inst, data); nv01_gr_mthd_blit()
810 nv04_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv04_gr_mthd_blit() argument
825 return func(device, inst, data); nv04_gr_mthd_blit()
829 nv04_gr_mthd_iifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv04_gr_mthd_iifc() argument
844 return func(device, inst, data); nv04_gr_mthd_iifc()
848 nv01_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv01_gr_mthd_ifc() argument
862 return func(device, inst, data); nv01_gr_mthd_ifc()
866 nv04_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv04_gr_mthd_ifc() argument
881 return func(device, inst, data); nv04_gr_mthd_ifc()
885 nv03_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv03_gr_mthd_sifc() argument
898 return func(device, inst, data); nv03_gr_mthd_sifc()
902 nv04_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv04_gr_mthd_sifc() argument
916 return func(device, inst, data); nv04_gr_mthd_sifc()
920 nv03_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv03_gr_mthd_sifm() argument
932 return func(device, inst, data); nv03_gr_mthd_sifm()
936 nv04_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv04_gr_mthd_sifm() argument
949 return func(device, inst, data); nv04_gr_mthd_sifm()
953 nv04_gr_mthd_surf3d(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv04_gr_mthd_surf3d() argument
962 return func(device, inst, data); nv04_gr_mthd_surf3d()
966 nv03_gr_mthd_ttri(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv03_gr_mthd_ttri() argument
976 return func(device, inst, data); nv03_gr_mthd_ttri()
980 nv01_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv01_gr_mthd_prim() argument
993 return func(device, inst, data); nv01_gr_mthd_prim()
997 nv04_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv04_gr_mthd_prim() argument
1011 return func(device, inst, data); nv04_gr_mthd_prim()
1015 nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data) nv04_gr_mthd() argument
1018 switch (nvkm_rd32(device, 0x700000 + inst) & 0x000000ff) { nv04_gr_mthd()
1039 return func(device, inst, mthd, data); nv04_gr_mthd()
1046 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align, nv04_gr_object_bind()
1074 struct nvkm_device *device = gr->base.engine.subdev.device; nv04_gr_channel() local
1076 if (nvkm_rd32(device, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) { nv04_gr_channel()
1077 int chid = nvkm_rd32(device, NV04_PGRAPH_CTX_USER) >> 24; nv04_gr_channel()
1087 struct nvkm_device *device = chan->gr->base.engine.subdev.device; nv04_gr_load_context() local
1091 nvkm_wr32(device, nv04_gr_ctx_regs[i], chan->nv04[i]); nv04_gr_load_context()
1093 nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10010100); nv04_gr_load_context()
1094 nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24); nv04_gr_load_context()
1095 nvkm_mask(device, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000); nv04_gr_load_context()
1102 struct nvkm_device *device = chan->gr->base.engine.subdev.device; nv04_gr_unload_context() local
1106 chan->nv04[i] = nvkm_rd32(device, nv04_gr_ctx_regs[i]); nv04_gr_unload_context()
1108 nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000000); nv04_gr_unload_context()
1109 nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000); nv04_gr_unload_context()
1116 struct nvkm_device *device = gr->base.engine.subdev.device; nv04_gr_context_switch() local
1129 chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f; nv04_gr_context_switch()
1165 struct nvkm_device *device = gr->base.engine.subdev.device; nv04_gr_chan_fini() local
1169 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); nv04_gr_chan_fini()
1172 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); nv04_gr_chan_fini()
1214 struct nvkm_device *device = subdev->device; nv04_gr_idle() local
1217 if (device->card_type == NV_40) nv04_gr_idle()
1220 if (nvkm_msec(device, 2000, nv04_gr_idle()
1221 if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask)) nv04_gr_idle()
1225 nvkm_rd32(device, NV04_PGRAPH_STATUS)); nv04_gr_idle()
1276 struct nvkm_device *device = subdev->device; nv04_gr_intr() local
1277 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR); nv04_gr_intr()
1278 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE); nv04_gr_intr()
1279 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS); nv04_gr_intr()
1280 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR); nv04_gr_intr()
1284 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA); nv04_gr_intr()
1285 u32 class = nvkm_rd32(device, 0x400180 + subc * 4) & 0xff; nv04_gr_intr()
1286 u32 inst = (nvkm_rd32(device, 0x40016c) & 0xffff) << 4; nv04_gr_intr()
1297 if (!nv04_gr_mthd(device, inst, mthd, data)) nv04_gr_intr()
1303 nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); nv04_gr_intr()
1309 nvkm_wr32(device, NV03_PGRAPH_INTR, stat); nv04_gr_intr()
1310 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001); nv04_gr_intr()
1331 struct nvkm_device *device = gr->base.engine.subdev.device; nv04_gr_init() local
1334 nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF); nv04_gr_init()
1335 nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); nv04_gr_init()
1337 nvkm_wr32(device, NV04_PGRAPH_VALID1, 0); nv04_gr_init()
1338 nvkm_wr32(device, NV04_PGRAPH_VALID2, 0); nv04_gr_init()
1339 /*nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x000001FF); nv04_gr_init()
1340 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ nv04_gr_init()
1341 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x1231c000); nv04_gr_init()
1344 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x72111100); nv04_gr_init()
1346 /*nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ nv04_gr_init()
1347 nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f071); nv04_gr_init()
1350 /*nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/ nv04_gr_init()
1351 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31); nv04_gr_init()
1354 nvkm_wr32(device, NV04_PGRAPH_STATE , 0xFFFFFFFF); nv04_gr_init()
1355 nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL , 0x10000100); nv04_gr_init()
1356 nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000); nv04_gr_init()
1359 nvkm_wr32(device, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000); nv04_gr_init()
1360 nvkm_wr32(device, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF); nv04_gr_init()
1416 nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) nv04_gr_new() argument
1425 return nvkm_gr_ctor(&nv04_gr, device, index, 0x00001000, nv04_gr_new()
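The long run of nv04_gr_mthd_* hits above is a software method dispatcher: when the hardware traps a method it cannot handle, the driver reads the object's class out of instance memory and routes the method to a per-class handler. A compressed sketch of that shape, reusing the rd32()/wr32() stand-ins from the first sketch — the class and method numbers are assumptions; only the 0x700000 instance window and the 0x00038000/data << 15 context patch are taken from the listing:

typedef int (*mthd_func)(struct nvkm_device_sketch *, u32 inst, u32 data);

static int mthd_set_operation(struct nvkm_device_sketch *device, u32 inst, u32 data)
{
        /* patch the object's first context word, as nv04_gr_set_ctx1() does */
        u32 ctx = rd32(device, 0x700000 + inst);
        wr32(device, 0x700000 + inst, (ctx & ~0x00038000u) | (data << 15));
        return 1;                               /* handled in software */
}

static int dispatch_mthd(struct nvkm_device_sketch *device,
                         u32 inst, u32 mthd, u32 data)
{
        u32 class = rd32(device, 0x700000 + inst) & 0xff;  /* class byte in RAMIN */
        mthd_func func = 0;

        switch (class) {
        case 0x5f:                    /* blit; class/method numbers are assumptions */
                if (mthd == 0x02fc)
                        func = mthd_set_operation;
                break;
        }
        return func ? func(device, inst, data) : 0;  /* 0: leave it to hardware */
}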
gk20a.c:153 struct nvkm_device *device = subdev->device; gk20a_gr_wait_mem_scrubbing() local
155 if (nvkm_msec(device, 2000, gk20a_gr_wait_mem_scrubbing()
156 if (!(nvkm_rd32(device, 0x40910c) & 0x00000006)) gk20a_gr_wait_mem_scrubbing()
163 if (nvkm_msec(device, 2000, gk20a_gr_wait_mem_scrubbing()
164 if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006)) gk20a_gr_wait_mem_scrubbing()
177 struct nvkm_device *device = gr->base.engine.subdev.device; gk20a_gr_set_hww_esr_report_mask() local
178 nvkm_wr32(device, 0x419e44, 0x1ffffe); gk20a_gr_set_hww_esr_report_mask()
179 nvkm_wr32(device, 0x419e4c, 0x7f); gk20a_gr_set_hww_esr_report_mask()
185 struct nvkm_device *device = gr->base.engine.subdev.device; gk20a_gr_init() local
193 nvkm_wr32(device, 0x40802c, 0x1); gk20a_gr_init()
206 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8); gk20a_gr_init()
207 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8); gk20a_gr_init()
213 nvkm_mask(device, 0x503018, 0x1, 0x1); gk20a_gr_init()
227 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]); gk20a_gr_init()
228 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]); gk20a_gr_init()
229 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]); gk20a_gr_init()
230 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]); gk20a_gr_init()
233 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), gk20a_gr_init()
235 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | gk20a_gr_init()
237 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); gk20a_gr_init()
240 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); gk20a_gr_init()
243 nvkm_wr32(device, 0x400500, 0x00010001); gk20a_gr_init()
246 nvkm_wr32(device, 0x400100, 0xffffffff); gk20a_gr_init()
247 nvkm_wr32(device, 0x40013c, 0xffffffff); gk20a_gr_init()
250 nvkm_wr32(device, 0x409c24, 0x000f0000); gk20a_gr_init()
253 nvkm_wr32(device, 0x404000, 0xc0000000); gk20a_gr_init()
254 nvkm_wr32(device, 0x404600, 0xc0000000); gk20a_gr_init()
260 nvkm_wr32(device, 0x419d0c, 0x2); gk20a_gr_init()
261 nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16); gk20a_gr_init()
264 nvkm_wr32(device, 0x400108, 0xffffffff); gk20a_gr_init()
265 nvkm_wr32(device, 0x400138, 0xffffffff); gk20a_gr_init()
266 nvkm_wr32(device, 0x400118, 0xffffffff); gk20a_gr_init()
267 nvkm_wr32(device, 0x400130, 0xffffffff); gk20a_gr_init()
268 nvkm_wr32(device, 0x40011c, 0xffffffff); gk20a_gr_init()
269 nvkm_wr32(device, 0x400134, 0xffffffff); gk20a_gr_init()
286 gk20a_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, gk20a_gr_new_() argument
297 ret = gf100_gr_ctor(func, device, index, gr); gk20a_gr_new_()
353 gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) gk20a_gr_new() argument
355 return gk20a_gr_new_(&gk20a_gr, device, index, pgr); gk20a_gr_new()
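gk20a_gr_wait_mem_scrubbing() shows the nvkm_msec() idiom: poll a register until a condition holds, giving up after a time budget. A sketch of the same bounded poll with a plain nanosleep() standing in for the kernel's timing — the helper name and the 1 ms step are assumptions:

#include <time.h>

/* poll 'addr' until (value under mask) matches, or 'timeout_ms' expires */
static int poll32(struct nvkm_device_sketch *device, u32 addr, u32 mask,
                  u32 value, int timeout_ms)
{
        struct timespec step = { .tv_nsec = 1000 * 1000 };      /* ~1 ms */

        while (timeout_ms-- > 0) {
                if ((rd32(device, addr) & mask) == value)
                        return 0;                               /* condition met */
                nanosleep(&step, 0);
        }
        return -1;                                              /* timed out */
}

The scrubbing wait above is then roughly poll32(device, 0x40910c, 0x00000006, 0x00000000, 2000), repeated for 0x41a10c.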
nv10.c:417 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr); \
419 state[__i] = nvkm_rd32(device, NV10_PGRAPH_PIPE_DATA); \
425 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr); \
427 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
433 struct nvkm_device *device = chan->object.engine->subdev.device; nv17_gr_mthd_lma_window() local
447 PIPE_SAVE(device, pipe_0x0040, 0x0040); nv17_gr_mthd_lma_window()
448 PIPE_SAVE(device, pipe->pipe_0x0200, 0x0200); nv17_gr_mthd_lma_window()
450 PIPE_RESTORE(device, chan->lma_window, 0x6790); nv17_gr_mthd_lma_window()
454 xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0); nv17_gr_mthd_lma_window()
455 xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1); nv17_gr_mthd_lma_window()
457 PIPE_SAVE(device, pipe->pipe_0x4400, 0x4400); nv17_gr_mthd_lma_window()
458 PIPE_SAVE(device, pipe_0x64c0, 0x64c0); nv17_gr_mthd_lma_window()
459 PIPE_SAVE(device, pipe_0x6ab0, 0x6ab0); nv17_gr_mthd_lma_window()
460 PIPE_SAVE(device, pipe_0x6a80, 0x6a80); nv17_gr_mthd_lma_window()
464 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000); nv17_gr_mthd_lma_window()
465 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000); nv17_gr_mthd_lma_window()
466 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); nv17_gr_mthd_lma_window()
468 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000); nv17_gr_mthd_lma_window()
470 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000); nv17_gr_mthd_lma_window()
472 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); nv17_gr_mthd_lma_window()
474 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000); nv17_gr_mthd_lma_window()
476 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); nv17_gr_mthd_lma_window()
478 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000); nv17_gr_mthd_lma_window()
480 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); nv17_gr_mthd_lma_window()
481 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008); nv17_gr_mthd_lma_window()
483 PIPE_RESTORE(device, pipe->pipe_0x0200, 0x0200); nv17_gr_mthd_lma_window()
487 PIPE_RESTORE(device, pipe_0x0040, 0x0040); nv17_gr_mthd_lma_window()
489 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0); nv17_gr_mthd_lma_window()
490 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1); nv17_gr_mthd_lma_window()
492 PIPE_RESTORE(device, pipe_0x64c0, 0x64c0); nv17_gr_mthd_lma_window()
493 PIPE_RESTORE(device, pipe_0x6ab0, 0x6ab0); nv17_gr_mthd_lma_window()
494 PIPE_RESTORE(device, pipe_0x6a80, 0x6a80); nv17_gr_mthd_lma_window()
495 PIPE_RESTORE(device, pipe->pipe_0x4400, 0x4400); nv17_gr_mthd_lma_window()
497 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0); nv17_gr_mthd_lma_window()
498 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000); nv17_gr_mthd_lma_window()
506 struct nvkm_device *device = chan->object.engine->subdev.device; nv17_gr_mthd_lma_enable() local
511 nvkm_mask(device, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100); nv17_gr_mthd_lma_enable()
512 nvkm_mask(device, 0x4006b0, 0x08000000, 0x08000000); nv17_gr_mthd_lma_enable()
549 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_channel() local
551 if (nvkm_rd32(device, 0x400144) & 0x00010000) { nv10_gr_channel()
552 int chid = nvkm_rd32(device, 0x400148) >> 24; nv10_gr_channel()
564 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_save_pipe() local
583 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_load_pipe() local
589 xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0); nv10_gr_load_pipe()
590 xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1); nv10_gr_load_pipe()
591 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000); nv10_gr_load_pipe()
592 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000); nv10_gr_load_pipe()
593 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); nv10_gr_load_pipe()
595 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000); nv10_gr_load_pipe()
597 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000); nv10_gr_load_pipe()
599 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); nv10_gr_load_pipe()
601 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000); nv10_gr_load_pipe()
603 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); nv10_gr_load_pipe()
605 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000); nv10_gr_load_pipe()
607 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); nv10_gr_load_pipe()
608 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008); nv10_gr_load_pipe()
615 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0); nv10_gr_load_pipe()
616 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1); nv10_gr_load_pipe()
815 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_load_dma_vtxbuf() local
827 int class = nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff; nv10_gr_load_dma_vtxbuf()
839 ctx_user = nvkm_rd32(device, NV10_PGRAPH_CTX_USER); nv10_gr_load_dma_vtxbuf()
841 ctx_switch[i] = nvkm_rd32(device, NV10_PGRAPH_CTX_SWITCH(i)); nv10_gr_load_dma_vtxbuf()
844 st2 = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2); nv10_gr_load_dma_vtxbuf()
845 st2_dl = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DL); nv10_gr_load_dma_vtxbuf()
846 st2_dh = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DH); nv10_gr_load_dma_vtxbuf()
847 fifo_ptr = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR); nv10_gr_load_dma_vtxbuf()
850 fifo[i] = nvkm_rd32(device, 0x4007a0 + 4 * i); nv10_gr_load_dma_vtxbuf()
854 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), nv10_gr_load_dma_vtxbuf()
855 nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(subchan, i))); nv10_gr_load_dma_vtxbuf()
856 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13); nv10_gr_load_dma_vtxbuf()
859 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0); nv10_gr_load_dma_vtxbuf()
860 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, nv10_gr_load_dma_vtxbuf()
862 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, inst); nv10_gr_load_dma_vtxbuf()
863 nvkm_mask(device, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); nv10_gr_load_dma_vtxbuf()
864 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); nv10_gr_load_dma_vtxbuf()
865 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); nv10_gr_load_dma_vtxbuf()
869 nvkm_wr32(device, 0x4007a0 + 4 * i, fifo[i]); nv10_gr_load_dma_vtxbuf()
871 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr); nv10_gr_load_dma_vtxbuf()
872 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, st2); nv10_gr_load_dma_vtxbuf()
873 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl); nv10_gr_load_dma_vtxbuf()
874 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh); nv10_gr_load_dma_vtxbuf()
878 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]); nv10_gr_load_dma_vtxbuf()
879 nvkm_wr32(device, NV10_PGRAPH_CTX_USER, ctx_user); nv10_gr_load_dma_vtxbuf()
886 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_load_context() local
891 nvkm_wr32(device, nv10_gr_ctx_regs[i], chan->nv10[i]); nv10_gr_load_context()
893 if (device->card_type >= NV_11 && device->chipset >= 0x17) { nv10_gr_load_context()
895 nvkm_wr32(device, nv17_gr_ctx_regs[i], chan->nv17[i]); nv10_gr_load_context()
900 inst = nvkm_rd32(device, NV10_PGRAPH_GLOBALSTATE1) & 0xffff; nv10_gr_load_context()
903 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100); nv10_gr_load_context()
904 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24); nv10_gr_load_context()
905 nvkm_mask(device, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000); nv10_gr_load_context()
913 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_unload_context() local
917 chan->nv10[i] = nvkm_rd32(device, nv10_gr_ctx_regs[i]); nv10_gr_unload_context()
919 if (device->card_type >= NV_11 && device->chipset >= 0x17) { nv10_gr_unload_context()
921 chan->nv17[i] = nvkm_rd32(device, nv17_gr_ctx_regs[i]); nv10_gr_unload_context()
926 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000000); nv10_gr_unload_context()
927 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000); nv10_gr_unload_context()
934 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_context_switch() local
947 chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; nv10_gr_context_switch()
958 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_chan_fini() local
962 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); nv10_gr_chan_fini()
965 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); nv10_gr_chan_fini()
1007 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_chan_new() local
1024 if (device->card_type >= NV_11 && device->chipset >= 0x17) { nv10_gr_chan_new()
1027 nvkm_rd32(device, NV10_PGRAPH_DEBUG_4)); nv10_gr_chan_new()
1028 NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0)); nv10_gr_chan_new()
1052 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_tile() local
1053 struct nvkm_fifo *fifo = device->fifo; nv10_gr_tile()
1059 nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit); nv10_gr_tile()
1060 nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch); nv10_gr_tile()
1061 nvkm_wr32(device, NV10_PGRAPH_TILE(i), tile->addr); nv10_gr_tile()
1085 struct nvkm_device *device = subdev->device; nv10_gr_intr() local
1086 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR); nv10_gr_intr()
1087 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE); nv10_gr_intr()
1088 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS); nv10_gr_intr()
1089 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR); nv10_gr_intr()
1093 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA); nv10_gr_intr()
1094 u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff; nv10_gr_intr()
1111 nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); nv10_gr_intr()
1117 nvkm_wr32(device, NV03_PGRAPH_INTR, stat); nv10_gr_intr()
1118 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001); nv10_gr_intr()
1139 struct nvkm_device *device = gr->base.engine.subdev.device; nv10_gr_init() local
1141 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv10_gr_init()
1142 nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); nv10_gr_init()
1144 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); nv10_gr_init()
1145 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000); nv10_gr_init()
1146 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700); nv10_gr_init()
1147 /* nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */ nv10_gr_init()
1148 nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x25f92ad9); nv10_gr_init()
1149 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31)); nv10_gr_init()
1151 if (device->card_type >= NV_11 && device->chipset >= 0x17) { nv10_gr_init()
1152 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x1f000000); nv10_gr_init()
1153 nvkm_wr32(device, 0x400a10, 0x03ff3fb6); nv10_gr_init()
1154 nvkm_wr32(device, 0x400838, 0x002f8684); nv10_gr_init()
1155 nvkm_wr32(device, 0x40083c, 0x00115f3f); nv10_gr_init()
1156 nvkm_wr32(device, 0x4006b0, 0x40000020); nv10_gr_init()
1158 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000); nv10_gr_init()
1161 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); nv10_gr_init()
1162 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); nv10_gr_init()
1163 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000); nv10_gr_init()
1164 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000); nv10_gr_init()
1165 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000); nv10_gr_init()
1166 nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF); nv10_gr_init()
1168 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000); nv10_gr_init()
1169 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100); nv10_gr_init()
1170 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, 0x08000000); nv10_gr_init()
1175 nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, nv10_gr_new_() argument
1185 return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base); nv10_gr_new_()
1218 nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) nv10_gr_new() argument
1220 return nv10_gr_new_(&nv10_gr, device, index, pgr); nv10_gr_new()
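PIPE_SAVE/PIPE_RESTORE at the top of the nv10.c results wrap an address/data register window: one write to NV10_PGRAPH_PIPE_ADDRESS selects where the window points, after which repeated accesses to NV10_PGRAPH_PIPE_DATA stream pipeline state. A sketch of that pair, reusing the earlier stand-ins; the two offsets are placeholders, and the auto-advancing data port is the assumption being illustrated:

#define PIPE_ADDRESS    0x400f50u       /* placeholder for NV10_PGRAPH_PIPE_ADDRESS */
#define PIPE_DATA       0x400f54u       /* placeholder for NV10_PGRAPH_PIPE_DATA */

static void pipe_save(struct nvkm_device_sketch *device,
                      u32 *state, u32 addr, int len)
{
        int i;

        wr32(device, PIPE_ADDRESS, addr);               /* point the window */
        for (i = 0; i < len; i++)
                state[i] = rd32(device, PIPE_DATA);     /* port auto-advances */
}

static void pipe_restore(struct nvkm_device_sketch *device,
                         const u32 *state, u32 addr, int len)
{
        int i;

        wr32(device, PIPE_ADDRESS, addr);
        for (i = 0; i < len; i++)
                wr32(device, PIPE_DATA, state[i]);
}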
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/
gk104.c:34 magic_(struct nvkm_device *device, u32 ctrl, int size) magic_() argument
36 nvkm_wr32(device, 0x00c800, 0x00000000); magic_()
37 nvkm_wr32(device, 0x00c808, 0x00000000); magic_()
38 nvkm_wr32(device, 0x00c800, ctrl); magic_()
39 nvkm_msec(device, 2000, magic_()
40 if (nvkm_rd32(device, 0x00c800) & 0x40000000) { magic_()
42 nvkm_wr32(device, 0x00c804, 0x00000000); magic_()
46 nvkm_wr32(device, 0x00c800, 0x00000000); magic_()
50 magic(struct nvkm_device *device, u32 ctrl) magic() argument
52 magic_(device, 0x8000a41f | ctrl, 6); magic()
53 magic_(device, 0x80000421 | ctrl, 1); magic()
59 struct nvkm_device *device = pmu->subdev.device; gk104_pmu_pgob() local
61 if (!(nvkm_fuse_read(device->fuse, 0x31c) & 0x00000001)) gk104_pmu_pgob()
64 nvkm_mask(device, 0x000200, 0x00001000, 0x00000000); gk104_pmu_pgob()
65 nvkm_rd32(device, 0x000200); gk104_pmu_pgob()
66 nvkm_mask(device, 0x000200, 0x08000000, 0x08000000); gk104_pmu_pgob()
69 nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002); gk104_pmu_pgob()
70 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001); gk104_pmu_pgob()
71 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000); gk104_pmu_pgob()
73 nvkm_mask(device, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000); gk104_pmu_pgob()
76 nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000); gk104_pmu_pgob()
77 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001); gk104_pmu_pgob()
78 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000); gk104_pmu_pgob()
80 nvkm_mask(device, 0x000200, 0x08000000, 0x00000000); gk104_pmu_pgob()
81 nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); gk104_pmu_pgob()
82 nvkm_rd32(device, 0x000200); gk104_pmu_pgob()
84 if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) { gk104_pmu_pgob()
85 switch (device->chipset) { gk104_pmu_pgob()
87 magic(device, 0x04000000); gk104_pmu_pgob()
88 magic(device, 0x06000000); gk104_pmu_pgob()
89 magic(device, 0x0c000000); gk104_pmu_pgob()
90 magic(device, 0x0e000000); gk104_pmu_pgob()
93 magic(device, 0x02000000); gk104_pmu_pgob()
94 magic(device, 0x04000000); gk104_pmu_pgob()
95 magic(device, 0x0a000000); gk104_pmu_pgob()
98 magic(device, 0x02000000); gk104_pmu_pgob()
116 gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) gk104_pmu_new() argument
118 return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu); gk104_pmu_new()
base.c:40 struct nvkm_device *device = subdev->device; nvkm_pmu_send() local
44 addr = nvkm_rd32(device, 0x10a4a0); nvkm_pmu_send()
45 if (nvkm_msec(device, 2000, nvkm_pmu_send()
46 u32 tmp = nvkm_rd32(device, 0x10a4b0); nvkm_pmu_send()
64 nvkm_wr32(device, 0x10a580, 0x00000001); nvkm_pmu_send()
65 } while (nvkm_rd32(device, 0x10a580) != 0x00000001); nvkm_pmu_send()
68 nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) + nvkm_pmu_send()
70 nvkm_wr32(device, 0x10a1c4, process); nvkm_pmu_send()
71 nvkm_wr32(device, 0x10a1c4, message); nvkm_pmu_send()
72 nvkm_wr32(device, 0x10a1c4, data0); nvkm_pmu_send()
73 nvkm_wr32(device, 0x10a1c4, data1); nvkm_pmu_send()
74 nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f); nvkm_pmu_send()
77 nvkm_wr32(device, 0x10a580, 0x00000000); nvkm_pmu_send()
95 struct nvkm_device *device = subdev->device; nvkm_pmu_recv() local
99 u32 addr = nvkm_rd32(device, 0x10a4cc); nvkm_pmu_recv()
100 if (addr == nvkm_rd32(device, 0x10a4c8)) nvkm_pmu_recv()
105 nvkm_wr32(device, 0x10a580, 0x00000002); nvkm_pmu_recv()
106 } while (nvkm_rd32(device, 0x10a580) != 0x00000002); nvkm_pmu_recv()
109 nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) + nvkm_pmu_recv()
111 process = nvkm_rd32(device, 0x10a1c4); nvkm_pmu_recv()
112 message = nvkm_rd32(device, 0x10a1c4); nvkm_pmu_recv()
113 data0 = nvkm_rd32(device, 0x10a1c4); nvkm_pmu_recv()
114 data1 = nvkm_rd32(device, 0x10a1c4); nvkm_pmu_recv()
115 nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f); nvkm_pmu_recv()
118 nvkm_wr32(device, 0x10a580, 0x00000000); nvkm_pmu_recv()
147 struct nvkm_device *device = pmu->subdev.device; nvkm_pmu_intr() local
148 u32 disp = nvkm_rd32(device, 0x10a01c); nvkm_pmu_intr()
149 u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16); nvkm_pmu_intr()
152 u32 stat = nvkm_rd32(device, 0x10a16c); nvkm_pmu_intr()
156 nvkm_rd32(device, 0x10a168)); nvkm_pmu_intr()
157 nvkm_wr32(device, 0x10a16c, 0x00000000); nvkm_pmu_intr()
164 nvkm_wr32(device, 0x10a004, 0x00000040); nvkm_pmu_intr()
170 nvkm_rd32(device, 0x10a7a0), nvkm_pmu_intr()
171 nvkm_rd32(device, 0x10a7a4)); nvkm_pmu_intr()
172 nvkm_wr32(device, 0x10a004, 0x00000080); nvkm_pmu_intr()
178 nvkm_wr32(device, 0x10a004, intr); nvkm_pmu_intr()
186 struct nvkm_device *device = pmu->subdev.device; nvkm_pmu_fini() local
188 nvkm_wr32(device, 0x10a014, 0x00000060); nvkm_pmu_fini()
197 struct nvkm_device *device = pmu->subdev.device; nvkm_pmu_init() local
201 nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */ nvkm_pmu_init()
202 nvkm_msec(device, 2000, nvkm_pmu_init()
203 if (!nvkm_rd32(device, 0x10a04c)) nvkm_pmu_init()
206 nvkm_mask(device, 0x000200, 0x00002000, 0x00000000); nvkm_pmu_init()
207 nvkm_mask(device, 0x000200, 0x00002000, 0x00002000); nvkm_pmu_init()
208 nvkm_rd32(device, 0x000200); nvkm_pmu_init()
209 nvkm_msec(device, 2000, nvkm_pmu_init()
210 if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006)) nvkm_pmu_init()
215 nvkm_wr32(device, 0x10a1c0, 0x01000000); nvkm_pmu_init()
217 nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]); nvkm_pmu_init()
220 nvkm_wr32(device, 0x10a180, 0x01000000); nvkm_pmu_init()
223 nvkm_wr32(device, 0x10a188, i >> 6); nvkm_pmu_init()
224 nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]); nvkm_pmu_init()
228 nvkm_wr32(device, 0x10a10c, 0x00000000); nvkm_pmu_init()
229 nvkm_wr32(device, 0x10a104, 0x00000000); nvkm_pmu_init()
230 nvkm_wr32(device, 0x10a100, 0x00000002); nvkm_pmu_init()
233 if (nvkm_msec(device, 2000, nvkm_pmu_init()
234 if (nvkm_rd32(device, 0x10a4d0)) nvkm_pmu_init()
238 pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff; nvkm_pmu_init()
239 pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16; nvkm_pmu_init()
242 if (nvkm_msec(device, 2000, nvkm_pmu_init()
243 if (nvkm_rd32(device, 0x10a4dc)) nvkm_pmu_init()
247 pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff; nvkm_pmu_init()
248 pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16; nvkm_pmu_init()
250 nvkm_wr32(device, 0x10a010, 0x000000e0); nvkm_pmu_init()
269 nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device, nvkm_pmu_new_() argument
275 nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev); nvkm_pmu_new_()
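nvkm_pmu_send() and nvkm_pmu_recv() in base.c implement a small mailbox: head and tail registers index a 16-slot ring (note the recurring (addr + 1) & 0x0f), and a data-port register at 0x10a1c4 carries the four-word payload. A sketch of the send side under those assumptions — the full/empty test and the omission of the 0x10a580 semaphore handshake are simplifications, not the driver's exact protocol:

#define PMU_QUEUE_HEAD  0x10a4a0u
#define PMU_QUEUE_TAIL  0x10a4b0u
#define PMU_DATA_PORT   0x10a1c4u

static int pmu_send(struct nvkm_device_sketch *device,
                    u32 process, u32 message, u32 data0, u32 data1)
{
        u32 head = rd32(device, PMU_QUEUE_HEAD);

        /* ring is full when advancing head would collide with tail */
        if (((head + 1) & 0x0f) == rd32(device, PMU_QUEUE_TAIL))
                return -1;

        wr32(device, PMU_DATA_PORT, process);
        wr32(device, PMU_DATA_PORT, message);
        wr32(device, PMU_DATA_PORT, data0);
        wr32(device, PMU_DATA_PORT, data1);
        wr32(device, PMU_QUEUE_HEAD, (head + 1) & 0x0f);        /* publish the slot */
        return 0;
}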
gk110.c:34 struct nvkm_device *device = pmu->subdev.device; gk110_pmu_pgob() local
58 nvkm_mask(device, 0x000200, 0x00001000, 0x00000000); gk110_pmu_pgob()
59 nvkm_rd32(device, 0x000200); gk110_pmu_pgob()
60 nvkm_mask(device, 0x000200, 0x08000000, 0x08000000); gk110_pmu_pgob()
63 nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002); gk110_pmu_pgob()
64 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001); gk110_pmu_pgob()
65 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000); gk110_pmu_pgob()
67 nvkm_mask(device, 0x0206b4, 0x00000000, 0x00000000); gk110_pmu_pgob()
69 nvkm_wr32(device, magic[i].addr, magic[i].data); gk110_pmu_pgob()
70 nvkm_msec(device, 2000, gk110_pmu_pgob()
71 if (!(nvkm_rd32(device, magic[i].addr) & 0x80000000)) gk110_pmu_pgob()
76 nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000); gk110_pmu_pgob()
77 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001); gk110_pmu_pgob()
78 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000); gk110_pmu_pgob()
80 nvkm_mask(device, 0x000200, 0x08000000, 0x00000000); gk110_pmu_pgob()
81 nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); gk110_pmu_pgob()
82 nvkm_rd32(device, 0x000200); gk110_pmu_pgob()
95 gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) gk110_pmu_new() argument
97 return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu); gk110_pmu_new()
/linux-4.4.14/drivers/block/drbd/
drbd_actlog.c:70 * sectors-written since device creation, and other data generation tag
86 * allows to cover device sizes of up to 2**54 Byte (16 PiB) */
95 void *drbd_md_get_buffer(struct drbd_device *device, const char *intent) drbd_md_get_buffer() argument
99 wait_event(device->misc_wait, drbd_md_get_buffer()
100 (r = atomic_cmpxchg(&device->md_io.in_use, 0, 1)) == 0 || drbd_md_get_buffer()
101 device->state.disk <= D_FAILED); drbd_md_get_buffer()
106 device->md_io.current_use = intent; drbd_md_get_buffer()
107 device->md_io.start_jif = jiffies; drbd_md_get_buffer()
108 device->md_io.submit_jif = device->md_io.start_jif - 1; drbd_md_get_buffer()
109 return page_address(device->md_io.page); drbd_md_get_buffer()
112 void drbd_md_put_buffer(struct drbd_device *device) drbd_md_put_buffer() argument
114 if (atomic_dec_and_test(&device->md_io.in_use)) drbd_md_put_buffer()
115 wake_up(&device->misc_wait); drbd_md_put_buffer()
118 void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev, wait_until_done_or_force_detached() argument
130 dt = wait_event_timeout(device->misc_wait, wait_until_done_or_force_detached()
131 *done || test_bit(FORCE_DETACH, &device->flags), dt); wait_until_done_or_force_detached()
133 drbd_err(device, "meta-data IO operation timed out\n"); wait_until_done_or_force_detached()
134 drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH); wait_until_done_or_force_detached()
138 static int _drbd_md_sync_page_io(struct drbd_device *device, _drbd_md_sync_page_io() argument
147 device->md_io.done = 0; _drbd_md_sync_page_io()
148 device->md_io.error = -ENODEV; _drbd_md_sync_page_io()
150 if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags)) _drbd_md_sync_page_io()
158 if (bio_add_page(bio, device->md_io.page, size, 0) != size) _drbd_md_sync_page_io()
160 bio->bi_private = device; _drbd_md_sync_page_io()
164 if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL) _drbd_md_sync_page_io()
167 else if (!get_ldev_if_state(device, D_ATTACHING)) { _drbd_md_sync_page_io()
169 drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n"); _drbd_md_sync_page_io()
175 atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */ _drbd_md_sync_page_io()
176 device->md_io.submit_jif = jiffies; _drbd_md_sync_page_io()
177 if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) _drbd_md_sync_page_io()
181 wait_until_done_or_force_detached(device, bdev, &device->md_io.done); _drbd_md_sync_page_io()
183 err = device->md_io.error; _drbd_md_sync_page_io()
190 int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, drbd_md_sync_page_io() argument
194 D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1); drbd_md_sync_page_io()
198 dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", drbd_md_sync_page_io()
205 drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n", drbd_md_sync_page_io()
209 err = _drbd_md_sync_page_io(device, bdev, sector, rw); drbd_md_sync_page_io()
211 drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n", drbd_md_sync_page_io()
217 static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr) find_active_resync_extent() argument
220 tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT); find_active_resync_extent()
229 static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock) _al_get() argument
235 spin_lock_irq(&device->al_lock); _al_get()
236 bm_ext = find_active_resync_extent(device, enr); _al_get()
239 spin_unlock_irq(&device->al_lock); _al_get()
241 wake_up(&device->al_wait); _al_get()
245 al_ext = lc_try_get(device->act_log, enr); _al_get()
247 al_ext = lc_get(device->act_log, enr); _al_get()
248 spin_unlock_irq(&device->al_lock); _al_get()
252 bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i) drbd_al_begin_io_fastpath() argument
259 D_ASSERT(device, (unsigned)(last - first) <= 1); drbd_al_begin_io_fastpath()
260 D_ASSERT(device, atomic_read(&device->local_cnt) > 0); drbd_al_begin_io_fastpath()
266 return _al_get(device, first, true); drbd_al_begin_io_fastpath()
269 bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i) drbd_al_begin_io_prepare() argument
278 D_ASSERT(device, first <= last); drbd_al_begin_io_prepare()
279 D_ASSERT(device, atomic_read(&device->local_cnt) > 0); drbd_al_begin_io_prepare()
283 wait_event(device->al_wait, drbd_al_begin_io_prepare()
284 (al_ext = _al_get(device, enr, false)) != NULL); drbd_al_begin_io_prepare()
291 static int al_write_transaction(struct drbd_device *device);
293 void drbd_al_begin_io_commit(struct drbd_device *device) drbd_al_begin_io_commit() argument
300 wait_event(device->al_wait, drbd_al_begin_io_commit()
301 device->act_log->pending_changes == 0 || drbd_al_begin_io_commit()
302 (locked = lc_try_lock_for_transaction(device->act_log))); drbd_al_begin_io_commit()
307 if (device->act_log->pending_changes) { drbd_al_begin_io_commit()
311 write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates; drbd_al_begin_io_commit()
315 al_write_transaction(device); drbd_al_begin_io_commit()
316 spin_lock_irq(&device->al_lock); drbd_al_begin_io_commit()
321 lc_committed(device->act_log); drbd_al_begin_io_commit()
322 spin_unlock_irq(&device->al_lock); drbd_al_begin_io_commit()
324 lc_unlock(device->act_log); drbd_al_begin_io_commit()
325 wake_up(&device->al_wait); drbd_al_begin_io_commit()
332 void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i) drbd_al_begin_io() argument
334 if (drbd_al_begin_io_prepare(device, i)) drbd_al_begin_io()
335 drbd_al_begin_io_commit(device); drbd_al_begin_io()
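drbd_al_begin_io() above is the slow path composed of prepare + commit; the request code first tries the lock-free fastpath. A hedged sketch of how the three entry points combine (the composition follows this listing, the caller itself is illustrative):

    static void al_begin_io_pattern(struct drbd_device *device, struct drbd_interval *i)
    {
            if (drbd_al_begin_io_fastpath(device, i))
                    return;                          /* extent already active, no disk I/O */
            if (drbd_al_begin_io_prepare(device, i)) /* reserved new slots? */
                    drbd_al_begin_io_commit(device); /* write one AL transaction */
    }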
338 int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i) drbd_al_begin_io_nonblock() argument
340 struct lru_cache *al = device->act_log; drbd_al_begin_io_nonblock()
349 D_ASSERT(device, first <= last); drbd_al_begin_io_nonblock()
368 __set_bit(__LC_STARVING, &device->act_log->flags); drbd_al_begin_io_nonblock()
375 tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT); drbd_al_begin_io_nonblock()
391 al_ext = lc_get_cumulative(device->act_log, enr); drbd_al_begin_io_nonblock()
393 drbd_info(device, "LOGIC BUG for enr=%u\n", enr); drbd_al_begin_io_nonblock()
398 void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i) drbd_al_complete_io() argument
408 D_ASSERT(device, first <= last); drbd_al_complete_io()
409 spin_lock_irqsave(&device->al_lock, flags); drbd_al_complete_io()
412 extent = lc_find(device->act_log, enr); drbd_al_complete_io()
414 drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr); drbd_al_complete_io()
417 lc_put(device->act_log, extent); drbd_al_complete_io()
419 spin_unlock_irqrestore(&device->al_lock, flags); drbd_al_complete_io()
420 wake_up(&device->al_wait); drbd_al_complete_io()
441 static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device) al_tr_number_to_on_disk_sector() argument
443 const unsigned int stripes = device->ldev->md.al_stripes; al_tr_number_to_on_disk_sector()
444 const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k; al_tr_number_to_on_disk_sector()
447 unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k); al_tr_number_to_on_disk_sector()
456 return device->ldev->md.md_offset + device->ldev->md.al_offset + t; al_tr_number_to_on_disk_sector()
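The search omits the striping arithmetic between the two lines above; a hedged reconstruction with made-up geometry shows how a transaction number maps to a sector, assuming slot t is interleaved across al_stripes and scaled by 8 (512 B sectors per 4 KiB block):

    /* illustration only; the stripe geometry below is invented for the example */
    static sector_t al_slot_offset_example(void)
    {
            const unsigned int stripes = 4, stripe_size_4k = 8, al_size_4k = 32;
            unsigned int t = 10 % al_size_4k;                  /* ring slot 10 */
            t = (t % stripes) * stripe_size_4k + t / stripes;  /* 4 KiB block 18 */
            return (sector_t)t * 8;                 /* 144 sectors past al_offset */
    }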
459 int al_write_transaction(struct drbd_device *device) al_write_transaction() argument
469 if (!get_ldev(device)) { al_write_transaction()
470 drbd_err(device, "disk is %s, cannot start al transaction\n", al_write_transaction()
471 drbd_disk_str(device->state.disk)); al_write_transaction()
476 if (device->state.disk < D_INCONSISTENT) { al_write_transaction()
477 drbd_err(device, al_write_transaction()
479 drbd_disk_str(device->state.disk)); al_write_transaction()
480 put_ldev(device); al_write_transaction()
485 buffer = drbd_md_get_buffer(device, __func__); al_write_transaction()
487 drbd_err(device, "disk failed while waiting for md_io buffer\n"); al_write_transaction()
488 put_ldev(device); al_write_transaction()
494 buffer->tr_number = cpu_to_be32(device->al_tr_number); al_write_transaction()
502 spin_lock_irq(&device->al_lock); al_write_transaction()
503 list_for_each_entry(e, &device->act_log->to_be_changed, list) { al_write_transaction()
511 drbd_bm_mark_for_writeout(device, al_write_transaction()
515 spin_unlock_irq(&device->al_lock); al_write_transaction()
524 buffer->context_size = cpu_to_be16(device->act_log->nr_elements); al_write_transaction()
525 buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle); al_write_transaction()
528 device->act_log->nr_elements - device->al_tr_cycle); al_write_transaction()
530 unsigned idx = device->al_tr_cycle + i; al_write_transaction()
531 extent_nr = lc_element_by_index(device->act_log, idx)->lc_number; al_write_transaction()
537 device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION; al_write_transaction()
538 if (device->al_tr_cycle >= device->act_log->nr_elements) al_write_transaction()
539 device->al_tr_cycle = 0; al_write_transaction()
541 sector = al_tr_number_to_on_disk_sector(device); al_write_transaction()
546 if (drbd_bm_write_hinted(device)) al_write_transaction()
551 write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates; al_write_transaction()
554 if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) { al_write_transaction()
556 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); al_write_transaction()
558 device->al_tr_number++; al_write_transaction()
559 device->al_writ_cnt++; al_write_transaction()
564 drbd_md_put_buffer(device); al_write_transaction()
565 put_ldev(device); al_write_transaction()
570 static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext) _try_lc_del() argument
574 spin_lock_irq(&device->al_lock); _try_lc_del()
577 lc_del(device->act_log, al_ext); _try_lc_del()
578 spin_unlock_irq(&device->al_lock); _try_lc_del()
585 * @device: DRBD device.
590 * You need to lock device->act_log with lc_try_lock() / lc_unlock()
592 void drbd_al_shrink(struct drbd_device *device) drbd_al_shrink() argument
597 D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags)); drbd_al_shrink()
599 for (i = 0; i < device->act_log->nr_elements; i++) { drbd_al_shrink()
600 al_ext = lc_element_by_index(device->act_log, i); drbd_al_shrink()
603 wait_event(device->al_wait, _try_lc_del(device, al_ext)); drbd_al_shrink()
606 wake_up(&device->al_wait); drbd_al_shrink()
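Per the kernel-doc above, the caller must already hold the act_log lock. A minimal caller sketch following that contract (the wait_event()/wake_up() framing is an assumption modeled on the al_wait usage elsewhere in this file):

    static void al_shrink_caller(struct drbd_device *device)
    {
            wait_event(device->al_wait, lc_try_lock(device->act_log));
            drbd_al_shrink(device);         /* evict every inactive extent */
            lc_unlock(device->act_log);
            wake_up(&device->al_wait);
    }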
609 int drbd_initialize_al(struct drbd_device *device, void *buffer) drbd_initialize_al() argument
612 struct drbd_md *md = &device->ldev->md; drbd_initialize_al()
623 int err = drbd_md_sync_page_io(device, device->ldev, al_base + i * 8, WRITE); drbd_initialize_al()
649 static bool update_rs_extent(struct drbd_device *device, update_rs_extent() argument
655 D_ASSERT(device, atomic_read(&device->local_cnt)); update_rs_extent()
665 e = lc_find(device->resync, enr); update_rs_extent()
667 e = lc_get(device->resync, enr); update_rs_extent()
678 drbd_warn(device, "BAD! enr=%u rs_left=%d " update_rs_extent()
682 drbd_conn_str(device->state.conn)); update_rs_extent()
690 ext->rs_left = drbd_bm_e_weight(device, enr); update_rs_extent()
699 int rs_left = drbd_bm_e_weight(device, enr); update_rs_extent()
701 drbd_warn(device, "changing resync lce: %d[%u;%02lx]" update_rs_extent()
708 drbd_warn(device, "Kicking resync_lru element enr=%u " update_rs_extent()
716 lc_committed(device->resync); update_rs_extent()
719 lc_put(device->resync, &ext->lce); update_rs_extent()
728 drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n", update_rs_extent()
729 device->resync_locked, update_rs_extent()
730 device->resync->nr_elements, update_rs_extent()
731 device->resync->flags); update_rs_extent()
736 void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go) drbd_advance_rs_marks() argument
739 unsigned long last = device->rs_mark_time[device->rs_last_mark]; drbd_advance_rs_marks()
740 int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS; drbd_advance_rs_marks()
742 if (device->rs_mark_left[device->rs_last_mark] != still_to_go && drbd_advance_rs_marks()
743 device->state.conn != C_PAUSED_SYNC_T && drbd_advance_rs_marks()
744 device->state.conn != C_PAUSED_SYNC_S) { drbd_advance_rs_marks()
745 device->rs_mark_time[next] = now; drbd_advance_rs_marks()
746 device->rs_mark_left[next] = still_to_go; drbd_advance_rs_marks()
747 device->rs_last_mark = next; drbd_advance_rs_marks()
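The marks form a small ring of (time, blocks-left) samples; the current resync speed falls out of the difference between two marks. A hedged sketch of that derivation (names as in the ring above):

    static unsigned long rs_speed_estimate(struct drbd_device *device)
    {
            int newest = device->rs_last_mark;
            int oldest = (newest + 1) % DRBD_SYNC_MARKS;    /* next slot = oldest */
            unsigned long dt = (device->rs_mark_time[newest] -
                                device->rs_mark_time[oldest]) / HZ ?: 1;
            return (device->rs_mark_left[oldest] -
                    device->rs_mark_left[newest]) / dt;     /* bits per second */
    }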
753 static bool lazy_bitmap_update_due(struct drbd_device *device) lazy_bitmap_update_due() argument
755 return time_after(jiffies, device->rs_last_bcast + 2*HZ); lazy_bitmap_update_due()
758 static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done) maybe_schedule_on_disk_bitmap_update() argument
761 set_bit(RS_DONE, &device->flags); maybe_schedule_on_disk_bitmap_update()
763 else if (!lazy_bitmap_update_due(device)) maybe_schedule_on_disk_bitmap_update()
766 drbd_device_post_work(device, RS_PROGRESS); maybe_schedule_on_disk_bitmap_update()
769 static int update_sync_bits(struct drbd_device *device, update_sync_bits() argument
794 c = drbd_bm_count_bits(device, sbnr, tbnr); update_sync_bits()
796 c = drbd_bm_clear_bits(device, sbnr, tbnr); update_sync_bits()
798 c = drbd_bm_set_bits(device, sbnr, tbnr); update_sync_bits()
801 spin_lock_irqsave(&device->al_lock, flags); update_sync_bits()
802 cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode); update_sync_bits()
803 spin_unlock_irqrestore(&device->al_lock, flags); update_sync_bits()
810 unsigned long still_to_go = drbd_bm_total_weight(device); update_sync_bits()
811 bool rs_is_done = (still_to_go <= device->rs_failed); update_sync_bits()
812 drbd_advance_rs_marks(device, still_to_go); update_sync_bits()
814 maybe_schedule_on_disk_bitmap_update(device, rs_is_done); update_sync_bits()
816 device->rs_failed += count; update_sync_bits()
817 wake_up(&device->al_wait); update_sync_bits()
829 int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, __drbd_change_sync() argument
842 drbd_err(device, "%s: sector=%llus size=%d nonsense!\n", __drbd_change_sync()
848 if (!get_ldev(device)) __drbd_change_sync()
851 nr_sectors = drbd_get_capacity(device->this_bdev); __drbd_change_sync()
878 count = update_sync_bits(device, sbnr, ebnr, mode); __drbd_change_sync()
880 put_ldev(device); __drbd_change_sync()
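The sector-to-bit conversion is elided above, but it is fixed by the 4 KiB resync granularity: with BM_BLOCK_SHIFT == 12, one bitmap bit covers 2^(12-9) = 8 sectors of 512 B, so e.g. sector 1024 with size 8192 B touches bits 128 and 129. A one-line sketch of the conversion, assuming that granularity:

    static unsigned long sector_to_bm_bit(sector_t sector)
    {
            return sector >> (12 - 9);      /* what BM_SECT_TO_BIT() boils down to */
    }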
885 struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr) _bme_get() argument
892 spin_lock_irq(&device->al_lock); _bme_get()
893 if (device->resync_locked > device->resync->nr_elements/2) { _bme_get()
894 spin_unlock_irq(&device->al_lock); _bme_get()
897 e = lc_get(device->resync, enr); _bme_get()
901 bm_ext->rs_left = drbd_bm_e_weight(device, enr); _bme_get()
903 lc_committed(device->resync); _bme_get()
907 device->resync_locked++; _bme_get()
910 rs_flags = device->resync->flags; _bme_get()
911 spin_unlock_irq(&device->al_lock); _bme_get()
913 wake_up(&device->al_wait); _bme_get()
917 drbd_warn(device, "Have to wait for element" _bme_get()
925 static int _is_in_al(struct drbd_device *device, unsigned int enr) _is_in_al() argument
929 spin_lock_irq(&device->al_lock); _is_in_al()
930 rv = lc_is_used(device->act_log, enr); _is_in_al()
931 spin_unlock_irq(&device->al_lock); _is_in_al()
938 * @device: DRBD device.
943 int drbd_rs_begin_io(struct drbd_device *device, sector_t sector) drbd_rs_begin_io() argument
951 sig = wait_event_interruptible(device->al_wait, drbd_rs_begin_io()
952 (bm_ext = _bme_get(device, enr))); drbd_rs_begin_io()
960 sa = drbd_rs_c_min_rate_throttle(device); drbd_rs_begin_io()
963 sig = wait_event_interruptible(device->al_wait, drbd_rs_begin_io()
964 !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) || drbd_rs_begin_io()
968 spin_lock_irq(&device->al_lock); drbd_rs_begin_io()
969 if (lc_put(device->resync, &bm_ext->lce) == 0) { drbd_rs_begin_io()
971 device->resync_locked--; drbd_rs_begin_io()
972 wake_up(&device->al_wait); drbd_rs_begin_io()
974 spin_unlock_irq(&device->al_lock); drbd_rs_begin_io()
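Every successful drbd_rs_begin_io() must be balanced by drbd_rs_complete_io() on the same sector (defined further down). A hypothetical pairing sketch:

    static int resync_one_extent(struct drbd_device *device, sector_t sector)
    {
            if (drbd_rs_begin_io(device, sector))
                    return -EINTR;          /* interrupted while waiting */
            /* ... read and verify the area covered by this resync extent ... */
            drbd_rs_complete_io(device, sector);
            return 0;
    }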
988 * @device: DRBD device.
995 int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector) drbd_try_rs_begin_io() argument
1002 bool throttle = drbd_rs_should_slow_down(device, sector, true); drbd_try_rs_begin_io()
1009 if (throttle && device->resync_wenr != enr) drbd_try_rs_begin_io()
1012 spin_lock_irq(&device->al_lock); drbd_try_rs_begin_io()
1013 if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) { drbd_try_rs_begin_io()
1027 e = lc_find(device->resync, device->resync_wenr); drbd_try_rs_begin_io()
1030 D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags)); drbd_try_rs_begin_io()
1031 D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags)); drbd_try_rs_begin_io()
1033 device->resync_wenr = LC_FREE; drbd_try_rs_begin_io()
1034 if (lc_put(device->resync, &bm_ext->lce) == 0) { drbd_try_rs_begin_io()
1036 device->resync_locked--; drbd_try_rs_begin_io()
1038 wake_up(&device->al_wait); drbd_try_rs_begin_io()
1040 drbd_alert(device, "LOGIC BUG\n"); drbd_try_rs_begin_io()
1044 e = lc_try_get(device->resync, enr); drbd_try_rs_begin_io()
1050 device->resync_locked++; drbd_try_rs_begin_io()
1057 D_ASSERT(device, bm_ext->lce.refcnt > 0); drbd_try_rs_begin_io()
1062 if (device->resync_locked > device->resync->nr_elements-3) drbd_try_rs_begin_io()
1065 e = lc_get(device->resync, enr); drbd_try_rs_begin_io()
1068 const unsigned long rs_flags = device->resync->flags; drbd_try_rs_begin_io()
1070 drbd_warn(device, "Have to wait for element" drbd_try_rs_begin_io()
1076 bm_ext->rs_left = drbd_bm_e_weight(device, enr); drbd_try_rs_begin_io()
1078 lc_committed(device->resync); drbd_try_rs_begin_io()
1079 wake_up(&device->al_wait); drbd_try_rs_begin_io()
1080 D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0); drbd_try_rs_begin_io()
1083 D_ASSERT(device, bm_ext->lce.refcnt == 1); drbd_try_rs_begin_io()
1084 device->resync_locked++; drbd_try_rs_begin_io()
1089 if (lc_is_used(device->act_log, al_enr+i)) drbd_try_rs_begin_io()
1094 device->resync_wenr = LC_FREE; drbd_try_rs_begin_io()
1095 spin_unlock_irq(&device->al_lock); drbd_try_rs_begin_io()
1101 D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags)); drbd_try_rs_begin_io()
1102 D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags)); drbd_try_rs_begin_io()
1104 device->resync_wenr = LC_FREE; drbd_try_rs_begin_io()
1105 if (lc_put(device->resync, &bm_ext->lce) == 0) { drbd_try_rs_begin_io()
1107 device->resync_locked--; drbd_try_rs_begin_io()
1109 wake_up(&device->al_wait); drbd_try_rs_begin_io()
1111 device->resync_wenr = enr; drbd_try_rs_begin_io()
1113 spin_unlock_irq(&device->al_lock); drbd_try_rs_begin_io()
1117 void drbd_rs_complete_io(struct drbd_device *device, sector_t sector) drbd_rs_complete_io() argument
1124 spin_lock_irqsave(&device->al_lock, flags); drbd_rs_complete_io()
1125 e = lc_find(device->resync, enr); drbd_rs_complete_io()
1128 spin_unlock_irqrestore(&device->al_lock, flags); drbd_rs_complete_io()
1130 drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n"); drbd_rs_complete_io()
1135 spin_unlock_irqrestore(&device->al_lock, flags); drbd_rs_complete_io()
1136 drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, " drbd_rs_complete_io()
1142 if (lc_put(device->resync, &bm_ext->lce) == 0) { drbd_rs_complete_io()
1144 device->resync_locked--; drbd_rs_complete_io()
1145 wake_up(&device->al_wait); drbd_rs_complete_io()
1148 spin_unlock_irqrestore(&device->al_lock, flags); drbd_rs_complete_io()
1153 * @device: DRBD device.
1155 void drbd_rs_cancel_all(struct drbd_device *device) drbd_rs_cancel_all() argument
1157 spin_lock_irq(&device->al_lock); drbd_rs_cancel_all()
1159 if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */ drbd_rs_cancel_all()
1160 lc_reset(device->resync); drbd_rs_cancel_all()
1161 put_ldev(device); drbd_rs_cancel_all()
1163 device->resync_locked = 0; drbd_rs_cancel_all()
1164 device->resync_wenr = LC_FREE; drbd_rs_cancel_all()
1165 spin_unlock_irq(&device->al_lock); drbd_rs_cancel_all()
1166 wake_up(&device->al_wait); drbd_rs_cancel_all()
1171 * @device: DRBD device.
1176 int drbd_rs_del_all(struct drbd_device *device) drbd_rs_del_all() argument
1182 spin_lock_irq(&device->al_lock); drbd_rs_del_all()
1184 if (get_ldev_if_state(device, D_FAILED)) { drbd_rs_del_all()
1186 for (i = 0; i < device->resync->nr_elements; i++) { drbd_rs_del_all()
1187 e = lc_element_by_index(device->resync, i); drbd_rs_del_all()
1191 if (bm_ext->lce.lc_number == device->resync_wenr) { drbd_rs_del_all()
1192 drbd_info(device, "dropping %u in drbd_rs_del_all, apparently" drbd_rs_del_all()
1194 device->resync_wenr); drbd_rs_del_all()
1195 D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags)); drbd_rs_del_all()
1196 D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags)); drbd_rs_del_all()
1198 device->resync_wenr = LC_FREE; drbd_rs_del_all()
1199 lc_put(device->resync, &bm_ext->lce); drbd_rs_del_all()
1202 drbd_info(device, "Retrying drbd_rs_del_all() later. " drbd_rs_del_all()
1204 put_ldev(device); drbd_rs_del_all()
1205 spin_unlock_irq(&device->al_lock); drbd_rs_del_all()
1208 D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags)); drbd_rs_del_all()
1209 D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags)); drbd_rs_del_all()
1210 lc_del(device->resync, &bm_ext->lce); drbd_rs_del_all()
1212 D_ASSERT(device, device->resync->used == 0); drbd_rs_del_all()
1213 put_ldev(device); drbd_rs_del_all()
1215 spin_unlock_irq(&device->al_lock); drbd_rs_del_all()
1216 wake_up(&device->al_wait); drbd_rs_del_all()
H A Ddrbd_worker.c60 Each state transition on a device holds a read lock. In case we have
70 struct drbd_device *device; drbd_md_endio() local
72 device = bio->bi_private; drbd_md_endio()
73 device->md_io.error = bio->bi_error; drbd_md_endio()
76 * to time out on the lower level device, and eventually detach from it. drbd_md_endio()
84 * ASSERT(atomic_read(&device->md_io_in_use) == 1) there. drbd_md_endio()
86 drbd_md_put_buffer(device); drbd_md_endio()
87 device->md_io.done = 1; drbd_md_endio()
88 wake_up(&device->misc_wait); drbd_md_endio()
90 if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */ drbd_md_endio()
91 put_ldev(device); drbd_md_endio()
101 struct drbd_device *device = peer_device->device; __releases() local
103 spin_lock_irqsave(&device->resource->req_lock, flags); __releases()
104 device->read_cnt += peer_req->i.size >> 9; __releases()
106 if (list_empty(&device->read_ee)) __releases()
107 wake_up(&device->ee_wait); __releases()
109 __drbd_chk_io_error(device, DRBD_READ_ERROR); __releases()
110 spin_unlock_irqrestore(&device->resource->req_lock, flags); __releases()
113 put_ldev(device); __releases()
122 struct drbd_device *device = peer_device->device; __releases() local
137 spin_lock_irqsave(&device->resource->req_lock, flags); __releases()
138 device->writ_cnt += peer_req->i.size >> 9; __releases()
139 list_move_tail(&peer_req->w.list, &device->done_ee); __releases()
149 do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee); __releases()
154 __drbd_chk_io_error(device, DRBD_WRITE_ERROR); __releases()
155 spin_unlock_irqrestore(&device->resource->req_lock, flags); __releases()
158 drbd_rs_complete_io(device, i.sector); __releases()
161 wake_up(&device->ee_wait); __releases()
164 drbd_al_complete_io(device, &i); __releases()
167 put_ldev(device); __releases()
176 struct drbd_device *device = peer_req->peer_device->device; drbd_peer_request_endio() local
181 drbd_warn(device, "%s: error=%d s=%llus\n", drbd_peer_request_endio()
204 struct drbd_device *device = req->device; drbd_request_endio() local
224 * If later the local backing device "recovers", and now DMAs some data drbd_request_endio()
238 drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); drbd_request_endio()
263 spin_lock_irqsave(&device->resource->req_lock, flags); drbd_request_endio()
265 spin_unlock_irqrestore(&device->resource->req_lock, flags); drbd_request_endio()
266 put_ldev(device); drbd_request_endio()
269 complete_master_bio(device, &m); drbd_request_endio()
324 struct drbd_device *device = peer_device->device; w_e_send_csum() local
346 drbd_free_peer_req(device, peer_req); w_e_send_csum()
348 inc_rs_pending(device); w_e_send_csum()
354 drbd_err(device, "kmalloc() of digest failed.\n"); w_e_send_csum()
360 drbd_free_peer_req(device, peer_req); w_e_send_csum()
363 drbd_err(device, "drbd_send_drequest(..., csum) failed\n"); w_e_send_csum()
371 struct drbd_device *device = peer_device->device; read_for_csum() local
374 if (!get_ldev(device)) read_for_csum()
385 spin_lock_irq(&device->resource->req_lock); read_for_csum()
386 list_add_tail(&peer_req->w.list, &device->read_ee); read_for_csum()
387 spin_unlock_irq(&device->resource->req_lock); read_for_csum()
389 atomic_add(size >> 9, &device->rs_sect_ev); read_for_csum()
390 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0) read_for_csum()
397 spin_lock_irq(&device->resource->req_lock); read_for_csum()
399 spin_unlock_irq(&device->resource->req_lock); read_for_csum()
401 drbd_free_peer_req(device, peer_req); read_for_csum()
403 put_ldev(device); read_for_csum()
409 struct drbd_device *device = w_resync_timer() local
412 switch (device->state.conn) { w_resync_timer()
414 make_ov_request(device, cancel); w_resync_timer()
417 make_resync_request(device, cancel); w_resync_timer()
426 struct drbd_device *device = (struct drbd_device *) data; resync_timer_fn() local
429 &first_peer_device(device)->connection->sender_work, resync_timer_fn()
430 &device->resync_work); resync_timer_fn()
477 static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in) drbd_rs_controller() argument
489 dc = rcu_dereference(device->ldev->disk_conf); drbd_rs_controller()
490 plan = rcu_dereference(device->rs_plan_s); drbd_rs_controller()
494 if (device->rs_in_flight + sect_in == 0) { /* At start of resync */ drbd_rs_controller()
501 correction = want - device->rs_in_flight - plan->total; drbd_rs_controller()
521 drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n", drbd_rs_controller()
522 sect_in, device->rs_in_flight, want, correction, drbd_rs_controller()
523 steps, cps, device->rs_planed, curr_corr, req_sect); drbd_rs_controller()
529 static int drbd_rs_number_requests(struct drbd_device *device) drbd_rs_number_requests() argument
534 sect_in = atomic_xchg(&device->rs_sect_in, 0); drbd_rs_number_requests()
535 device->rs_in_flight -= sect_in; drbd_rs_number_requests()
538 mxb = drbd_get_max_buffers(device) / 2; drbd_rs_number_requests()
539 if (rcu_dereference(device->rs_plan_s)->size) { drbd_rs_number_requests()
540 number = drbd_rs_controller(device, sect_in) >> (BM_BLOCK_SHIFT - 9); drbd_rs_number_requests()
541 device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; drbd_rs_number_requests()
543 device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate; drbd_rs_number_requests()
544 number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); drbd_rs_number_requests()
558 if (mxb - device->rs_in_flight/8 < number) drbd_rs_number_requests()
559 number = mxb - device->rs_in_flight/8; drbd_rs_number_requests()
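In the fixed-rate branch, c_sync_rate (KiB/s) is converted into a request budget per timer tick. A worked example, assuming SLEEP_TIME == HZ/10 and BM_BLOCK_SIZE == 4096 as elsewhere in DRBD:

    /* c_sync_rate = 240 KiB/s
     * number = SLEEP_TIME * 240 / ((4096/1024) * HZ)
     *        = (HZ/10) * 240 / (4 * HZ) = 6 four-KiB requests per 100 ms,
     * i.e. 24 KiB per tick = 240 KiB/s, matching the configured rate. */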
564 static int make_resync_request(struct drbd_device *const device, int cancel) make_resync_request() argument
566 struct drbd_peer_device *const peer_device = first_peer_device(device); make_resync_request()
570 const sector_t capacity = drbd_get_capacity(device->this_bdev); make_resync_request()
579 if (device->rs_total == 0) { make_resync_request()
581 drbd_resync_finished(device); make_resync_request()
585 if (!get_ldev(device)) { make_resync_request()
586 /* Since we only need to access device->resync, a make_resync_request()
587 get_ldev_if_state(device,D_FAILED) would be sufficient, but make_resync_request()
590 drbd_err(device, "Disk broke down during resync!\n"); make_resync_request()
594 max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9; make_resync_request()
595 number = drbd_rs_number_requests(device); make_resync_request()
620 bit = drbd_bm_find_next(device, device->bm_resync_fo); make_resync_request()
623 device->bm_resync_fo = drbd_bm_bits(device); make_resync_request()
624 put_ldev(device); make_resync_request()
630 if (drbd_try_rs_begin_io(device, sector)) { make_resync_request()
631 device->bm_resync_fo = bit; make_resync_request()
634 device->bm_resync_fo = bit + 1; make_resync_request()
636 if (unlikely(drbd_bm_test_bit(device, bit) == 0)) { make_resync_request()
637 drbd_rs_complete_io(device, sector); make_resync_request()
666 if (drbd_bm_test_bit(device, bit+1) != 1) make_resync_request()
677 device->bm_resync_fo = bit + 1; make_resync_request()
684 if (device->use_csums) { make_resync_request()
687 put_ldev(device); make_resync_request()
690 drbd_rs_complete_io(device, sector); make_resync_request()
691 device->bm_resync_fo = BM_SECT_TO_BIT(sector); make_resync_request()
703 inc_rs_pending(device); make_resync_request()
707 drbd_err(device, "drbd_send_drequest() failed, aborting...\n"); make_resync_request()
708 dec_rs_pending(device); make_resync_request()
709 put_ldev(device); make_resync_request()
715 if (device->bm_resync_fo >= drbd_bm_bits(device)) { make_resync_request()
722 put_ldev(device); make_resync_request()
727 device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); make_resync_request()
728 mod_timer(&device->resync_timer, jiffies + SLEEP_TIME); make_resync_request()
729 put_ldev(device); make_resync_request()
733 static int make_ov_request(struct drbd_device *device, int cancel) make_ov_request() argument
737 const sector_t capacity = drbd_get_capacity(device->this_bdev); make_ov_request()
743 number = drbd_rs_number_requests(device); make_ov_request()
745 sector = device->ov_position; make_ov_request()
754 && verify_can_do_stop_sector(device) make_ov_request()
755 && sector >= device->ov_stop_sector; make_ov_request()
761 if (drbd_try_rs_begin_io(device, sector)) { make_ov_request()
762 device->ov_position = sector; make_ov_request()
769 inc_rs_pending(device); make_ov_request()
770 if (drbd_send_ov_request(first_peer_device(device), sector, size)) { make_ov_request()
771 dec_rs_pending(device); make_ov_request()
776 device->ov_position = sector; make_ov_request()
779 device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); make_ov_request()
781 mod_timer(&device->resync_timer, jiffies + SLEEP_TIME); make_ov_request()
789 struct drbd_device *device = dw->device; w_ov_finished() local
791 ov_out_of_sync_print(device); w_ov_finished()
792 drbd_resync_finished(device); w_ov_finished()
801 struct drbd_device *device = dw->device; w_resync_finished() local
804 drbd_resync_finished(device); w_resync_finished()
809 static void ping_peer(struct drbd_device *device) ping_peer() argument
811 struct drbd_connection *connection = first_peer_device(device)->connection; ping_peer()
816 test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED); ping_peer()
819 int drbd_resync_finished(struct drbd_device *device) drbd_resync_finished() argument
831 if (drbd_rs_del_all(device)) { drbd_resync_finished()
841 dw->device = device; drbd_resync_finished()
842 drbd_queue_work(&first_peer_device(device)->connection->sender_work, drbd_resync_finished()
846 drbd_err(device, "Warning: failed to drbd_rs_del_all() and to kmalloc(dw).\n"); drbd_resync_finished()
849 dt = (jiffies - device->rs_start - device->rs_paused) / HZ; drbd_resync_finished()
853 db = device->rs_total; drbd_resync_finished()
855 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T) drbd_resync_finished()
856 db -= device->ov_left; drbd_resync_finished()
859 device->rs_paused /= HZ; drbd_resync_finished()
861 if (!get_ldev(device)) drbd_resync_finished()
864 ping_peer(device); drbd_resync_finished()
866 spin_lock_irq(&device->resource->req_lock); drbd_resync_finished()
867 os = drbd_read_state(device); drbd_resync_finished()
879 drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", drbd_resync_finished()
881 dt + device->rs_paused, device->rs_paused, dbdt); drbd_resync_finished()
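The throughput figure printed above divides the synced amount by the elapsed time; the computation itself is elided from this listing. A hedged example of the numbers involved:

    /* rs_total = 262144 bits  (1 GiB at 4 KiB per bit), dt = 50 s
     * db/dt    = 262144 / 50 = 5242 bits/s  ->  Bit2KB() ~ 20968 K/s (~20 MiB/s) */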
883 n_oos = drbd_bm_total_weight(device); drbd_resync_finished()
887 drbd_alert(device, "Online verify found %lu %dk blocks out of sync!\n", drbd_resync_finished()
892 D_ASSERT(device, (n_oos - device->rs_failed) == 0); drbd_resync_finished()
897 if (device->use_csums && device->rs_total) { drbd_resync_finished()
898 const unsigned long s = device->rs_same_csum; drbd_resync_finished()
899 const unsigned long t = device->rs_total; drbd_resync_finished()
903 drbd_info(device, "%u %% had equal checksums, eliminated: %luK; " drbd_resync_finished()
906 Bit2KB(device->rs_same_csum), drbd_resync_finished()
907 Bit2KB(device->rs_total - device->rs_same_csum), drbd_resync_finished()
908 Bit2KB(device->rs_total)); drbd_resync_finished()
912 if (device->rs_failed) { drbd_resync_finished()
913 drbd_info(device, " %lu failed blocks\n", device->rs_failed); drbd_resync_finished()
927 if (device->p_uuid) { drbd_resync_finished()
930 _drbd_uuid_set(device, i, device->p_uuid[i]); drbd_resync_finished()
931 drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]); drbd_resync_finished()
932 _drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]); drbd_resync_finished()
934 drbd_err(device, "device->p_uuid is NULL! BUG\n"); drbd_resync_finished()
941 drbd_uuid_set_bm(device, 0UL); drbd_resync_finished()
942 drbd_print_uuids(device, "updated UUIDs"); drbd_resync_finished()
943 if (device->p_uuid) { drbd_resync_finished()
948 device->p_uuid[i] = device->ldev->md.uuid[i]; drbd_resync_finished()
953 _drbd_set_state(device, ns, CS_VERBOSE, NULL); drbd_resync_finished()
955 spin_unlock_irq(&device->resource->req_lock); drbd_resync_finished()
956 put_ldev(device); drbd_resync_finished()
958 device->rs_total = 0; drbd_resync_finished()
959 device->rs_failed = 0; drbd_resync_finished()
960 device->rs_paused = 0; drbd_resync_finished()
962 /* reset start sector, if we reached end of device */ drbd_resync_finished()
963 if (verify_done && device->ov_left == 0) drbd_resync_finished()
964 device->ov_start_sector = 0; drbd_resync_finished()
966 drbd_md_sync(device); drbd_resync_finished()
969 drbd_khelper(device, khelper_cmd); drbd_resync_finished()
975 static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req) move_to_net_ee_or_free() argument
980 atomic_add(i, &device->pp_in_use_by_net); move_to_net_ee_or_free()
981 atomic_sub(i, &device->pp_in_use); move_to_net_ee_or_free()
982 spin_lock_irq(&device->resource->req_lock); move_to_net_ee_or_free()
983 list_add_tail(&peer_req->w.list, &device->net_ee); move_to_net_ee_or_free()
984 spin_unlock_irq(&device->resource->req_lock); move_to_net_ee_or_free()
987 drbd_free_peer_req(device, peer_req); move_to_net_ee_or_free()
992 * @device: DRBD device.
1000 struct drbd_device *device = peer_device->device; w_e_end_data_req() local
1004 drbd_free_peer_req(device, peer_req); w_e_end_data_req()
1005 dec_unacked(device); w_e_end_data_req()
1013 drbd_err(device, "Sending NegDReply. sector=%llus.\n", w_e_end_data_req()
1019 dec_unacked(device); w_e_end_data_req()
1021 move_to_net_ee_or_free(device, peer_req); w_e_end_data_req()
1024 drbd_err(device, "drbd_send_block() failed\n"); w_e_end_data_req()
1037 struct drbd_device *device = peer_device->device; w_e_end_rsdata_req() local
1041 drbd_free_peer_req(device, peer_req); w_e_end_rsdata_req()
1042 dec_unacked(device); w_e_end_rsdata_req()
1046 if (get_ldev_if_state(device, D_FAILED)) { w_e_end_rsdata_req()
1047 drbd_rs_complete_io(device, peer_req->i.sector); w_e_end_rsdata_req()
1048 put_ldev(device); w_e_end_rsdata_req()
1051 if (device->state.conn == C_AHEAD) { w_e_end_rsdata_req()
1054 if (likely(device->state.pdsk >= D_INCONSISTENT)) { w_e_end_rsdata_req()
1055 inc_rs_pending(device); w_e_end_rsdata_req()
1059 drbd_err(device, "Not sending RSDataReply, " w_e_end_rsdata_req()
1065 drbd_err(device, "Sending NegRSDReply. sector %llus.\n", w_e_end_rsdata_req()
1071 drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size); w_e_end_rsdata_req()
1074 dec_unacked(device); w_e_end_rsdata_req()
1076 move_to_net_ee_or_free(device, peer_req); w_e_end_rsdata_req()
1079 drbd_err(device, "drbd_send_block() failed\n"); w_e_end_rsdata_req()
1087 struct drbd_device *device = peer_device->device; w_e_end_csum_rs_req() local
1094 drbd_free_peer_req(device, peer_req); w_e_end_csum_rs_req()
1095 dec_unacked(device); w_e_end_csum_rs_req()
1099 if (get_ldev(device)) { w_e_end_csum_rs_req()
1100 drbd_rs_complete_io(device, peer_req->i.sector); w_e_end_csum_rs_req()
1101 put_ldev(device); w_e_end_csum_rs_req()
1112 D_ASSERT(device, digest_size == di->digest_size); w_e_end_csum_rs_req()
1122 drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size); w_e_end_csum_rs_req()
1124 device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT; w_e_end_csum_rs_req()
1127 inc_rs_pending(device); w_e_end_csum_rs_req()
1136 drbd_err(device, "Sending NegDReply. I guess it gets messy.\n"); w_e_end_csum_rs_req()
1139 dec_unacked(device); w_e_end_csum_rs_req()
1140 move_to_net_ee_or_free(device, peer_req); w_e_end_csum_rs_req()
1143 drbd_err(device, "drbd_send_block/ack() failed\n"); w_e_end_csum_rs_req()
1151 struct drbd_device *device = peer_device->device; w_e_end_ov_req() local
1178 drbd_free_peer_req(device, peer_req); w_e_end_ov_req()
1180 inc_rs_pending(device); w_e_end_ov_req()
1183 dec_rs_pending(device); w_e_end_ov_req()
1188 drbd_free_peer_req(device, peer_req); w_e_end_ov_req()
1189 dec_unacked(device); w_e_end_ov_req()
1193 void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size) drbd_ov_out_of_sync_found() argument
1195 if (device->ov_last_oos_start + device->ov_last_oos_size == sector) { drbd_ov_out_of_sync_found()
1196 device->ov_last_oos_size += size>>9; drbd_ov_out_of_sync_found()
1198 device->ov_last_oos_start = sector; drbd_ov_out_of_sync_found()
1199 device->ov_last_oos_size = size>>9; drbd_ov_out_of_sync_found()
1201 drbd_set_out_of_sync(device, sector, size); drbd_ov_out_of_sync_found()
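Adjacent out-of-sync hits coalesce into one run before being reported. A short hedged trace of the branch above:

    /* call(sector=1000, size=4096): new run  -> start=1000, size=8 sectors
     * call(sector=1008, size=4096): 1000+8 == 1008, run extends -> size=16
     * call(sector=4000, size=4096): gap, so a fresh run starts at 4000 */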
1208 struct drbd_device *device = peer_device->device; w_e_end_ov_reply() local
1218 drbd_free_peer_req(device, peer_req); w_e_end_ov_reply()
1219 dec_unacked(device); w_e_end_ov_reply()
1225 if (get_ldev(device)) { w_e_end_ov_reply()
1226 drbd_rs_complete_io(device, peer_req->i.sector); w_e_end_ov_reply()
1227 put_ldev(device); w_e_end_ov_reply()
1238 D_ASSERT(device, digest_size == di->digest_size); w_e_end_ov_reply()
1249 drbd_free_peer_req(device, peer_req); w_e_end_ov_reply()
1251 drbd_ov_out_of_sync_found(device, sector, size); w_e_end_ov_reply()
1253 ov_out_of_sync_print(device); w_e_end_ov_reply()
1258 dec_unacked(device); w_e_end_ov_reply()
1260 --device->ov_left; w_e_end_ov_reply()
1263 if ((device->ov_left & 0x200) == 0x200) w_e_end_ov_reply()
1264 drbd_advance_rs_marks(device, device->ov_left); w_e_end_ov_reply()
1266 stop_sector_reached = verify_can_do_stop_sector(device) && w_e_end_ov_reply()
1267 (sector + (size>>9)) >= device->ov_stop_sector; w_e_end_ov_reply()
1269 if (device->ov_left == 0 || stop_sector_reached) { w_e_end_ov_reply()
1270 ov_out_of_sync_print(device); w_e_end_ov_reply()
1271 drbd_resync_finished(device); w_e_end_ov_reply()
1300 struct drbd_device *device = w_send_write_hint() local
1306 sock = &first_peer_device(device)->connection->data; w_send_write_hint()
1307 if (!drbd_prepare_command(first_peer_device(device), sock)) w_send_write_hint()
1309 return drbd_send_command(first_peer_device(device), sock, P_UNPLUG_REMOTE, 0, NULL, 0); w_send_write_hint()
1336 struct drbd_device *device = req->device; w_send_out_of_sync() local
1337 struct drbd_peer_device *const peer_device = first_peer_device(device); w_send_out_of_sync()
1367 struct drbd_device *device = req->device; w_send_dblock() local
1368 struct drbd_peer_device *const peer_device = first_peer_device(device); w_send_dblock()
1396 struct drbd_device *device = req->device; w_send_read_req() local
1397 struct drbd_peer_device *const peer_device = first_peer_device(device); w_send_read_req()
1422 struct drbd_device *device = req->device; w_restart_disk_io() local
1425 drbd_al_begin_io(device, &req->i); w_restart_disk_io()
1428 req->private_bio->bi_bdev = device->ldev->backing_bdev; w_restart_disk_io()
1434 static int _drbd_may_sync_now(struct drbd_device *device) _drbd_may_sync_now() argument
1436 struct drbd_device *odev = device; _drbd_may_sync_now()
1460 * @device: DRBD device.
1464 static int _drbd_pause_after(struct drbd_device *device) _drbd_pause_after() argument
1484 * @device: DRBD device.
1488 static int _drbd_resume_next(struct drbd_device *device) _drbd_resume_next() argument
1508 void resume_next_sg(struct drbd_device *device) resume_next_sg() argument
1511 _drbd_resume_next(device); resume_next_sg()
1515 void suspend_other_sg(struct drbd_device *device) suspend_other_sg() argument
1518 _drbd_pause_after(device); suspend_other_sg()
1523 enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor) drbd_resync_after_valid() argument
1536 if (odev == device) drbd_resync_after_valid()
1561 void drbd_resync_after_changed(struct drbd_device *device) drbd_resync_after_changed() argument
1566 changes = _drbd_pause_after(device); drbd_resync_after_changed()
1567 changes |= _drbd_resume_next(device); drbd_resync_after_changed()
1571 void drbd_rs_controller_reset(struct drbd_device *device) drbd_rs_controller_reset() argument
1573 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; drbd_rs_controller_reset()
1576 atomic_set(&device->rs_sect_in, 0); drbd_rs_controller_reset()
1577 atomic_set(&device->rs_sect_ev, 0); drbd_rs_controller_reset()
1578 device->rs_in_flight = 0; drbd_rs_controller_reset()
1579 device->rs_last_events = drbd_rs_controller_reset()
1588 plan = rcu_dereference(device->rs_plan_s); drbd_rs_controller_reset()
1596 struct drbd_device *device = (struct drbd_device *) data; start_resync_timer_fn() local
1597 drbd_device_post_work(device, RS_START); start_resync_timer_fn()
1600 static void do_start_resync(struct drbd_device *device) do_start_resync() argument
1602 if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) { do_start_resync()
1603 drbd_warn(device, "postponing start_resync ...\n"); do_start_resync()
1604 device->start_resync_timer.expires = jiffies + HZ/10; do_start_resync()
1605 add_timer(&device->start_resync_timer); do_start_resync()
1609 drbd_start_resync(device, C_SYNC_SOURCE); do_start_resync()
1610 clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags); do_start_resync()
1613 static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device) use_checksum_based_resync() argument
1622 || test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */ use_checksum_based_resync()
1627 * @device: DRBD device.
1633 void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) drbd_start_resync() argument
1635 struct drbd_peer_device *peer_device = first_peer_device(device); drbd_start_resync()
1640 if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) { drbd_start_resync()
1641 drbd_err(device, "Resync already running!\n"); drbd_start_resync()
1645 if (!test_bit(B_RS_H_DONE, &device->flags)) { drbd_start_resync()
1650 r = drbd_khelper(device, "before-resync-target"); drbd_start_resync()
1653 drbd_info(device, "before-resync-target handler returned %d, " drbd_start_resync()
1659 r = drbd_khelper(device, "before-resync-source"); drbd_start_resync()
1663 drbd_info(device, "before-resync-source handler returned %d, " drbd_start_resync()
1666 drbd_info(device, "before-resync-source handler returned %d, " drbd_start_resync()
1679 if (!mutex_trylock(device->state_mutex)) { drbd_start_resync()
1680 set_bit(B_RS_H_DONE, &device->flags); drbd_start_resync()
1681 device->start_resync_timer.expires = jiffies + HZ/5; drbd_start_resync()
1682 add_timer(&device->start_resync_timer); drbd_start_resync()
1686 mutex_lock(device->state_mutex); drbd_start_resync()
1688 clear_bit(B_RS_H_DONE, &device->flags); drbd_start_resync()
1692 spin_lock_irq(&device->resource->req_lock); drbd_start_resync()
1695 if (device->state.conn < C_CONNECTED drbd_start_resync()
1696 || !get_ldev_if_state(device, D_NEGOTIATING)) { drbd_start_resync()
1698 spin_unlock_irq(&device->resource->req_lock); drbd_start_resync()
1699 mutex_unlock(device->state_mutex); drbd_start_resync()
1703 ns = drbd_read_state(device); drbd_start_resync()
1705 ns.aftr_isp = !_drbd_may_sync_now(device); drbd_start_resync()
1714 r = __drbd_set_state(device, ns, CS_VERBOSE, NULL); drbd_start_resync()
1715 ns = drbd_read_state(device); drbd_start_resync()
1721 unsigned long tw = drbd_bm_total_weight(device); drbd_start_resync()
1725 device->rs_failed = 0; drbd_start_resync()
1726 device->rs_paused = 0; drbd_start_resync()
1727 device->rs_same_csum = 0; drbd_start_resync()
1728 device->rs_last_sect_ev = 0; drbd_start_resync()
1729 device->rs_total = tw; drbd_start_resync()
1730 device->rs_start = now; drbd_start_resync()
1732 device->rs_mark_left[i] = tw; drbd_start_resync()
1733 device->rs_mark_time[i] = now; drbd_start_resync()
1735 _drbd_pause_after(device); drbd_start_resync()
1737 * Open coded drbd_rs_cancel_all(device), we already have IRQs drbd_start_resync()
1739 spin_lock(&device->al_lock); drbd_start_resync()
1740 lc_reset(device->resync); drbd_start_resync()
1741 device->resync_locked = 0; drbd_start_resync()
1742 device->resync_wenr = LC_FREE; drbd_start_resync()
1743 spin_unlock(&device->al_lock); drbd_start_resync()
1746 spin_unlock_irq(&device->resource->req_lock); drbd_start_resync()
1749 wake_up(&device->al_wait); /* for lc_reset() above */ drbd_start_resync()
1752 device->rs_last_bcast = jiffies - HZ; drbd_start_resync()
1754 drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", drbd_start_resync()
1756 (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10), drbd_start_resync()
1757 (unsigned long) device->rs_total); drbd_start_resync()
1759 device->bm_resync_fo = 0; drbd_start_resync()
1760 device->use_csums = use_checksum_based_resync(connection, device); drbd_start_resync()
1762 device->use_csums = 0; drbd_start_resync()
1775 if (connection->agreed_pro_version < 95 && device->rs_total == 0) { drbd_start_resync()
1796 drbd_resync_finished(device); drbd_start_resync()
1799 drbd_rs_controller_reset(device); drbd_start_resync()
1800 /* ns.conn may already be != device->state.conn, drbd_start_resync()
1805 mod_timer(&device->resync_timer, jiffies); drbd_start_resync()
1807 drbd_md_sync(device); drbd_start_resync()
1809 put_ldev(device); drbd_start_resync()
1810 mutex_unlock(device->state_mutex); drbd_start_resync()
1813 static void update_on_disk_bitmap(struct drbd_device *device, bool resync_done) update_on_disk_bitmap() argument
1816 device->rs_last_bcast = jiffies; update_on_disk_bitmap()
1818 if (!get_ldev(device)) update_on_disk_bitmap()
1821 drbd_bm_write_lazy(device, 0); update_on_disk_bitmap()
1822 if (resync_done && is_sync_state(device->state.conn)) update_on_disk_bitmap()
1823 drbd_resync_finished(device); update_on_disk_bitmap()
1825 drbd_bcast_event(device, &sib); update_on_disk_bitmap()
1827 device->rs_last_bcast = jiffies; update_on_disk_bitmap()
1828 put_ldev(device); update_on_disk_bitmap()
1831 static void drbd_ldev_destroy(struct drbd_device *device) drbd_ldev_destroy() argument
1833 lc_destroy(device->resync); drbd_ldev_destroy()
1834 device->resync = NULL; drbd_ldev_destroy()
1835 lc_destroy(device->act_log); drbd_ldev_destroy()
1836 device->act_log = NULL; drbd_ldev_destroy()
1839 drbd_free_ldev(device->ldev); drbd_ldev_destroy()
1840 device->ldev = NULL; drbd_ldev_destroy()
1843 clear_bit(GOING_DISKLESS, &device->flags); drbd_ldev_destroy()
1844 wake_up(&device->misc_wait); drbd_ldev_destroy()
1847 static void go_diskless(struct drbd_device *device) go_diskless() argument
1849 D_ASSERT(device, device->state.disk == D_FAILED); go_diskless()
1868 if (device->bitmap && device->ldev) { go_diskless()
1873 if (drbd_bitmap_io_from_worker(device, drbd_bm_write, go_diskless()
1875 if (test_bit(WAS_READ_ERROR, &device->flags)) { go_diskless()
1876 drbd_md_set_flag(device, MDF_FULL_SYNC); go_diskless()
1877 drbd_md_sync(device); go_diskless()
1882 drbd_force_state(device, NS(disk, D_DISKLESS)); go_diskless()
1885 static int do_md_sync(struct drbd_device *device) do_md_sync() argument
1887 drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n"); do_md_sync()
1888 drbd_md_sync(device); do_md_sync()
1915 static void do_device_work(struct drbd_device *device, const unsigned long todo) do_device_work() argument
1918 do_md_sync(device); do_device_work()
1921 update_on_disk_bitmap(device, test_bit(RS_DONE, &todo)); do_device_work()
1923 go_diskless(device); do_device_work()
1925 drbd_ldev_destroy(device); do_device_work()
1927 do_start_resync(device); do_device_work()
1956 struct drbd_device *device = peer_device->device; do_unqueued_work() local
1957 unsigned long todo = get_work_bits(&device->flags); do_unqueued_work()
1961 kref_get(&device->kref); do_unqueued_work()
1963 do_device_work(device, todo); do_unqueued_work()
1964 kref_put(&device->kref, drbd_destroy_device); do_unqueued_work()
2123 struct drbd_device *device = peer_device->device; drbd_worker() local
2124 D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE); drbd_worker()
2125 kref_get(&device->kref); drbd_worker()
2127 drbd_device_cleanup(device); drbd_worker()
2128 kref_put(&device->kref, drbd_destroy_device); drbd_worker()
H A Ddrbd_state.c35 struct drbd_device *device; member in struct:after_state_chg_work
52 static void after_state_ch(struct drbd_device *device, union drbd_state os,
57 static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
73 struct drbd_device *device = peer_device->device; conn_all_vols_unconf() local
74 if (device->state.disk != D_DISKLESS || conn_all_vols_unconf()
75 device->state.conn != C_STANDALONE || conn_all_vols_unconf()
76 device->state.role != R_SECONDARY) { conn_all_vols_unconf()
113 struct drbd_device *device = peer_device->device; conn_highest_role() local
114 role = max_role(role, device->state.role); conn_highest_role()
129 struct drbd_device *device = peer_device->device; conn_highest_peer() local
130 peer = max_role(peer, device->state.peer); conn_highest_peer()
145 struct drbd_device *device = peer_device->device; conn_highest_disk() local
146 disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk); conn_highest_disk()
161 struct drbd_device *device = peer_device->device; conn_lowest_disk() local
162 disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk); conn_lowest_disk()
177 struct drbd_device *device = peer_device->device; conn_highest_pdsk() local
178 disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk); conn_highest_pdsk()
193 struct drbd_device *device = peer_device->device; conn_lowest_conn() local
194 conn = min_t(enum drbd_conns, conn, device->state.conn); conn_lowest_conn()
209 if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) { no_peer_wf_report_params()
225 wake_up(&peer_device->device->state_wait); wake_up_all_devices()
233 * @device: DRBD device.
237 static int cl_wide_st_chg(struct drbd_device *device, cl_wide_st_chg() argument
259 drbd_change_state(struct drbd_device *device, enum chg_state_flags f, drbd_change_state() argument
266 spin_lock_irqsave(&device->resource->req_lock, flags); drbd_change_state()
267 ns = apply_mask_val(drbd_read_state(device), mask, val); drbd_change_state()
268 rv = _drbd_set_state(device, ns, f, NULL); drbd_change_state()
269 spin_unlock_irqrestore(&device->resource->req_lock, flags); drbd_change_state()
276 * @device: DRBD device.
280 void drbd_force_state(struct drbd_device *device, drbd_force_state() argument
283 drbd_change_state(device, CS_HARD, mask, val); drbd_force_state()
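The (mask, val) pair selects which state fields change; the NS() macro used throughout this listing builds it. A hedged illustration:

    /* NS(disk, D_DISKLESS) yields mask.disk = all-ones, val.disk = D_DISKLESS,
     * so apply_mask_val() rewrites only the disk field, e.g. the call
     *     drbd_force_state(device, NS(disk, D_DISKLESS));
     * seen in go_diskless() in drbd_worker.c above. */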
287 _req_st_cond(struct drbd_device *device, union drbd_state mask, _req_st_cond() argument
294 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &device->flags)) _req_st_cond()
297 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags)) _req_st_cond()
300 spin_lock_irqsave(&device->resource->req_lock, flags); _req_st_cond()
301 os = drbd_read_state(device); _req_st_cond()
302 ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); _req_st_cond()
307 if (!cl_wide_st_chg(device, os, ns)) _req_st_cond()
310 rv = is_valid_state(device, ns); _req_st_cond()
312 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); _req_st_cond()
317 spin_unlock_irqrestore(&device->resource->req_lock, flags); _req_st_cond()
324 * @device: DRBD device.
333 drbd_req_state(struct drbd_device *device, union drbd_state mask, drbd_req_state() argument
344 mutex_lock(device->state_mutex); drbd_req_state()
346 spin_lock_irqsave(&device->resource->req_lock, flags); drbd_req_state()
347 os = drbd_read_state(device); drbd_req_state()
348 ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); drbd_req_state()
351 spin_unlock_irqrestore(&device->resource->req_lock, flags); drbd_req_state()
355 if (cl_wide_st_chg(device, os, ns)) { drbd_req_state()
356 rv = is_valid_state(device, ns); drbd_req_state()
358 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); drbd_req_state()
359 spin_unlock_irqrestore(&device->resource->req_lock, flags); drbd_req_state()
363 print_st_err(device, os, ns, rv); drbd_req_state()
367 if (drbd_send_state_req(first_peer_device(device), mask, val)) { drbd_req_state()
370 print_st_err(device, os, ns, rv); drbd_req_state()
374 wait_event(device->state_wait, drbd_req_state()
375 (rv = _req_st_cond(device, mask, val))); drbd_req_state()
379 print_st_err(device, os, ns, rv); drbd_req_state()
382 spin_lock_irqsave(&device->resource->req_lock, flags); drbd_req_state()
383 ns = apply_mask_val(drbd_read_state(device), mask, val); drbd_req_state()
384 rv = _drbd_set_state(device, ns, f, &done); drbd_req_state()
386 rv = _drbd_set_state(device, ns, f, &done); drbd_req_state()
389 spin_unlock_irqrestore(&device->resource->req_lock, flags); drbd_req_state()
392 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); drbd_req_state()
398 mutex_unlock(device->state_mutex); drbd_req_state()
405 * @device: DRBD device.
414 _drbd_request_state(struct drbd_device *device, union drbd_state mask, _drbd_request_state() argument
419 wait_event(device->state_wait, _drbd_request_state()
420 (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE); _drbd_request_state()
426 _drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask, _drbd_request_state_holding_state_mutex() argument
433 wait_event_cmd(device->state_wait, _drbd_request_state_holding_state_mutex()
434 (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE, _drbd_request_state_holding_state_mutex()
435 mutex_unlock(device->state_mutex), _drbd_request_state_holding_state_mutex()
436 mutex_lock(device->state_mutex)); _drbd_request_state_holding_state_mutex()
441 static void print_st(struct drbd_device *device, const char *name, union drbd_state ns) print_st() argument
443 drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n", print_st()
459 void print_st_err(struct drbd_device *device, union drbd_state os, print_st_err() argument
464 drbd_err(device, "State change failed: %s\n", drbd_set_st_err_str(err)); print_st_err()
465 print_st(device, " state", os); print_st_err()
466 print_st(device, "wanted", ns); print_st_err()
500 static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os, union drbd_state ns, drbd_pr_state_change() argument
522 drbd_info(device, "%s\n", pb); drbd_pr_state_change()
545 * @device: DRBD device.
549 is_valid_state(struct drbd_device *device, union drbd_state ns) is_valid_state() argument
559 if (get_ldev(device)) { is_valid_state()
560 fp = rcu_dereference(device->ldev->disk_conf)->fencing; is_valid_state()
561 put_ldev(device); is_valid_state()
564 nc = rcu_dereference(first_peer_device(device)->connection->net_conf); is_valid_state()
569 else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY) is_valid_state()
576 else if (ns.role == R_SECONDARY && device->open_cnt) is_valid_state()
610 first_peer_device(device)->connection->agreed_pro_version < 88) is_valid_state()
632 * @device: DRBD device.
743 static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_state_warnings warn) print_sanitize_warnings() argument
755 drbd_warn(device, "%s\n", msg_table[warn]); print_sanitize_warnings()
760 * @device: DRBD device.
768 static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os, sanitize_state() argument
778 if (get_ldev(device)) { sanitize_state()
780 fp = rcu_dereference(device->ldev->disk_conf)->fencing; sanitize_state()
782 put_ldev(device); sanitize_state()
808 get_ldev_if_state(device, D_NEGOTIATING)) { sanitize_state()
809 if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) { sanitize_state()
810 ns.disk = device->new_state_tmp.disk; sanitize_state()
811 ns.pdsk = device->new_state_tmp.pdsk; sanitize_state()
818 put_ldev(device); sanitize_state()
914 if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO && sanitize_state()
934 void drbd_resume_al(struct drbd_device *device) drbd_resume_al() argument
936 if (test_and_clear_bit(AL_SUSPENDED, &device->flags)) drbd_resume_al()
937 drbd_info(device, "Resumed AL updates\n"); drbd_resume_al()
941 static void set_ov_position(struct drbd_device *device, enum drbd_conns cs) set_ov_position() argument
943 if (first_peer_device(device)->connection->agreed_pro_version < 90) set_ov_position()
944 device->ov_start_sector = 0; set_ov_position()
945 device->rs_total = drbd_bm_bits(device); set_ov_position()
946 device->ov_position = 0; set_ov_position()
953 device->ov_start_sector = ~(sector_t)0; set_ov_position()
955 unsigned long bit = BM_SECT_TO_BIT(device->ov_start_sector); set_ov_position()
956 if (bit >= device->rs_total) { set_ov_position()
957 device->ov_start_sector = set_ov_position()
958 BM_BIT_TO_SECT(device->rs_total - 1); set_ov_position()
959 device->rs_total = 1; set_ov_position()
961 device->rs_total -= bit; set_ov_position()
962 device->ov_position = device->ov_start_sector; set_ov_position()
964 device->ov_left = device->rs_total; set_ov_position()
969 * @device: DRBD device.
977 __drbd_set_state(struct drbd_device *device, union drbd_state ns, __drbd_set_state() argument
980 struct drbd_peer_device *peer_device = first_peer_device(device); __drbd_set_state()
987 os = drbd_read_state(device); __drbd_set_state()
989 ns = sanitize_state(device, os, ns, &ssw); __drbd_set_state()
1001 rv = is_valid_state(device, ns); __drbd_set_state()
1006 if (is_valid_state(device, os) == rv) __drbd_set_state()
1014 print_st_err(device, os, ns, rv); __drbd_set_state()
1018 print_sanitize_warnings(device, ssw); __drbd_set_state()
1020 drbd_pr_state_change(device, os, ns, flags); __drbd_set_state()
1035 atomic_inc(&device->local_cnt); __drbd_set_state()
1038 clear_bit(RS_DONE, &device->flags); __drbd_set_state()
1040 /* changes to local_cnt and device flags should be visible before __drbd_set_state()
1044 device->state.i = ns.i; __drbd_set_state()
1045 device->resource->susp = ns.susp; __drbd_set_state()
1046 device->resource->susp_nod = ns.susp_nod; __drbd_set_state()
1047 device->resource->susp_fen = ns.susp_fen; __drbd_set_state()
1056 drbd_print_uuids(device, "attached to UUIDs"); __drbd_set_state()
1065 wake_up(&device->misc_wait); __drbd_set_state()
1066 wake_up(&device->state_wait); __drbd_set_state()
1070 * Log the last position, unless end-of-device. */ __drbd_set_state()
1073 device->ov_start_sector = __drbd_set_state()
1074 BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left); __drbd_set_state()
1075 if (device->ov_left) __drbd_set_state()
1076 drbd_info(device, "Online Verify reached sector %llu\n", __drbd_set_state()
1077 (unsigned long long)device->ov_start_sector); __drbd_set_state()
1082 drbd_info(device, "Syncer continues.\n"); __drbd_set_state()
1083 device->rs_paused += (long)jiffies __drbd_set_state()
1084 -(long)device->rs_mark_time[device->rs_last_mark]; __drbd_set_state()
1086 mod_timer(&device->resync_timer, jiffies); __drbd_set_state()
1091 drbd_info(device, "Resync suspended\n"); __drbd_set_state()
1092 device->rs_mark_time[device->rs_last_mark] = jiffies; __drbd_set_state()
1100 set_ov_position(device, ns.conn); __drbd_set_state()
1101 device->rs_start = now; __drbd_set_state()
1102 device->rs_last_sect_ev = 0; __drbd_set_state()
1103 device->ov_last_oos_size = 0; __drbd_set_state()
1104 device->ov_last_oos_start = 0; __drbd_set_state()
1107 device->rs_mark_left[i] = device->ov_left; __drbd_set_state()
1108 device->rs_mark_time[i] = now; __drbd_set_state()
1111 drbd_rs_controller_reset(device); __drbd_set_state()
1114 drbd_info(device, "Starting Online Verify from sector %llu\n", __drbd_set_state()
1115 (unsigned long long)device->ov_position); __drbd_set_state()
1116 mod_timer(&device->resync_timer, jiffies); __drbd_set_state()
1120 if (get_ldev(device)) { __drbd_set_state()
1121 u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND| __drbd_set_state()
1126 if (test_bit(CRASHED_PRIMARY, &device->flags)) __drbd_set_state()
1128 if (device->state.role == R_PRIMARY || __drbd_set_state()
1129 (device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY)) __drbd_set_state()
1131 if (device->state.conn > C_WF_REPORT_PARAMS) __drbd_set_state()
1133 if (device->state.disk > D_INCONSISTENT) __drbd_set_state()
1135 if (device->state.disk > D_OUTDATED) __drbd_set_state()
1137 if (device->state.pdsk <= D_OUTDATED && device->state.pdsk >= D_INCONSISTENT) __drbd_set_state()
1139 if (mdf != device->ldev->md.flags) { __drbd_set_state()
1140 device->ldev->md.flags = mdf; __drbd_set_state()
1141 drbd_md_mark_dirty(device); __drbd_set_state()
1144 drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]); __drbd_set_state()
1145 put_ldev(device); __drbd_set_state()
1151 set_bit(CONSIDER_RESYNC, &device->flags); __drbd_set_state()
1168 drbd_resume_al(device); __drbd_set_state()
1177 device->last_reattach_jif = jiffies; __drbd_set_state()
1185 ascw->device = device; __drbd_set_state()
1190 drbd_err(device, "Could not kmalloc an ascw\n"); __drbd_set_state()
1200 struct drbd_device *device = ascw->device; w_after_state_ch() local
1202 after_state_ch(device, ascw->os, ascw->ns, ascw->flags); w_after_state_ch()
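[Editor's note] The __drbd_set_state() / w_after_state_ch() pair above follows a common deferred-work pattern: the transition itself happens under the request lock, so the follow-up actions are packaged into a small heap object (the ascw) and replayed later from the worker. A minimal userspace sketch of the pattern, with illustrative names and plain ints standing in for union drbd_state:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for the "after state change work" object. */
    struct after_state_work {
            int os, ns;                     /* old and new state, simplified */
            void (*fn)(struct after_state_work *w);
    };

    static void after_state_ch(struct after_state_work *w)
    {
            printf("state changed %d -> %d\n", w->os, w->ns);
    }

    int main(void)
    {
            struct after_state_work *w = malloc(sizeof(*w));

            if (!w) {                       /* mirrors the kmalloc failure path */
                    fprintf(stderr, "could not allocate work\n");
                    return 1;
            }
            w->os = 0;
            w->ns = 1;
            w->fn = after_state_ch;
            /* ...would be queued here; a worker later runs: */
            w->fn(w);
            free(w);
            return 0;
    }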
1210 static void abw_start_sync(struct drbd_device *device, int rv) abw_start_sync() argument
1213 drbd_err(device, "Writing the bitmap failed not starting resync.\n"); abw_start_sync()
1214 _drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE); abw_start_sync()
1218 switch (device->state.conn) { abw_start_sync()
1220 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); abw_start_sync()
1223 drbd_start_resync(device, C_SYNC_SOURCE); abw_start_sync()
1228 int drbd_bitmap_io_from_worker(struct drbd_device *device, drbd_bitmap_io_from_worker() argument
1234 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task); drbd_bitmap_io_from_worker()
1236 /* open coded non-blocking drbd_suspend_io(device); */ drbd_bitmap_io_from_worker()
1237 set_bit(SUSPEND_IO, &device->flags); drbd_bitmap_io_from_worker()
1239 drbd_bm_lock(device, why, flags); drbd_bitmap_io_from_worker()
1240 rv = io_fn(device); drbd_bitmap_io_from_worker()
1241 drbd_bm_unlock(device); drbd_bitmap_io_from_worker()
1243 drbd_resume_io(device); drbd_bitmap_io_from_worker()
1250 * @device: DRBD device.
1255 static void after_state_ch(struct drbd_device *device, union drbd_state os, after_state_ch() argument
1258 struct drbd_resource *resource = device->resource; after_state_ch()
1259 struct drbd_peer_device *peer_device = first_peer_device(device); after_state_ch()
1269 clear_bit(CRASHED_PRIMARY, &device->flags); after_state_ch()
1270 if (device->p_uuid) after_state_ch()
1271 device->p_uuid[UI_FLAGS] &= ~((u64)2); after_state_ch()
1275 drbd_bcast_event(device, &sib); after_state_ch()
1279 drbd_khelper(device, "pri-on-incon-degr"); after_state_ch()
1287 spin_lock_irq(&device->resource->req_lock); after_state_ch()
1302 spin_unlock_irq(&device->resource->req_lock);
1306 spin_lock_irq(&device->resource->req_lock);
1314 clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
1322 spin_unlock_irq(&device->resource->req_lock);
1331 connection->agreed_pro_version >= 96 && get_ldev(device)) {
1333 put_ldev(device);
1341 device->rs_total = 0;
1342 device->rs_failed = 0;
1343 atomic_set(&device->rs_pending_cnt, 0);
1344 drbd_rs_cancel_all(device);
1353 device->state.conn == C_WF_BITMAP_S)
1354 drbd_queue_bitmap_io(device, &drbd_send_bitmap, NULL,
1365 if (get_ldev(device)) {
1367 device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1368 if (drbd_suspended(device)) {
1369 set_bit(NEW_CUR_UUID, &device->flags);
1371 drbd_uuid_new_current(device);
1375 put_ldev(device);
1379 if (ns.pdsk < D_INCONSISTENT && get_ldev(device)) {
1381 device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1382 drbd_uuid_new_current(device);
1390 drbd_bitmap_io_from_worker(device, &drbd_bm_write,
1392 put_ldev(device);
1399 device->state.conn <= C_CONNECTED && get_ldev(device)) {
1402 drbd_bitmap_io_from_worker(device, &drbd_bm_write,
1404 put_ldev(device);
1424 suspend_other_sg(device);
1438 drbd_queue_bitmap_io(device,
1451 if (device->ldev) {
1453 eh = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1456 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &device->flags);
1459 drbd_khelper(device, "local-io-error");
1474 if (test_and_clear_bit(FORCE_DETACH, &device->flags))
1475 tl_abort_disk_io(device);
1480 if (device->state.disk != D_FAILED)
1481 drbd_err(device,
1483 drbd_disk_str(device->state.disk));
1488 drbd_rs_cancel_all(device);
1493 drbd_md_sync(device);
1495 put_ldev(device);
1504 if (device->state.disk != D_DISKLESS)
1505 drbd_err(device,
1507 drbd_disk_str(device->state.disk));
1513 put_ldev(device);
1522 test_and_clear_bit(RESYNC_AFTER_NEG, &device->flags)) {
1524 resync_after_online_grow(device);
1531 resume_next_sg(device);
1542 && verify_can_do_stop_sector(device))
1552 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(device)) {
1553 drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
1555 put_ldev(device);
1562 resume_next_sg(device);
1565 drbd_md_sync(device);
1613 struct drbd_device *device = peer_device->device; w_after_conn_state_ch() local
1614 if (test_bit(NEW_CUR_UUID, &device->flags)) { w_after_conn_state_ch()
1615 drbd_uuid_new_current(device); w_after_conn_state_ch()
1616 clear_bit(NEW_CUR_UUID, &device->flags); w_after_conn_state_ch()
1651 struct drbd_device *device = peer_device->device; conn_old_common_state() local
1652 os = device->state; conn_old_common_state()
1693 struct drbd_device *device = peer_device->device; conn_is_valid_transition() local
1694 os = drbd_read_state(device); conn_is_valid_transition()
1695 ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); conn_is_valid_transition()
1706 rv = is_valid_state(device, ns); conn_is_valid_transition()
1708 if (is_valid_state(device, os) == rv) conn_is_valid_transition()
1716 print_st_err(device, os, ns, rv); conn_is_valid_transition()
1753 struct drbd_device *device = peer_device->device; conn_set_state() local
1755 os = drbd_read_state(device); conn_set_state()
1757 ns = sanitize_state(device, os, ns, NULL); conn_set_state()
1762 rv = __drbd_set_state(device, ns, flags, NULL); conn_set_state()
1766 ns.i = device->state.i; conn_set_state()
H A Ddrbd_proc.c63 static void drbd_get_syncer_progress(struct drbd_device *device, drbd_get_syncer_progress() argument
69 typecheck(unsigned long, device->rs_total); drbd_get_syncer_progress()
70 *rs_total = device->rs_total; drbd_get_syncer_progress()
77 *bits_left = device->ov_left; drbd_get_syncer_progress()
79 *bits_left = drbd_bm_total_weight(device) - device->rs_failed; drbd_get_syncer_progress()
113 static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq, drbd_syncer_progress() argument
121 drbd_get_syncer_progress(device, state, &rs_total, &rs_left, &res); drbd_syncer_progress()
164 i = (device->rs_last_mark + 2) % DRBD_SYNC_MARKS; drbd_syncer_progress()
165 dt = (jiffies - device->rs_mark_time[i]) / HZ; drbd_syncer_progress()
171 db = device->rs_mark_left[i] - rs_left; drbd_syncer_progress()
184 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; drbd_syncer_progress()
185 dt = (jiffies - device->rs_mark_time[i]) / HZ; drbd_syncer_progress()
188 db = device->rs_mark_left[i] - rs_left; drbd_syncer_progress()
197 dt = (jiffies - device->rs_start - device->rs_paused) / HZ; drbd_syncer_progress()
208 seq_printf_with_thousands_grouping(seq, device->c_sync_rate); drbd_syncer_progress()
215 unsigned long bm_bits = drbd_bm_bits(device); drbd_syncer_progress()
220 bit_pos = bm_bits - device->ov_left; drbd_syncer_progress()
221 if (verify_can_do_stop_sector(device)) drbd_syncer_progress()
222 stop_sector = device->ov_stop_sector; drbd_syncer_progress()
224 bit_pos = device->bm_resync_fo; drbd_syncer_progress()
242 struct drbd_device *device; drbd_seq_show() local
277 idr_for_each_entry(&drbd_devices, device, i) { drbd_seq_show()
282 state = device->state; drbd_seq_show()
290 /* reset device->congestion_reason */ drbd_seq_show()
291 bdi_rw_congested(&device->rq_queue->backing_dev_info); drbd_seq_show()
293 nc = rcu_dereference(first_peer_device(device)->connection->net_conf); drbd_seq_show()
305 drbd_suspended(device) ? 's' : 'r', drbd_seq_show()
309 device->congestion_reason ?: '-', drbd_seq_show()
310 test_bit(AL_SUSPENDED, &device->flags) ? 's' : '-', drbd_seq_show()
311 device->send_cnt/2, drbd_seq_show()
312 device->recv_cnt/2, drbd_seq_show()
313 device->writ_cnt/2, drbd_seq_show()
314 device->read_cnt/2, drbd_seq_show()
315 device->al_writ_cnt, drbd_seq_show()
316 device->bm_writ_cnt, drbd_seq_show()
317 atomic_read(&device->local_cnt), drbd_seq_show()
318 atomic_read(&device->ap_pending_cnt) + drbd_seq_show()
319 atomic_read(&device->rs_pending_cnt), drbd_seq_show()
320 atomic_read(&device->unacked_cnt), drbd_seq_show()
321 atomic_read(&device->ap_bio_cnt), drbd_seq_show()
322 first_peer_device(device)->connection->epochs, drbd_seq_show()
323 write_ordering_chars[device->resource->write_ordering] drbd_seq_show()
327 drbd_bm_total_weight(device))); drbd_seq_show()
333 drbd_syncer_progress(device, seq, state); drbd_seq_show()
335 if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) { drbd_seq_show()
336 lc_seq_printf_stats(seq, device->resync); drbd_seq_show()
337 lc_seq_printf_stats(seq, device->act_log); drbd_seq_show()
338 put_ldev(device); drbd_seq_show()
342 seq_printf(seq, "\tblocked on activity log: %d\n", atomic_read(&device->ap_actlog_cnt)); drbd_seq_show()
H A Ddrbd_receiver.c155 static struct page *__drbd_alloc_pages(struct drbd_device *device, __drbd_alloc_pages() argument
201 static void reclaim_finished_net_peer_reqs(struct drbd_device *device, reclaim_finished_net_peer_reqs() argument
211 list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) { reclaim_finished_net_peer_reqs()
218 static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device) drbd_kick_lo_and_reclaim_net() argument
223 spin_lock_irq(&device->resource->req_lock); drbd_kick_lo_and_reclaim_net()
224 reclaim_finished_net_peer_reqs(device, &reclaimed); drbd_kick_lo_and_reclaim_net()
225 spin_unlock_irq(&device->resource->req_lock); drbd_kick_lo_and_reclaim_net()
228 drbd_free_net_peer_req(device, peer_req); drbd_kick_lo_and_reclaim_net()
233 * @device: DRBD device.
254 struct drbd_device *device = peer_device->device; drbd_alloc_pages() local
265 if (atomic_read(&device->pp_in_use) < mxb) drbd_alloc_pages()
266 page = __drbd_alloc_pages(device, number); drbd_alloc_pages()
271 drbd_kick_lo_and_reclaim_net(device); drbd_alloc_pages()
273 if (atomic_read(&device->pp_in_use) < mxb) { drbd_alloc_pages()
274 page = __drbd_alloc_pages(device, number); drbd_alloc_pages()
283 drbd_warn(device, "drbd_alloc_pages interrupted!\n"); drbd_alloc_pages()
293 atomic_add(number, &device->pp_in_use); drbd_alloc_pages()
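[Editor's note] drbd_alloc_pages() above gates receive-buffer growth on an atomic in-use counter (pp_in_use) checked against the negotiated max-buffers limit, reclaiming and retrying when over it. A minimal sketch of that accounting, assuming a fixed cap and calloc() standing in for real pages; as in the excerpt, the pre-check is advisory rather than a hard reservation:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_BUFFERS 128                 /* stand-in for the negotiated cap */

    static atomic_int pp_in_use;

    static void *pool_alloc(int number)
    {
            /* advisory check, as in the original: callers reclaim and retry */
            if (atomic_load(&pp_in_use) + number > MAX_BUFFERS)
                    return NULL;
            atomic_fetch_add(&pp_in_use, number);
            return calloc((size_t)number, 4096);
    }

    static void pool_free(void *pages, int number)
    {
            free(pages);
            atomic_fetch_sub(&pp_in_use, number);
    }

    int main(void)
    {
            void *p = pool_alloc(8);

            printf("in use: %d\n", atomic_load(&pp_in_use));   /* 8 */
            pool_free(p, 8);
            printf("in use: %d\n", atomic_load(&pp_in_use));   /* 0 */
            return 0;
    }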
301 static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net) drbd_free_pages() argument
303 atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use; drbd_free_pages()
321 drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n", drbd_free_pages()
344 struct drbd_device *device = peer_device->device; __must_hold() local
349 if (drbd_insert_fault(device, DRBD_FAULT_AL_EE)) __must_hold()
355 drbd_err(device, "%s: allocation failed\n", __func__); __must_hold()
387 void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req, __drbd_free_peer_req() argument
393 drbd_free_pages(device, peer_req->pages, is_net); __drbd_free_peer_req()
394 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0); __drbd_free_peer_req()
395 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); __drbd_free_peer_req()
398 drbd_al_complete_io(device, &peer_req->i); __drbd_free_peer_req()
403 int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list) drbd_free_peer_reqs() argument
408 int is_net = list == &device->net_ee; drbd_free_peer_reqs()
410 spin_lock_irq(&device->resource->req_lock); drbd_free_peer_reqs()
412 spin_unlock_irq(&device->resource->req_lock); drbd_free_peer_reqs()
415 __drbd_free_peer_req(device, peer_req, is_net); drbd_free_peer_reqs()
424 static int drbd_finish_peer_reqs(struct drbd_device *device) drbd_finish_peer_reqs() argument
431 spin_lock_irq(&device->resource->req_lock); drbd_finish_peer_reqs()
432 reclaim_finished_net_peer_reqs(device, &reclaimed); drbd_finish_peer_reqs()
433 list_splice_init(&device->done_ee, &work_list); drbd_finish_peer_reqs()
434 spin_unlock_irq(&device->resource->req_lock); drbd_finish_peer_reqs()
437 drbd_free_net_peer_req(device, peer_req); drbd_finish_peer_reqs()
450 drbd_free_peer_req(device, peer_req); drbd_finish_peer_reqs()
452 wake_up(&device->ee_wait); drbd_finish_peer_reqs()
457 static void _drbd_wait_ee_list_empty(struct drbd_device *device, _drbd_wait_ee_list_empty() argument
465 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE); _drbd_wait_ee_list_empty()
466 spin_unlock_irq(&device->resource->req_lock); _drbd_wait_ee_list_empty()
468 finish_wait(&device->ee_wait, &wait); _drbd_wait_ee_list_empty()
469 spin_lock_irq(&device->resource->req_lock); _drbd_wait_ee_list_empty()
473 static void drbd_wait_ee_list_empty(struct drbd_device *device, drbd_wait_ee_list_empty() argument
476 spin_lock_irq(&device->resource->req_lock); drbd_wait_ee_list_empty()
477 _drbd_wait_ee_list_empty(device, head); drbd_wait_ee_list_empty()
478 spin_unlock_irq(&device->resource->req_lock); drbd_wait_ee_list_empty()
871 struct drbd_device *device = peer_device->device; drbd_connected() local
874 atomic_set(&device->packet_seq, 0); drbd_connected()
875 device->peer_seq = 0; drbd_connected()
877 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ? drbd_connected()
879 &device->own_state_mutex; drbd_connected()
888 clear_bit(USE_DEGR_WFC_T, &device->flags); drbd_connected()
889 clear_bit(RESIZE_PENDING, &device->flags); drbd_connected()
890 atomic_set(&device->ap_in_flight, 0); drbd_connected()
891 mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */ drbd_connected()
1047 /* drbd_request_state(device, NS(conn, WFAuth)); */ conn_connect()
1072 mutex_lock(peer_device->device->state_mutex); conn_connect()
1077 mutex_unlock(peer_device->device->state_mutex); conn_connect()
1081 struct drbd_device *device = peer_device->device; conn_connect() local
1082 kref_get(&device->kref); conn_connect()
1086 set_bit(DISCARD_MY_DATA, &device->flags); conn_connect()
1088 clear_bit(DISCARD_MY_DATA, &device->flags); conn_connect()
1091 kref_put(&device->kref, drbd_destroy_device); conn_connect()
1184 struct drbd_device *device = peer_device->device; drbd_flush() local
1186 if (!get_ldev(device)) drbd_flush()
1188 kref_get(&device->kref); drbd_flush()
1196 device->flush_jif = jiffies; drbd_flush()
1197 set_bit(FLUSH_PENDING, &device->flags); drbd_flush()
1198 rv = blkdev_issue_flush(device->ldev->backing_bdev, drbd_flush()
1200 clear_bit(FLUSH_PENDING, &device->flags); drbd_flush()
1202 drbd_info(device, "local disk flush failed with status %d\n", rv); drbd_flush()
1208 put_ldev(device); drbd_flush()
1209 kref_put(&device->kref, drbd_destroy_device); drbd_flush()
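[Editor's note] drbd_flush() above brackets a blkdev_issue_flush() on the backing device with a FLUSH_PENDING flag and logs a failure rather than propagating it. A rough userspace analogue, assuming an ordinary file stands in for the backing device and fsync() for the block-layer flush:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int flush_pending;               /* analogue of FLUSH_PENDING */

    static int flush_backing(int fd)
    {
            int rv;

            flush_pending = 1;
            rv = fsync(fd);                 /* push volatile write caches */
            flush_pending = 0;
            if (rv != 0)
                    perror("local disk flush failed");
            return rv;
    }

    int main(void)
    {
            int fd = open("/tmp/backing.img", O_RDWR | O_CREAT, 0600);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "x", 1) != 1)
                    perror("write");
            flush_backing(fd);
            close(fd);
            return 0;
    }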
1221 * @device: DRBD device.
1318 struct drbd_device *device; drbd_bump_write_ordering() local
1331 idr_for_each_entry(&resource->devices, device, vnr) { drbd_bump_write_ordering()
1332 if (get_ldev(device)) { drbd_bump_write_ordering()
1333 wo = max_allowed_wo(device->ldev, wo); drbd_bump_write_ordering()
1334 if (device->ldev == bdev) drbd_bump_write_ordering()
1336 put_ldev(device); drbd_bump_write_ordering()
1352 * @device: DRBD device.
1367 int drbd_submit_peer_request(struct drbd_device *device, drbd_submit_peer_request() argument
1383 conn_wait_active_ee_empty(first_peer_device(device)->connection); drbd_submit_peer_request()
1388 spin_lock_irq(&device->resource->req_lock); drbd_submit_peer_request()
1389 list_add_tail(&peer_req->w.list, &device->active_ee); drbd_submit_peer_request()
1390 spin_unlock_irq(&device->resource->req_lock); drbd_submit_peer_request()
1391 if (blkdev_issue_zeroout(device->ldev->backing_bdev, drbd_submit_peer_request()
1415 drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages); drbd_submit_peer_request()
1420 bio->bi_bdev = device->ldev->backing_bdev; drbd_submit_peer_request()
1441 drbd_err(device, page_chain_for_each()
1454 D_ASSERT(device, data_size == 0);
1456 D_ASSERT(device, page == NULL);
1467 drbd_generic_make_request(device, fault_type, bio);
1480 static void drbd_remove_epoch_entry_interval(struct drbd_device *device, drbd_remove_epoch_entry_interval() argument
1485 drbd_remove_interval(&device->write_requests, i); drbd_remove_epoch_entry_interval()
1490 wake_up(&device->misc_wait); drbd_remove_epoch_entry_interval()
1500 struct drbd_device *device = peer_device->device; conn_wait_active_ee_empty() local
1502 kref_get(&device->kref); conn_wait_active_ee_empty()
1504 drbd_wait_ee_list_empty(device, &device->active_ee); conn_wait_active_ee_empty()
1505 kref_put(&device->kref, drbd_destroy_device); conn_wait_active_ee_empty()
1524 * not a specific (peer)device. receive_Barrier()
1591 struct drbd_device *device = peer_device->device; __must_hold() local
1592 const sector_t capacity = drbd_get_capacity(device->this_bdev); __must_hold()
1629 drbd_err(device, "request from peer beyond end of local disk: " __must_hold()
1653 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) { page_chain_for_each()
1654 drbd_err(device, "Fault injection: Corrupting data on receive\n"); page_chain_for_each()
1659 drbd_free_peer_req(device, peer_req); page_chain_for_each()
1668 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
1670 drbd_free_peer_req(device, peer_req);
1674 device->recv_cnt += data_size >> 9;
1702 drbd_free_pages(peer_device->device, page, 0); drbd_drain_block()
1727 peer_device->device->recv_cnt += data_size>>9; recv_dless_read()
1730 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); recv_dless_read()
1750 D_ASSERT(peer_device->device, data_size == 0);
1763 struct drbd_device *device = peer_device->device; e_end_resync_block() local
1767 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); e_end_resync_block()
1770 drbd_set_in_sync(device, sector, peer_req->i.size); e_end_resync_block()
1774 drbd_rs_failed_io(device, sector, peer_req->i.size); e_end_resync_block()
1778 dec_unacked(device); e_end_resync_block()
1786 struct drbd_device *device = peer_device->device; __releases() local
1793 dec_rs_pending(device); __releases()
1795 inc_unacked(device); __releases()
1802 spin_lock_irq(&device->resource->req_lock); __releases()
1803 list_add_tail(&peer_req->w.list, &device->sync_ee); __releases()
1804 spin_unlock_irq(&device->resource->req_lock); __releases()
1806 atomic_add(pi->size >> 9, &device->rs_sect_ev); __releases()
1807 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) __releases()
1811 drbd_err(device, "submit failed, triggering re-connect\n"); __releases()
1812 spin_lock_irq(&device->resource->req_lock); __releases()
1814 spin_unlock_irq(&device->resource->req_lock); __releases()
1816 drbd_free_peer_req(device, peer_req); __releases()
1818 put_ldev(device); __releases()
1823 find_request(struct drbd_device *device, struct rb_root *root, u64 id, find_request() argument
1833 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func, find_request()
1842 struct drbd_device *device; receive_DataReply() local
1851 device = peer_device->device; receive_DataReply()
1855 spin_lock_irq(&device->resource->req_lock); receive_DataReply()
1856 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__); receive_DataReply()
1857 spin_unlock_irq(&device->resource->req_lock); receive_DataReply()
1877 struct drbd_device *device; receive_RSDataReply() local
1885 device = peer_device->device; receive_RSDataReply()
1888 D_ASSERT(device, p->block_id == ID_SYNCER); receive_RSDataReply()
1890 if (get_ldev(device)) { receive_RSDataReply()
1897 drbd_err(device, "Can not write resync data to local disk.\n"); receive_RSDataReply()
1904 atomic_add(pi->size >> 9, &device->rs_sect_in); receive_RSDataReply()
1909 static void restart_conflicting_writes(struct drbd_device *device, restart_conflicting_writes() argument
1915 drbd_for_each_overlap(i, &device->write_requests, sector, size) { restart_conflicting_writes()
1936 struct drbd_device *device = peer_device->device; e_end_block() local
1942 pcmd = (device->state.conn >= C_SYNC_SOURCE && e_end_block()
1943 device->state.conn <= C_PAUSED_SYNC_T && e_end_block()
1948 drbd_set_in_sync(device, sector, peer_req->i.size); e_end_block()
1954 dec_unacked(device); e_end_block()
1960 spin_lock_irq(&device->resource->req_lock); e_end_block()
1961 D_ASSERT(device, !drbd_interval_empty(&peer_req->i)); e_end_block()
1962 drbd_remove_epoch_entry_interval(device, peer_req); e_end_block()
1964 restart_conflicting_writes(device, sector, peer_req->i.size); e_end_block()
1965 spin_unlock_irq(&device->resource->req_lock); e_end_block()
1967 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); e_end_block()
1969 drbd_may_finish_epoch(first_peer_device(device)->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); e_end_block()
1982 dec_unacked(peer_device->device); e_send_ack()
2019 struct drbd_device *device = peer_device->device; update_peer_seq() local
2023 spin_lock(&device->peer_seq_lock); update_peer_seq()
2024 newest_peer_seq = seq_max(device->peer_seq, peer_seq); update_peer_seq()
2025 device->peer_seq = newest_peer_seq; update_peer_seq()
2026 spin_unlock(&device->peer_seq_lock); update_peer_seq()
2027 /* wake up only if we actually changed device->peer_seq */ update_peer_seq()
2029 wake_up(&device->seq_wait); update_peer_seq()
2039 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req) overlapping_resync_write() argument
2044 spin_lock_irq(&device->resource->req_lock); overlapping_resync_write()
2045 list_for_each_entry(rs_req, &device->sync_ee, w.list) { overlapping_resync_write()
2052 spin_unlock_irq(&device->resource->req_lock); overlapping_resync_write()
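[Editor's note] overlapping_resync_write() above scans device->sync_ee and tests each in-flight resync request's extent against the incoming write. The core of such a scan is the standard half-open interval overlap check, sketched here with sector arithmetic simplified to unsigned long:

    #include <stdio.h>

    /* Two extents [s1, s1+len1) and [s2, s2+len2) overlap iff each starts
     * before the other ends. */
    static int overlaps(unsigned long s1, unsigned long len1,
                        unsigned long s2, unsigned long len2)
    {
            return s1 < s2 + len2 && s2 < s1 + len1;
    }

    int main(void)
    {
            printf("%d\n", overlaps(0, 8, 4, 8));   /* 1: [0,8) vs [4,12) */
            printf("%d\n", overlaps(0, 8, 8, 8));   /* 0: merely adjacent */
            return 0;
    }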
2066 * In case packet_seq is larger than device->peer_seq number, there are
2068 * In case we are the logically next packet, we update device->peer_seq
2080 struct drbd_device *device = peer_device->device; wait_for_and_update_peer_seq() local
2088 spin_lock(&device->peer_seq_lock); wait_for_and_update_peer_seq()
2090 if (!seq_greater(peer_seq - 1, device->peer_seq)) { wait_for_and_update_peer_seq()
2091 device->peer_seq = seq_max(device->peer_seq, peer_seq); wait_for_and_update_peer_seq()
2101 tp = rcu_dereference(first_peer_device(device)->connection->net_conf)->two_primaries; wait_for_and_update_peer_seq()
2108 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE); wait_for_and_update_peer_seq()
2109 spin_unlock(&device->peer_seq_lock); wait_for_and_update_peer_seq()
2114 spin_lock(&device->peer_seq_lock); wait_for_and_update_peer_seq()
2117 drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n"); wait_for_and_update_peer_seq()
2121 spin_unlock(&device->peer_seq_lock); wait_for_and_update_peer_seq()
2122 finish_wait(&device->seq_wait, &wait); wait_for_and_update_peer_seq()
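[Editor's note] update_peer_seq() and wait_for_and_update_peer_seq() above order incoming packets by a 32-bit sequence number through seq_greater() and seq_max(). Such comparisons have to survive counter wraparound, which the usual serial-number trick handles by evaluating the difference as signed. A standalone sketch, assuming 32-bit unsigned counters:

    #include <stdint.h>
    #include <stdio.h>

    /* "a is newer than b", wraparound-safe: correct whenever the two
     * counters are within half the number space of each other. */
    static int seq_greater(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    static uint32_t seq_max(uint32_t a, uint32_t b)
    {
            return seq_greater(a, b) ? a : b;
    }

    int main(void)
    {
            /* 1 is "newer" than 0xffffffff despite being numerically smaller */
            printf("%d\n", seq_greater(1u, 0xffffffffu));   /* 1 */
            printf("%u\n", seq_max(0xffffffffu, 1u));       /* 1 */
            return 0;
    }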
2137 static void fail_postponed_requests(struct drbd_device *device, sector_t sector, fail_postponed_requests() argument
2143 drbd_for_each_overlap(i, &device->write_requests, sector, size) { fail_postponed_requests()
2154 spin_unlock_irq(&device->resource->req_lock); fail_postponed_requests()
2156 complete_master_bio(device, &m); fail_postponed_requests()
2157 spin_lock_irq(&device->resource->req_lock); fail_postponed_requests()
2162 static int handle_write_conflicts(struct drbd_device *device, handle_write_conflicts() argument
2177 drbd_insert_interval(&device->write_requests, &peer_req->i); handle_write_conflicts()
2180 drbd_for_each_overlap(i, &device->write_requests, sector, size) { handle_write_conflicts()
2192 err = drbd_wait_misc(device, i); handle_write_conflicts()
2210 drbd_alert(device, "Concurrent writes detected: " handle_write_conflicts()
2219 list_add_tail(&peer_req->w.list, &device->done_ee); handle_write_conflicts()
2229 drbd_alert(device, "Concurrent writes detected: " handle_write_conflicts()
2247 err = drbd_wait_misc(device, &req->i); handle_write_conflicts()
2250 fail_postponed_requests(device, sector, size); handle_write_conflicts()
2266 drbd_remove_epoch_entry_interval(device, peer_req); handle_write_conflicts()
2274 struct drbd_device *device; receive_Data() local
2287 device = peer_device->device; receive_Data()
2289 if (!get_ldev(device)) { receive_Data()
2310 put_ldev(device); receive_Data()
2321 struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); receive_Data()
2329 D_ASSERT(device, peer_req->i.size == 0); receive_Data()
2330 D_ASSERT(device, dp_flags & DP_FLUSH); receive_Data()
2359 inc_unacked(device); receive_Data()
2367 drbd_send_ack(first_peer_device(device), P_RECV_ACK, peer_req); receive_Data()
2372 D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK); receive_Data()
2377 spin_lock_irq(&device->resource->req_lock); receive_Data()
2378 err = handle_write_conflicts(device, peer_req); receive_Data()
2380 spin_unlock_irq(&device->resource->req_lock); receive_Data()
2382 put_ldev(device); receive_Data()
2389 spin_lock_irq(&device->resource->req_lock); receive_Data()
2396 list_add_tail(&peer_req->w.list, &device->active_ee); receive_Data()
2397 spin_unlock_irq(&device->resource->req_lock); receive_Data()
2399 if (device->state.conn == C_SYNC_TARGET) receive_Data()
2400 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req)); receive_Data()
2402 if (device->state.pdsk < D_INCONSISTENT) { receive_Data()
2404 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); receive_Data()
2406 drbd_al_begin_io(device, &peer_req->i); receive_Data()
2410 err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR); receive_Data()
2415 drbd_err(device, "submit failed, triggering re-connect\n"); receive_Data()
2416 spin_lock_irq(&device->resource->req_lock); receive_Data()
2418 drbd_remove_epoch_entry_interval(device, peer_req); receive_Data()
2419 spin_unlock_irq(&device->resource->req_lock); receive_Data()
2422 drbd_al_complete_io(device, &peer_req->i); receive_Data()
2427 put_ldev(device); receive_Data()
2428 drbd_free_peer_req(device, peer_req); receive_Data()
2432 /* We may throttle resync, if the lower device seems to be busy,
2435 * To decide whether or not the lower device is busy, we use a scheme similar
2443 bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, drbd_rs_should_slow_down() argument
2447 bool throttle = drbd_rs_c_min_rate_throttle(device); drbd_rs_should_slow_down()
2452 spin_lock_irq(&device->al_lock); drbd_rs_should_slow_down()
2453 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); drbd_rs_should_slow_down()
2461 spin_unlock_irq(&device->al_lock); drbd_rs_should_slow_down()
2466 bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) drbd_rs_c_min_rate_throttle() argument
2468 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; drbd_rs_c_min_rate_throttle()
2474 c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate; drbd_rs_c_min_rate_throttle()
2483 atomic_read(&device->rs_sect_ev); drbd_rs_c_min_rate_throttle()
2485 if (atomic_read(&device->ap_actlog_cnt) drbd_rs_c_min_rate_throttle()
2486 || curr_events - device->rs_last_events > 64) { drbd_rs_c_min_rate_throttle()
2490 device->rs_last_events = curr_events; drbd_rs_c_min_rate_throttle()
2494 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; drbd_rs_c_min_rate_throttle()
2496 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T) drbd_rs_c_min_rate_throttle()
2497 rs_left = device->ov_left; drbd_rs_c_min_rate_throttle()
2499 rs_left = drbd_bm_total_weight(device) - device->rs_failed; drbd_rs_c_min_rate_throttle()
2501 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ; drbd_rs_c_min_rate_throttle()
2504 db = device->rs_mark_left[i] - rs_left; drbd_rs_c_min_rate_throttle()
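[Editor's note] drbd_rs_c_min_rate_throttle() above derives the recently achieved resync rate from the sync-mark ring: db bits resynced over dt seconds, compared against the configured c_min_rate floor while application I/O is active. A standalone sketch of that arithmetic, assuming one bitmap bit covers a 4 KiB block as in DRBD:

    #include <stdio.h>

    /* Throttle resync if it currently runs faster than the configured
     * floor (KiB/s); 0 disables the feature. */
    static int should_throttle(unsigned long db_bits, unsigned long dt_sec,
                               unsigned int c_min_rate_kb)
    {
            unsigned long dbdt;             /* achieved rate in KiB/s */

            if (c_min_rate_kb == 0)
                    return 0;
            if (dt_sec == 0)
                    dt_sec = 1;             /* avoid division by zero */
            dbdt = (db_bits << 2) / dt_sec; /* 4 KiB per bit -> KiB */
            return dbdt > c_min_rate_kb;
    }

    int main(void)
    {
            /* 25600 bits in 4 s = 25600 KiB/s; floor 4096 KiB/s -> throttle */
            printf("%d\n", should_throttle(25600, 4, 4096));
            return 0;
    }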
2516 struct drbd_device *device; receive_DataRequest() local
2528 device = peer_device->device; receive_DataRequest()
2529 capacity = drbd_get_capacity(device->this_bdev); receive_DataRequest()
2535 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, receive_DataRequest()
2540 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, receive_DataRequest()
2545 if (!get_ldev_if_state(device, D_UP_TO_DATE)) { receive_DataRequest()
2558 dec_rs_pending(device); receive_DataRequest()
2565 drbd_err(device, "Can not satisfy peer's read request, " receive_DataRequest()
2578 put_ldev(device); receive_DataRequest()
2594 device->bm_resync_fo = BM_SECT_TO_BIT(sector); receive_DataRequest()
2614 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89); receive_DataRequest()
2617 device->bm_resync_fo = BM_SECT_TO_BIT(sector); receive_DataRequest()
2619 device->use_csums = true; receive_DataRequest()
2622 atomic_add(size >> 9, &device->rs_sect_in); receive_DataRequest()
2624 dec_rs_pending(device); receive_DataRequest()
2632 if (device->ov_start_sector == ~(sector_t)0 && receive_DataRequest()
2636 device->ov_start_sector = sector; receive_DataRequest()
2637 device->ov_position = sector; receive_DataRequest()
2638 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector); receive_DataRequest()
2639 device->rs_total = device->ov_left; receive_DataRequest()
2641 device->rs_mark_left[i] = device->ov_left; receive_DataRequest()
2642 device->rs_mark_time[i] = now; receive_DataRequest()
2644 drbd_info(device, "Online Verify start sector: %llu\n", receive_DataRequest()
2682 spin_lock_irq(&device->resource->req_lock); receive_DataRequest()
2683 list_add_tail(&peer_req->w.list, &device->read_ee); receive_DataRequest()
2684 spin_unlock_irq(&device->resource->req_lock); receive_DataRequest()
2687 if (device->state.peer != R_PRIMARY receive_DataRequest()
2688 && drbd_rs_should_slow_down(device, sector, false)) receive_DataRequest()
2691 if (drbd_rs_begin_io(device, sector)) receive_DataRequest()
2695 atomic_add(size >> 9, &device->rs_sect_ev); receive_DataRequest()
2699 inc_unacked(device); receive_DataRequest()
2700 if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0) receive_DataRequest()
2704 drbd_err(device, "submit failed, triggering re-connect\n"); receive_DataRequest()
2707 spin_lock_irq(&device->resource->req_lock); receive_DataRequest()
2709 spin_unlock_irq(&device->resource->req_lock); receive_DataRequest()
2712 put_ldev(device); receive_DataRequest()
2713 drbd_free_peer_req(device, peer_req); receive_DataRequest()
2722 struct drbd_device *device = peer_device->device; __must_hold() local
2727 self = device->ldev->md.uuid[UI_BITMAP] & 1; __must_hold()
2728 peer = device->p_uuid[UI_BITMAP] & 1; __must_hold()
2730 ch_peer = device->p_uuid[UI_SIZE]; __must_hold()
2731 ch_self = device->comm_bm_set; __must_hold()
2741 drbd_err(device, "Configuration error.\n"); __must_hold()
2765 drbd_warn(device, "Discard younger/older primary did not find a decision\n" __must_hold()
2803 struct drbd_device *device = peer_device->device; __must_hold() local
2817 drbd_err(device, "Configuration error.\n"); __must_hold()
2823 if (hg == -1 && device->state.role == R_SECONDARY) __must_hold()
2825 if (hg == 1 && device->state.role == R_PRIMARY) __must_hold()
2832 return device->state.role == R_PRIMARY ? 1 : -1; __must_hold()
2835 if (hg == -1 && device->state.role == R_PRIMARY) { __must_hold()
2841 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY)); __must_hold()
2843 drbd_khelper(device, "pri-lost-after-sb"); __must_hold()
2845 drbd_warn(device, "Successfully gave up primary role.\n"); __must_hold()
2860 struct drbd_device *device = peer_device->device; __must_hold() local
2876 drbd_err(device, "Configuration error.\n"); __must_hold()
2891 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY)); __must_hold()
2893 drbd_khelper(device, "pri-lost-after-sb"); __must_hold()
2895 drbd_warn(device, "Successfully gave up primary role.\n"); __must_hold()
2905 static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid, drbd_uuid_dump() argument
2909 drbd_info(device, "%s uuid info vanished while I was looking!\n", text); drbd_uuid_dump()
2912 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", drbd_uuid_dump()
2934 static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __must_hold(local) __must_hold()
2936 struct drbd_peer_device *const peer_device = first_peer_device(device); __must_hold()
2941 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1); __must_hold()
2942 peer = device->p_uuid[UI_CURRENT] & ~((u64)1); __must_hold()
2961 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) { __must_hold()
2966 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) && __must_hold()
2967 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { __must_hold()
2968 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n"); __must_hold()
2969 drbd_uuid_move_history(device); __must_hold()
2970 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; __must_hold()
2971 device->ldev->md.uuid[UI_BITMAP] = 0; __must_hold()
2973 drbd_uuid_dump(device, "self", device->ldev->md.uuid, __must_hold()
2974 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0); __must_hold()
2977 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n"); __must_hold()
2984 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) { __must_hold()
2989 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) && __must_hold()
2990 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) { __must_hold()
2991 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); __must_hold()
2993 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START]; __must_hold()
2994 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP]; __must_hold()
2995 device->p_uuid[UI_BITMAP] = 0UL; __must_hold()
2997 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); __must_hold()
3000 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n"); __must_hold()
3008 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) + __must_hold()
3009 (device->p_uuid[UI_FLAGS] & 2); __must_hold()
3025 peer = device->p_uuid[UI_BITMAP] & ~((u64)1); __must_hold()
3030 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1); __must_hold()
3033 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == __must_hold()
3034 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : __must_hold()
3035 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) { __must_hold()
3042 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START]; __must_hold()
3043 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1]; __must_hold()
3045 drbd_info(device, "Lost last syncUUID packet, corrected:\n"); __must_hold()
3046 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); __must_hold()
3053 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1); __must_hold()
3055 peer = device->p_uuid[i] & ~((u64)1); __must_hold()
3061 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1); __must_hold()
3062 peer = device->p_uuid[UI_CURRENT] & ~((u64)1); __must_hold()
3067 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); __must_hold()
3070 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == __must_hold()
3071 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) : __must_hold()
3072 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { __must_hold()
3079 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]); __must_hold()
3080 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]); __must_hold()
3082 drbd_info(device, "Last syncUUID did not get through, corrected:\n"); __must_hold()
3083 drbd_uuid_dump(device, "self", device->ldev->md.uuid, __must_hold()
3084 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0); __must_hold()
3092 peer = device->p_uuid[UI_CURRENT] & ~((u64)1); __must_hold()
3094 self = device->ldev->md.uuid[i] & ~((u64)1); __must_hold()
3100 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1); __must_hold()
3101 peer = device->p_uuid[UI_BITMAP] & ~((u64)1); __must_hold()
3107 self = device->ldev->md.uuid[i] & ~((u64)1); __must_hold()
3109 peer = device->p_uuid[j] & ~((u64)1); __must_hold()
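[Editor's note] A convention worth noting in drbd_uuid_compare() above: every comparison strips the lowest UUID bit with & ~((u64)1) before testing identity, because bit 0 is used as a flag rather than as part of the generation identifier. A small sketch of that masking, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* Compare two data-generation UUIDs ignoring the flag in bit 0. */
    static int same_generation(uint64_t self_uuid, uint64_t peer_uuid)
    {
            return (self_uuid & ~(uint64_t)1) == (peer_uuid & ~(uint64_t)1);
    }

    int main(void)
    {
            printf("%d\n", same_generation(0xABCDEF01ULL, 0xABCDEF00ULL)); /* 1 */
            printf("%d\n", same_generation(0xABCDEF01ULL, 0x12345678ULL)); /* 0 */
            return 0;
    }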
3125 struct drbd_device *device = peer_device->device; __must_hold() local
3131 mydisk = device->state.disk; __must_hold()
3133 mydisk = device->new_state_tmp.disk; __must_hold()
3135 drbd_info(device, "drbd_sync_handshake:\n"); __must_hold()
3137 spin_lock_irq(&device->ldev->md.uuid_lock); __must_hold()
3138 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0); __must_hold()
3139 drbd_uuid_dump(device, "peer", device->p_uuid, __must_hold()
3140 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); __must_hold()
3142 hg = drbd_uuid_compare(device, &rule_nr); __must_hold()
3143 spin_unlock_irq(&device->ldev->md.uuid_lock); __must_hold()
3145 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr); __must_hold()
3148 drbd_alert(device, "Unrelated data, aborting!\n"); __must_hold()
3152 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); __must_hold()
3162 drbd_info(device, "Becoming sync %s due to disk states.\n", __must_hold()
3167 drbd_khelper(device, "initial-split-brain"); __must_hold()
3173 int pcount = (device->state.role == R_PRIMARY) __must_hold()
3189 drbd_warn(device, "Split-Brain detected, %d primaries, " __must_hold()
3193 drbd_warn(device, "Doing a full sync, since" __must_hold()
3201 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1)) __must_hold()
3203 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1)) __must_hold()
3207 drbd_warn(device, "Split-Brain detected, manually solved. " __must_hold()
3220 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n"); __must_hold()
3221 drbd_khelper(device, "split-brain"); __must_hold()
3226 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n"); __must_hold()
3231 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) { __must_hold()
3234 drbd_khelper(device, "pri-lost"); __must_hold()
3237 drbd_err(device, "I shall become SyncTarget, but I am primary!\n"); __must_hold()
3240 drbd_warn(device, "Becoming SyncTarget, violating the stable-data" __must_hold()
3247 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n"); __must_hold()
3249 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.", __must_hold()
3256 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); __must_hold()
3257 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", __must_hold()
3268 if (drbd_bm_total_weight(device)) { __must_hold()
3269 drbd_info(device, "No resync, but %lu bits in bitmap!\n", __must_hold()
3270 drbd_bm_total_weight(device)); __must_hold()
3446 static struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device, drbd_crypto_alloc_digest_safe() argument
3456 drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n", drbd_crypto_alloc_digest_safe()
3484 * config_unknown_volume - device configuration command for unknown volume
3486 * When a device is added to an existing connection, the node on which the
3487 * device is added first will send configuration commands to its peer but the
3488 * peer will not know about the device yet. It will warn and ignore these
3489 * commands. Once the device is added on the second node, the second node will
3490 * send the same device configuration commands, but in the other direction.
3504 struct drbd_device *device; receive_SyncParam() local
3519 device = peer_device->device; receive_SyncParam()
3528 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n", receive_SyncParam()
3539 D_ASSERT(device, data_size == 0); receive_SyncParam()
3543 D_ASSERT(device, data_size == 0); receive_SyncParam()
3556 if (get_ldev(device)) { receive_SyncParam()
3559 put_ldev(device); receive_SyncParam()
3561 drbd_err(device, "Allocation of new disk_conf failed\n"); receive_SyncParam()
3565 old_disk_conf = device->ldev->disk_conf; receive_SyncParam()
3574 drbd_err(device, "verify-alg of wrong size, " receive_SyncParam()
3586 D_ASSERT(device, p->verify_alg[data_size-1] == 0); receive_SyncParam()
3592 D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0); receive_SyncParam()
3593 D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0); receive_SyncParam()
3599 if (device->state.conn == C_WF_REPORT_PARAMS) { receive_SyncParam()
3600 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n", receive_SyncParam()
3604 verify_tfm = drbd_crypto_alloc_digest_safe(device, receive_SyncParam()
3613 if (device->state.conn == C_WF_REPORT_PARAMS) { receive_SyncParam()
3614 drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", receive_SyncParam()
3618 csums_tfm = drbd_crypto_alloc_digest_safe(device, receive_SyncParam()
3633 if (fifo_size != device->rs_plan_s->size) { receive_SyncParam()
3636 drbd_err(device, "kmalloc of fifo_buffer failed"); receive_SyncParam()
3637 put_ldev(device); receive_SyncParam()
3646 drbd_err(device, "Allocation of new net_conf failed\n"); receive_SyncParam()
3657 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg); receive_SyncParam()
3664 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg); receive_SyncParam()
3671 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); receive_SyncParam()
3672 put_ldev(device); receive_SyncParam()
3676 old_plan = device->rs_plan_s; receive_SyncParam()
3677 rcu_assign_pointer(device->rs_plan_s, new_plan); receive_SyncParam()
3691 put_ldev(device); receive_SyncParam()
3700 put_ldev(device); receive_SyncParam()
3714 static void warn_if_differ_considerably(struct drbd_device *device, warn_if_differ_considerably() argument
3722 drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s, warn_if_differ_considerably()
3729 struct drbd_device *device; receive_sizes() local
3739 device = peer_device->device; receive_sizes()
3747 device->p_size = p_size; receive_sizes()
3749 if (get_ldev(device)) { receive_sizes()
3751 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size; receive_sizes()
3754 warn_if_differ_considerably(device, "lower level device sizes", receive_sizes()
3755 p_size, drbd_get_max_capacity(device->ldev)); receive_sizes()
3756 warn_if_differ_considerably(device, "user requested size", receive_sizes()
3761 if (device->state.conn == C_WF_REPORT_PARAMS) receive_sizes()
3764 /* Never shrink a device with usable data during connect. receive_sizes()
3766 if (drbd_new_dev_size(device, device->ldev, p_usize, 0) < receive_sizes()
3767 drbd_get_capacity(device->this_bdev) && receive_sizes()
3768 device->state.disk >= D_OUTDATED && receive_sizes()
3769 device->state.conn < C_CONNECTED) { receive_sizes()
3770 drbd_err(device, "The peer's disk size is too small!\n"); receive_sizes()
3772 put_ldev(device); receive_sizes()
3781 drbd_err(device, "Allocation of new disk_conf failed\n"); receive_sizes()
3782 put_ldev(device); receive_sizes()
3787 old_disk_conf = device->ldev->disk_conf; receive_sizes()
3791 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); receive_sizes()
3796 drbd_info(device, "Peer sets u_size to %lu sectors\n", receive_sizes()
3800 put_ldev(device); receive_sizes()
3803 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); receive_sizes()
3810 if (get_ldev(device)) { receive_sizes()
3811 drbd_reconsider_max_bio_size(device, device->ldev); receive_sizes()
3812 dd = drbd_determine_dev_size(device, ddsf, NULL); receive_sizes()
3813 put_ldev(device); receive_sizes()
3816 drbd_md_sync(device); receive_sizes()
3831 drbd_reconsider_max_bio_size(device, NULL); receive_sizes()
3832 drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size); receive_sizes()
3835 if (get_ldev(device)) { receive_sizes()
3836 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) { receive_sizes()
3837 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); receive_sizes()
3841 put_ldev(device); receive_sizes()
3844 if (device->state.conn > C_WF_REPORT_PARAMS) { receive_sizes()
3846 drbd_get_capacity(device->this_bdev) || ldsc) { receive_sizes()
3851 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) || receive_sizes()
3852 (dd == DS_GREW && device->state.conn == C_CONNECTED)) { receive_sizes()
3853 if (device->state.pdsk >= D_INCONSISTENT && receive_sizes()
3854 device->state.disk >= D_INCONSISTENT) { receive_sizes()
3856 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n"); receive_sizes()
3858 resync_after_online_grow(device); receive_sizes()
3860 set_bit(RESYNC_AFTER_NEG, &device->flags); receive_sizes()
3870 struct drbd_device *device; receive_uuids() local
3878 device = peer_device->device; receive_uuids()
3882 drbd_err(device, "kmalloc of p_uuid failed\n"); receive_uuids()
3889 kfree(device->p_uuid); receive_uuids()
3890 device->p_uuid = p_uuid; receive_uuids()
3892 if (device->state.conn < C_CONNECTED && receive_uuids()
3893 device->state.disk < D_INCONSISTENT && receive_uuids()
3894 device->state.role == R_PRIMARY && receive_uuids()
3895 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { receive_uuids()
3896 drbd_err(device, "Can only connect to data with current UUID=%016llX\n", receive_uuids()
3897 (unsigned long long)device->ed_uuid); receive_uuids()
3902 if (get_ldev(device)) { receive_uuids()
3904 device->state.conn == C_CONNECTED && receive_uuids()
3906 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && receive_uuids()
3909 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n"); receive_uuids()
3910 drbd_bitmap_io(device, &drbd_bmio_clear_n_write, receive_uuids()
3913 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]); receive_uuids()
3914 _drbd_uuid_set(device, UI_BITMAP, 0); receive_uuids()
3915 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), receive_uuids()
3917 drbd_md_sync(device); receive_uuids()
3920 put_ldev(device); receive_uuids()
3921 } else if (device->state.disk < D_INCONSISTENT && receive_uuids()
3922 device->state.role == R_PRIMARY) { receive_uuids()
3925 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]); receive_uuids()
3932 mutex_lock(device->state_mutex); receive_uuids()
3933 mutex_unlock(device->state_mutex); receive_uuids()
3934 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT) receive_uuids()
3935 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]); receive_uuids()
3938 drbd_print_uuids(device, "receiver updated UUIDs to"); receive_uuids()
3977 struct drbd_device *device; receive_req_state() local
3985 device = peer_device->device; receive_req_state()
3991 mutex_is_locked(device->state_mutex)) { receive_req_state()
3999 rv = drbd_change_state(device, CS_VERBOSE, mask, val); receive_req_state()
4002 drbd_md_sync(device); receive_req_state()
4034 struct drbd_device *device; receive_state() local
4044 device = peer_device->device; receive_state()
4050 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT; receive_state()
4051 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); receive_state()
4054 spin_lock_irq(&device->resource->req_lock); receive_state()
4056 os = ns = drbd_read_state(device); receive_state()
4057 spin_unlock_irq(&device->resource->req_lock); receive_state()
4089 if (drbd_bm_total_weight(device) <= device->rs_failed) receive_state()
4090 drbd_resync_finished(device); receive_state()
4098 ov_out_of_sync_print(device); receive_state()
4099 drbd_resync_finished(device); receive_state()
4118 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING && receive_state()
4119 get_ldev_if_state(device, D_NEGOTIATING)) { receive_state()
4131 cr |= test_bit(CONSIDER_RESYNC, &device->flags); receive_state()
4141 put_ldev(device); receive_state()
4144 if (device->state.disk == D_NEGOTIATING) { receive_state()
4145 drbd_force_state(device, NS(disk, D_FAILED)); receive_state()
4147 drbd_err(device, "Disk attach process on the peer node was aborted.\n"); receive_state()
4153 D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS); receive_state()
4160 spin_lock_irq(&device->resource->req_lock); receive_state()
4161 if (os.i != drbd_read_state(device).i) receive_state()
4163 clear_bit(CONSIDER_RESYNC, &device->flags); receive_state()
4168 ns.disk = device->new_state_tmp.disk; receive_state()
4170 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED && receive_state()
4171 test_bit(NEW_CUR_UUID, &device->flags)) { receive_state()
4174 spin_unlock_irq(&device->resource->req_lock); receive_state()
4175 drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); receive_state()
4177 drbd_uuid_new_current(device); receive_state()
4178 clear_bit(NEW_CUR_UUID, &device->flags); receive_state()
4182 rv = _drbd_set_state(device, ns, cs_flags, NULL); receive_state()
4183 ns = drbd_read_state(device); receive_state()
4184 spin_unlock_irq(&device->resource->req_lock); receive_state()
4202 clear_bit(DISCARD_MY_DATA, &device->flags); receive_state()
4204 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */ receive_state()
4212 struct drbd_device *device; receive_sync_uuid() local
4218 device = peer_device->device; receive_sync_uuid()
4220 wait_event(device->misc_wait, receive_sync_uuid()
4221 device->state.conn == C_WF_SYNC_UUID || receive_sync_uuid()
4222 device->state.conn == C_BEHIND || receive_sync_uuid()
4223 device->state.conn < C_CONNECTED || receive_sync_uuid()
4224 device->state.disk < D_NEGOTIATING); receive_sync_uuid()
4226 /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */ receive_sync_uuid()
4230 if (get_ldev_if_state(device, D_NEGOTIATING)) { receive_sync_uuid()
4231 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid)); receive_sync_uuid()
4232 _drbd_uuid_set(device, UI_BITMAP, 0UL); receive_sync_uuid()
4234 drbd_print_uuids(device, "updated sync uuid"); receive_sync_uuid()
4235 drbd_start_resync(device, C_SYNC_TARGET); receive_sync_uuid()
4237 put_ldev(device); receive_sync_uuid()
4239 drbd_err(device, "Ignoring SyncUUID packet!\n"); receive_sync_uuid()
4271 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p); receive_bitmap_plain()
4335 _drbd_bm_set_bits(peer_device->device, s, e); recv_bm_rle_bits()
4389 void INFO_bm_xfer_stats(struct drbd_device *device, INFO_bm_xfer_stats() argument
4393 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection); INFO_bm_xfer_stats()
4417 drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), " INFO_bm_xfer_stats()
4436 struct drbd_device *device; receive_bitmap() local
4443 device = peer_device->device; receive_bitmap()
4445 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED); receive_bitmap()
4450 .bm_bits = drbd_bm_bits(device), receive_bitmap()
4451 .bm_words = drbd_bm_words(device), receive_bitmap()
4463 drbd_err(device, "ReportCBitmap packet too large\n"); receive_bitmap()
4468 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size); receive_bitmap()
4477 drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd); receive_bitmap()
4495 INFO_bm_xfer_stats(device, "receive", &c); receive_bitmap()
4497 if (device->state.conn == C_WF_BITMAP_T) { receive_bitmap()
4500 err = drbd_send_bitmap(device); receive_bitmap()
4504 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); receive_bitmap()
4505 D_ASSERT(device, rv == SS_SUCCESS); receive_bitmap()
4506 } else if (device->state.conn != C_WF_BITMAP_S) { receive_bitmap()
4509 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n", receive_bitmap()
4510 drbd_conn_str(device->state.conn)); receive_bitmap()
4515 drbd_bm_unlock(device); receive_bitmap()
4516 if (!err && device->state.conn == C_WF_BITMAP_S) receive_bitmap()
4517 drbd_start_resync(device, C_SYNC_SOURCE); receive_bitmap()
4541 struct drbd_device *device; receive_out_of_sync() local
4547 device = peer_device->device; receive_out_of_sync()
4549 switch (device->state.conn) { receive_out_of_sync()
4555 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", receive_out_of_sync()
4556 drbd_conn_str(device->state.conn)); receive_out_of_sync()
4559 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); receive_out_of_sync()
4670 struct drbd_device *device = peer_device->device; conn_disconnect() local
4671 kref_get(&device->kref); conn_disconnect()
4674 kref_put(&device->kref, drbd_destroy_device); conn_disconnect()
4703 struct drbd_device *device = peer_device->device; drbd_disconnected() local
4707 spin_lock_irq(&device->resource->req_lock); drbd_disconnected()
4708 _drbd_wait_ee_list_empty(device, &device->active_ee); drbd_disconnected()
4709 _drbd_wait_ee_list_empty(device, &device->sync_ee); drbd_disconnected()
4710 _drbd_wait_ee_list_empty(device, &device->read_ee); drbd_disconnected()
4711 spin_unlock_irq(&device->resource->req_lock); drbd_disconnected()
4723 drbd_rs_cancel_all(device); drbd_disconnected()
4724 device->rs_total = 0; drbd_disconnected()
4725 device->rs_failed = 0; drbd_disconnected()
4726 atomic_set(&device->rs_pending_cnt, 0); drbd_disconnected()
4727 wake_up(&device->misc_wait); drbd_disconnected()
4729 del_timer_sync(&device->resync_timer); drbd_disconnected()
4730 resync_timer_fn((unsigned long)device); drbd_disconnected()
4737 drbd_finish_peer_reqs(device); drbd_disconnected()
4746 drbd_rs_cancel_all(device); drbd_disconnected()
4748 kfree(device->p_uuid); drbd_disconnected()
4749 device->p_uuid = NULL; drbd_disconnected()
4751 if (!drbd_suspended(device)) drbd_disconnected()
4754 drbd_md_sync(device); drbd_disconnected()
4758 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); drbd_disconnected()
4767 i = drbd_free_peer_reqs(device, &device->net_ee); drbd_disconnected()
4769 drbd_info(device, "net_ee not empty, killed %u entries\n", i); drbd_disconnected()
4770 i = atomic_read(&device->pp_in_use_by_net); drbd_disconnected()
4772 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i); drbd_disconnected()
4773 i = atomic_read(&device->pp_in_use); drbd_disconnected()
4775 drbd_info(device, "pp_in_use = %d, expected 0\n", i); drbd_disconnected()
4777 D_ASSERT(device, list_empty(&device->read_ee)); drbd_disconnected()
4778 D_ASSERT(device, list_empty(&device->active_ee)); drbd_disconnected()
4779 D_ASSERT(device, list_empty(&device->sync_ee)); drbd_disconnected()
4780 D_ASSERT(device, list_empty(&device->done_ee)); drbd_disconnected()
5119 struct drbd_device *device; got_RqSReply() local
5126 device = peer_device->device; got_RqSReply()
5129 D_ASSERT(device, connection->agreed_pro_version < 100); got_RqSReply()
5134 set_bit(CL_ST_CHG_SUCCESS, &device->flags); got_RqSReply()
5136 set_bit(CL_ST_CHG_FAIL, &device->flags); got_RqSReply()
5137 drbd_err(device, "Requested state change failed by peer: %s (%d)\n", got_RqSReply()
5140 wake_up(&device->state_wait); got_RqSReply()
5164 struct drbd_device *device; got_IsInSync() local
5172 device = peer_device->device; got_IsInSync()
5174 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89); got_IsInSync()
5178 if (get_ldev(device)) { got_IsInSync()
5179 drbd_rs_complete_io(device, sector); got_IsInSync()
5180 drbd_set_in_sync(device, sector, blksize); got_IsInSync()
5182 device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); got_IsInSync()
5183 put_ldev(device); got_IsInSync()
5185 dec_rs_pending(device); got_IsInSync()
5186 atomic_add(blksize >> 9, &device->rs_sect_in); got_IsInSync()
5192 validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector, validate_req_change_req_state() argument
5199 spin_lock_irq(&device->resource->req_lock); validate_req_change_req_state()
5200 req = find_request(device, root, id, sector, missing_ok, func); validate_req_change_req_state()
5202 spin_unlock_irq(&device->resource->req_lock); validate_req_change_req_state()
5206 spin_unlock_irq(&device->resource->req_lock); validate_req_change_req_state()
5209 complete_master_bio(device, &m); validate_req_change_req_state()
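validate_req_change_req_state() shows a common locking shape: the request lookup and state change happen under req_lock, while complete_master_bio() runs only after the lock is dropped, so the completion work never executes under the spinlock. A compressed sketch of that shape, with hypothetical names:

	#include <linux/spinlock.h>

	struct bio_done {		/* hypothetical, mirrors drbd's bio_and_error */
		int pending;
	};

	static void finish_outside_lock(spinlock_t *req_lock, struct bio_done *m)
	{
		spin_lock_irq(req_lock);
		/* the request lookup and state change belong here */
		m->pending = 1;
		spin_unlock_irq(req_lock);

		if (m->pending) {
			/* complete the master bio here, outside the spinlock */
		}
	}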
5216 struct drbd_device *device; got_BlockAck() local
5225 device = peer_device->device; got_BlockAck()
5230 drbd_set_in_sync(device, sector, blksize); got_BlockAck()
5231 dec_rs_pending(device); got_BlockAck()
5254 return validate_req_change_req_state(device, p->block_id, sector, got_BlockAck()
5255 &device->write_requests, __func__, got_BlockAck()
5262 struct drbd_device *device; got_NegAck() local
5271 device = peer_device->device; got_NegAck()
5276 dec_rs_pending(device); got_NegAck()
5277 drbd_rs_failed_io(device, sector, size); got_NegAck()
5281 err = validate_req_change_req_state(device, p->block_id, sector, got_NegAck()
5282 &device->write_requests, __func__, got_NegAck()
5290 drbd_set_out_of_sync(device, sector, size); got_NegAck()
5298 struct drbd_device *device; got_NegDReply() local
5305 device = peer_device->device; got_NegDReply()
5309 drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n", got_NegDReply()
5312 return validate_req_change_req_state(device, p->block_id, sector, got_NegDReply()
5313 &device->read_requests, __func__, got_NegDReply()
5320 struct drbd_device *device; got_NegRSDReply() local
5328 device = peer_device->device; got_NegRSDReply()
5335 dec_rs_pending(device); got_NegRSDReply()
5337 if (get_ldev_if_state(device, D_FAILED)) { got_NegRSDReply()
5338 drbd_rs_complete_io(device, sector); got_NegRSDReply()
5341 drbd_rs_failed_io(device, sector, size); got_NegRSDReply()
5347 put_ldev(device); got_NegRSDReply()
5363 struct drbd_device *device = peer_device->device; got_BarrierAck() local
5365 if (device->state.conn == C_AHEAD && got_BarrierAck()
5366 atomic_read(&device->ap_in_flight) == 0 && got_BarrierAck()
5367 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) { got_BarrierAck()
5368 device->start_resync_timer.expires = jiffies + HZ; got_BarrierAck()
5369 add_timer(&device->start_resync_timer); got_BarrierAck()
5380 struct drbd_device *device; got_OVResult() local
5389 device = peer_device->device; got_OVResult()
5397 drbd_ov_out_of_sync_found(device, sector, size); got_OVResult()
5399 ov_out_of_sync_print(device); got_OVResult()
5401 if (!get_ldev(device)) got_OVResult()
5404 drbd_rs_complete_io(device, sector); got_OVResult()
5405 dec_rs_pending(device); got_OVResult()
5407 --device->ov_left; got_OVResult()
5410 if ((device->ov_left & 0x200) == 0x200) got_OVResult()
5411 drbd_advance_rs_marks(device, device->ov_left); got_OVResult()
5413 if (device->ov_left == 0) { got_OVResult()
5417 dw->device = device; got_OVResult()
5420 drbd_err(device, "kmalloc(dw) failed."); got_OVResult()
5421 ov_out_of_sync_print(device); got_OVResult()
5422 drbd_resync_finished(device); got_OVResult()
5425 put_ldev(device); got_OVResult()
5445 struct drbd_device *device = peer_device->device; connection_finish_peer_reqs() local
5446 kref_get(&device->kref); connection_finish_peer_reqs()
5448 if (drbd_finish_peer_reqs(device)) { connection_finish_peer_reqs()
5449 kref_put(&device->kref, drbd_destroy_device); connection_finish_peer_reqs()
5452 kref_put(&device->kref, drbd_destroy_device); connection_finish_peer_reqs()
5459 struct drbd_device *device = peer_device->device; connection_finish_peer_reqs() local
5460 not_empty = !list_empty(&device->done_ee); connection_finish_peer_reqs()
/linux-4.4.14/drivers/s390/char/
tape_char.c
2 * character device frontend for tape device driver
63 tapechar_setup_device(struct tape_device * device) tapechar_setup_device() argument
67 sprintf(device_name, "ntibm%i", device->first_minor / 2); tapechar_setup_device()
68 device->nt = register_tape_dev( tapechar_setup_device()
69 &device->cdev->dev, tapechar_setup_device()
70 MKDEV(tapechar_major, device->first_minor), tapechar_setup_device()
76 device->rt = register_tape_dev( tapechar_setup_device()
77 &device->cdev->dev, tapechar_setup_device()
78 MKDEV(tapechar_major, device->first_minor + 1), tapechar_setup_device()
88 tapechar_cleanup_device(struct tape_device *device) tapechar_cleanup_device() argument
90 unregister_tape_dev(&device->cdev->dev, device->rt); tapechar_cleanup_device()
91 device->rt = NULL; tapechar_cleanup_device()
92 unregister_tape_dev(&device->cdev->dev, device->nt); tapechar_cleanup_device()
93 device->nt = NULL; tapechar_cleanup_device()
97 tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) tapechar_check_idalbuffer() argument
101 if (device->char_data.idal_buf != NULL && tapechar_check_idalbuffer()
102 device->char_data.idal_buf->size == block_size) tapechar_check_idalbuffer()
116 if (device->char_data.idal_buf != NULL) tapechar_check_idalbuffer()
117 idal_buffer_free(device->char_data.idal_buf); tapechar_check_idalbuffer()
119 device->char_data.idal_buf = new; tapechar_check_idalbuffer()
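tapechar_check_idalbuffer() reallocates only when the block size actually changed, and frees the old buffer only once the new one exists, so a failed allocation leaves the device with its previous, still-valid buffer. The same shape with plain kmalloc(); resize_buf() is a hypothetical stand-in for the idal helpers:

	#include <linux/errno.h>
	#include <linux/slab.h>

	static int resize_buf(char **bufp, size_t *sizep, size_t new_size)
	{
		char *new;

		if (*bufp && *sizep == new_size)
			return 0;		/* nothing to do */

		new = kmalloc(new_size, GFP_KERNEL);
		if (!new)
			return -ENOMEM;		/* old buffer stays valid */

		kfree(*bufp);			/* kfree(NULL) is a no-op */
		*bufp = new;
		*sizep = new_size;
		return 0;
	}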
125 * Tape device read function
130 struct tape_device *device; tapechar_read() local
136 device = (struct tape_device *) filp->private_data; tapechar_read()
143 if(device->required_tapemarks) { tapechar_read()
144 return tape_std_terminate_write(device); tapechar_read()
148 if (device->char_data.block_size != 0) { tapechar_read()
149 if (count < device->char_data.block_size) { tapechar_read()
154 block_size = device->char_data.block_size; tapechar_read()
159 rc = tapechar_check_idalbuffer(device, block_size); tapechar_read()
165 request = device->discipline->read_block(device, block_size); tapechar_read()
169 rc = tape_do_io(device, request); tapechar_read()
174 if (idal_buffer_to_user(device->char_data.idal_buf, tapechar_read()
183 * Tape device write function
188 struct tape_device *device; tapechar_write() local
196 device = (struct tape_device *) filp->private_data; tapechar_write()
198 if (device->char_data.block_size != 0) { tapechar_write()
199 if (count < device->char_data.block_size) { tapechar_write()
204 block_size = device->char_data.block_size; tapechar_write()
211 rc = tapechar_check_idalbuffer(device, block_size); tapechar_write()
218 request = device->discipline->write_block(device, block_size); tapechar_write()
225 if (idal_buffer_from_user(device->char_data.idal_buf, tapechar_write()
230 rc = tape_do_io(device, request); tapechar_write()
243 * Ok, the device has no more space. It has NOT written tapechar_write()
246 if (device->discipline->process_eov) tapechar_write()
247 device->discipline->process_eov(device); tapechar_write()
261 device->required_tapemarks = 2; tapechar_write()
267 * Character frontend tape device open function.
272 struct tape_device *device; tapechar_open() local
283 device = tape_find_device(minor / TAPE_MINORS_PER_DEV); tapechar_open()
284 if (IS_ERR(device)) { tapechar_open()
286 return PTR_ERR(device); tapechar_open()
289 rc = tape_open(device); tapechar_open()
291 filp->private_data = device; tapechar_open()
294 tape_put_device(device); tapechar_open()
300 * Character frontend tape device release function.
306 struct tape_device *device; tapechar_release() local
309 device = (struct tape_device *) filp->private_data; tapechar_release()
317 if (device->required_tapemarks) tapechar_release()
318 tape_std_terminate_write(device); tapechar_release()
319 tape_mtop(device, MTREW, 1); tapechar_release()
321 if (device->required_tapemarks > 1) { tapechar_release()
322 if (tape_mtop(device, MTWEOF, 1) == 0) tapechar_release()
323 device->required_tapemarks--; tapechar_release()
327 if (device->char_data.idal_buf != NULL) { tapechar_release()
328 idal_buffer_free(device->char_data.idal_buf); tapechar_release()
329 device->char_data.idal_buf = NULL; tapechar_release()
331 tape_release(device); tapechar_release()
333 tape_put_device(device); tapechar_release()
339 * Tape device io controls.
342 __tapechar_ioctl(struct tape_device *device, __tapechar_ioctl() argument
371 if (device->required_tapemarks) __tapechar_ioctl()
372 tape_std_terminate_write(device); __tapechar_ioctl()
376 rc = tape_mtop(device, op.mt_op, op.mt_count); __tapechar_ioctl()
379 if (op.mt_count > device->required_tapemarks) __tapechar_ioctl()
380 device->required_tapemarks = 0; __tapechar_ioctl()
382 device->required_tapemarks -= op.mt_count; __tapechar_ioctl()
390 rc = tape_mtop(device, MTTELL, 1); __tapechar_ioctl()
404 get.mt_resid = 0 /* device->devstat.rescnt */; __tapechar_ioctl()
406 ((device->char_data.block_size << MT_ST_BLKSIZE_SHIFT) __tapechar_ioctl()
412 get.mt_gstat = device->tape_generic_status; __tapechar_ioctl()
414 if (device->medium_state == MS_LOADED) { __tapechar_ioctl()
415 rc = tape_mtop(device, MTTELL, 1); __tapechar_ioctl()
432 if (device->discipline->ioctl_fn == NULL) __tapechar_ioctl()
434 return device->discipline->ioctl_fn(device, no, data); __tapechar_ioctl()
440 struct tape_device *device; tapechar_ioctl() local
445 device = (struct tape_device *) filp->private_data; tapechar_ioctl()
446 mutex_lock(&device->mutex); tapechar_ioctl()
447 rc = __tapechar_ioctl(device, no, data); tapechar_ioctl()
448 mutex_unlock(&device->mutex); tapechar_ioctl()
456 struct tape_device *device = filp->private_data; tapechar_compat_ioctl() local
465 if (device->discipline->ioctl_fn) { tapechar_compat_ioctl()
466 mutex_lock(&device->mutex); tapechar_compat_ioctl()
467 rval = device->discipline->ioctl_fn(device, no, argp); tapechar_compat_ioctl()
468 mutex_unlock(&device->mutex); tapechar_compat_ioctl()
478 * Initialize character device frontend.
tape_core.c
2 * basic function of the tape device driver
93 tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf) tape_medium_state_show()
105 tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf) tape_first_minor_show()
117 tape_state_show(struct device *dev, struct device_attribute *attr, char *buf) tape_state_show()
130 tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf) tape_operation_show()
157 tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf) tape_blocksize_show()
186 tape_state_set(struct tape_device *device, enum tape_state newstate) tape_state_set() argument
190 if (device->tape_state == TS_NOT_OPER) { tape_state_set()
194 DBF_EVENT(4, "ts. dev: %x\n", device->first_minor); tape_state_set()
196 if (device->tape_state < TS_SIZE && device->tape_state >=0 ) tape_state_set()
197 str = tape_state_verbose[device->tape_state]; tape_state_set()
207 device->tape_state = newstate; tape_state_set()
208 wake_up(&device->state_change_wq); tape_state_set()
212 struct tape_device *device; member in struct:tape_med_state_work_data
224 struct tape_device *device = p->device; tape_med_state_work_handler() local
230 "unloaded\n", dev_name(&device->cdev->dev)); tape_med_state_work_handler()
232 kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); tape_med_state_work_handler()
236 dev_name(&device->cdev->dev)); tape_med_state_work_handler()
238 kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); tape_med_state_work_handler()
243 tape_put_device(device); tape_med_state_work_handler()
248 tape_med_state_work(struct tape_device *device, enum tape_medium_state state) tape_med_state_work() argument
255 p->device = tape_get_device(device); tape_med_state_work()
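tape_med_state_work() packages the notification into a work item that carries its own device reference; the handler drops that reference when it finishes. A sketch built on the tape_get_device()/tape_put_device() helpers that appear further down in this file's hits; med_work and queue_med_work are hypothetical names, and GFP_ATOMIC is assumed because the caller may run with the device lock held:

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct tape_device;	/* from tape.h */
	extern struct tape_device *tape_get_device(struct tape_device *device);
	extern void tape_put_device(struct tape_device *device);

	struct med_work {			/* hypothetical work item */
		struct work_struct work;
		struct tape_device *device;	/* reference pinned while queued */
	};

	static void med_work_handler(struct work_struct *work)
	{
		struct med_work *p = container_of(work, struct med_work, work);

		/* report the medium change (uevent, message, ...) */
		tape_put_device(p->device);	/* drop the queued reference */
		kfree(p);
	}

	static void queue_med_work(struct tape_device *device)
	{
		struct med_work *p = kzalloc(sizeof(*p), GFP_ATOMIC);

		if (!p)
			return;
		INIT_WORK(&p->work, med_work_handler);
		p->device = tape_get_device(device);	/* handler owns this ref */
		schedule_work(&p->work);
	}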
262 tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) tape_med_state_set() argument
266 oldstate = device->medium_state; tape_med_state_set()
269 device->medium_state = newstate; tape_med_state_set()
272 device->tape_generic_status |= GMT_DR_OPEN(~0); tape_med_state_set()
274 tape_med_state_work(device, MS_UNLOADED); tape_med_state_set()
277 device->tape_generic_status &= ~GMT_DR_OPEN(~0); tape_med_state_set()
279 tape_med_state_work(device, MS_LOADED); tape_med_state_set()
284 wake_up(&device->state_change_wq); tape_med_state_set()
288 * Stop running ccw. Has to be called with the device lock held.
291 __tape_cancel_io(struct tape_device *device, struct tape_request *request) __tape_cancel_io() argument
302 rc = ccw_device_clear(device->cdev, (long) request); __tape_cancel_io()
310 schedule_delayed_work(&device->tape_dnr, 0); __tape_cancel_io()
313 DBF_EXCEPTION(2, "device gone, retry\n"); __tape_cancel_io()
327 * Add device into the sorted list, giving it the first
331 tape_assign_minor(struct tape_device *device) tape_assign_minor() argument
347 device->first_minor = minor; tape_assign_minor()
348 list_add_tail(&device->node, &tmp->node); tape_assign_minor()
353 /* remove device from the list */
355 tape_remove_minor(struct tape_device *device) tape_remove_minor() argument
358 list_del_init(&device->node); tape_remove_minor()
359 device->first_minor = -1; tape_remove_minor()
364 * Set a device online.
366 * This function is called by the common I/O layer to move a device from the
368 * If we return an error (RC < 0) the device remains in the offline state. This
369 * can happen if the device is assigned somewhere else, for example.
372 tape_generic_online(struct tape_device *device, tape_generic_online() argument
377 DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline); tape_generic_online()
379 if (device->tape_state != TS_INIT) { tape_generic_online()
380 DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state); tape_generic_online()
384 init_timer(&device->lb_timeout); tape_generic_online()
385 device->lb_timeout.function = tape_long_busy_timeout; tape_generic_online()
387 /* Let the discipline have a go at the device. */ tape_generic_online()
388 device->discipline = discipline; tape_generic_online()
393 rc = discipline->setup_device(device); tape_generic_online()
396 rc = tape_assign_minor(device); tape_generic_online()
400 rc = tapechar_setup_device(device); tape_generic_online()
404 tape_state_set(device, TS_UNUSED); tape_generic_online()
406 DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id); tape_generic_online()
411 tape_remove_minor(device); tape_generic_online()
413 device->discipline->cleanup_device(device); tape_generic_online()
414 device->discipline = NULL; tape_generic_online()
421 tape_cleanup_device(struct tape_device *device) tape_cleanup_device() argument
423 tapechar_cleanup_device(device); tape_cleanup_device()
424 device->discipline->cleanup_device(device); tape_cleanup_device()
425 module_put(device->discipline->owner); tape_cleanup_device()
426 tape_remove_minor(device); tape_cleanup_device()
427 tape_med_state_set(device, MS_UNKNOWN); tape_cleanup_device()
431 * Suspend device.
434 * request. We refuse to suspend if the device is loaded or in use for the
438 * during DETACH processing (unless the tape device was attached with the
440 * resume the original state of the tape device, since we would need to
445 struct tape_device *device; tape_generic_pm_suspend() local
447 device = dev_get_drvdata(&cdev->dev); tape_generic_pm_suspend()
448 if (!device) { tape_generic_pm_suspend()
453 device->cdev_id, device); tape_generic_pm_suspend()
455 if (device->medium_state != MS_UNLOADED) { tape_generic_pm_suspend()
456 pr_err("A cartridge is loaded in tape device %s, " tape_generic_pm_suspend()
461 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_generic_pm_suspend()
462 switch (device->tape_state) { tape_generic_pm_suspend()
466 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_generic_pm_suspend()
469 pr_err("Tape device %s is busy, refusing to " tape_generic_pm_suspend()
471 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_generic_pm_suspend()
475 DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id); tape_generic_pm_suspend()
480 * Set device offline.
489 struct tape_device *device; tape_generic_offline() local
491 device = dev_get_drvdata(&cdev->dev); tape_generic_offline()
492 if (!device) { tape_generic_offline()
497 device->cdev_id, device); tape_generic_offline()
499 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_generic_offline()
500 switch (device->tape_state) { tape_generic_offline()
503 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_generic_offline()
506 tape_state_set(device, TS_INIT); tape_generic_offline()
507 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_generic_offline()
508 tape_cleanup_device(device); tape_generic_offline()
513 device->cdev_id); tape_generic_offline()
514 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_generic_offline()
518 DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id); tape_generic_offline()
523 * Allocate memory for a new device structure.
528 struct tape_device *device; tape_alloc_device() local
530 device = kzalloc(sizeof(struct tape_device), GFP_KERNEL); tape_alloc_device()
531 if (device == NULL) { tape_alloc_device()
535 device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA); tape_alloc_device()
536 if (device->modeset_byte == NULL) { tape_alloc_device()
538 kfree(device); tape_alloc_device()
541 mutex_init(&device->mutex); tape_alloc_device()
542 INIT_LIST_HEAD(&device->req_queue); tape_alloc_device()
543 INIT_LIST_HEAD(&device->node); tape_alloc_device()
544 init_waitqueue_head(&device->state_change_wq); tape_alloc_device()
545 init_waitqueue_head(&device->wait_queue); tape_alloc_device()
546 device->tape_state = TS_INIT; tape_alloc_device()
547 device->medium_state = MS_UNKNOWN; tape_alloc_device()
548 *device->modeset_byte = 0; tape_alloc_device()
549 device->first_minor = -1; tape_alloc_device()
550 atomic_set(&device->ref_count, 1); tape_alloc_device()
551 INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request); tape_alloc_device()
553 return device; tape_alloc_device()
557 * Get a reference to an existing device structure. This will automatically
561 tape_get_device(struct tape_device *device) tape_get_device() argument
565 count = atomic_inc_return(&device->ref_count); tape_get_device()
566 DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count); tape_get_device()
567 return device; tape_get_device()
572 * reference counter reaches zero free the device structure.
577 tape_put_device(struct tape_device *device) tape_put_device() argument
581 count = atomic_dec_return(&device->ref_count); tape_put_device()
582 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count); tape_put_device()
585 kfree(device->modeset_byte); tape_put_device()
586 kfree(device); tape_put_device()
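tape_get_device()/tape_put_device() implement reference counting directly on an atomic_t: the creator starts the count at 1, every get increments, and the put that reaches zero frees the structure. Reduced to its core (refobj is a hypothetical type; current kernels would normally use kref or refcount_t, which add sanity checks):

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct refobj {
		atomic_t ref_count;		/* set to 1 by the creator */
	};

	static struct refobj *refobj_get(struct refobj *obj)
	{
		atomic_inc(&obj->ref_count);
		return obj;
	}

	static void refobj_put(struct refobj *obj)
	{
		if (atomic_dec_return(&obj->ref_count) == 0)
			kfree(obj);
	}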
591 * Find tape device by a device index.
596 struct tape_device *device, *tmp; tape_find_device() local
598 device = ERR_PTR(-ENODEV); tape_find_device()
602 device = tape_get_device(tmp); tape_find_device()
607 return device; tape_find_device()
616 struct tape_device *device; tape_generic_probe() local
620 device = tape_alloc_device(); tape_generic_probe()
621 if (IS_ERR(device)) tape_generic_probe()
627 tape_put_device(device); tape_generic_probe()
630 dev_set_drvdata(&cdev->dev, device); tape_generic_probe()
632 device->cdev = cdev; tape_generic_probe()
634 device->cdev_id = devid_to_int(&dev_id); tape_generic_probe()
639 __tape_discard_requests(struct tape_device *device) __tape_discard_requests() argument
644 list_for_each_safe(l, n, &device->req_queue) { __tape_discard_requests()
651 request->device = NULL; __tape_discard_requests()
652 tape_put_device(device); __tape_discard_requests()
662 * This function is called whenever the common I/O layer detects the device
668 struct tape_device * device; tape_generic_remove() local
670 device = dev_get_drvdata(&cdev->dev); tape_generic_remove()
671 if (!device) { tape_generic_remove()
674 DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev); tape_generic_remove()
676 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_generic_remove()
677 switch (device->tape_state) { tape_generic_remove()
679 tape_state_set(device, TS_NOT_OPER); tape_generic_remove()
684 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_generic_remove()
688 * Need only to release the device. tape_generic_remove()
690 tape_state_set(device, TS_NOT_OPER); tape_generic_remove()
691 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_generic_remove()
692 tape_cleanup_device(device); tape_generic_remove()
701 device->cdev_id); tape_generic_remove()
703 "use\n", dev_name(&device->cdev->dev)); tape_generic_remove()
704 tape_state_set(device, TS_NOT_OPER); tape_generic_remove()
705 __tape_discard_requests(device); tape_generic_remove()
706 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_generic_remove()
707 tape_cleanup_device(device); tape_generic_remove()
710 device = dev_get_drvdata(&cdev->dev); tape_generic_remove()
711 if (device) { tape_generic_remove()
714 tape_put_device(device); tape_generic_remove()
769 if (request->device) tape_free_request()
770 tape_put_device(request->device); tape_free_request()
777 __tape_start_io(struct tape_device *device, struct tape_request *request) __tape_start_io() argument
782 device->cdev, __tape_start_io()
793 schedule_delayed_work(&device->tape_dnr, 0); __tape_start_io()
803 __tape_start_next_request(struct tape_device *device) __tape_start_next_request() argument
809 DBF_LH(6, "__tape_start_next_request(%p)\n", device); __tape_start_next_request()
814 list_for_each_safe(l, n, &device->req_queue) { __tape_start_next_request()
838 rc = __tape_cancel_io(device, request); __tape_start_next_request()
840 rc = __tape_start_io(device, request); __tape_start_next_request()
861 struct tape_device *device = tape_delayed_next_request() local
864 DBF_LH(6, "tape_delayed_next_request(%p)\n", device); tape_delayed_next_request()
865 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_delayed_next_request()
866 __tape_start_next_request(device); tape_delayed_next_request()
867 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_delayed_next_request()
873 struct tape_device *device; tape_long_busy_timeout() local
875 device = (struct tape_device *) data; tape_long_busy_timeout()
876 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_long_busy_timeout()
877 request = list_entry(device->req_queue.next, struct tape_request, list); tape_long_busy_timeout()
879 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); tape_long_busy_timeout()
880 __tape_start_next_request(device); tape_long_busy_timeout()
881 device->lb_timeout.data = 0UL; tape_long_busy_timeout()
882 tape_put_device(device); tape_long_busy_timeout()
883 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_long_busy_timeout()
888 struct tape_device * device, __tape_end_request()
892 DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc); __tape_end_request()
906 if (!list_empty(&device->req_queue)) __tape_end_request()
907 __tape_start_next_request(device); __tape_end_request()
914 tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, tape_dump_sense_dbf() argument
926 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); tape_dump_sense_dbf()
937 * the device lock held.
940 __tape_start_request(struct tape_device *device, struct tape_request *request) __tape_start_request() argument
950 if (device->tape_state == TS_INIT) __tape_start_request()
952 if (device->tape_state == TS_UNUSED) __tape_start_request()
955 if (device->tape_state == TS_BLKUSE) __tape_start_request()
957 if (device->tape_state != TS_IN_USE) __tape_start_request()
961 /* Increase use count of device for the added request. */ __tape_start_request()
962 request->device = tape_get_device(device); __tape_start_request()
964 if (list_empty(&device->req_queue)) { __tape_start_request()
966 rc = __tape_start_io(device, request); __tape_start_request()
971 list_add(&request->list, &device->req_queue); __tape_start_request()
975 list_add_tail(&request->list, &device->req_queue); __tape_start_request()
985 tape_do_io_async(struct tape_device *device, struct tape_request *request) tape_do_io_async() argument
989 DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request); tape_do_io_async()
991 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_do_io_async()
993 rc = __tape_start_request(device, request); tape_do_io_async()
994 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_do_io_async()
1011 tape_do_io(struct tape_device *device, struct tape_request *request) tape_do_io() argument
1015 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_do_io()
1018 request->callback_data = &device->wait_queue; tape_do_io()
1020 rc = __tape_start_request(device, request); tape_do_io()
1021 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_do_io()
1025 wait_event(device->wait_queue, (request->callback == NULL)); tape_do_io()
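tape_do_io() builds a synchronous call out of the asynchronous machinery: it installs a wake-up callback, starts the request, and sleeps until the completion path clears ->callback. A sketch of that handshake with hypothetical simple_req names; the completion side is assumed to invoke req->callback(req, req->callback_data) when the request finishes:

	#include <linux/wait.h>

	struct simple_req {
		void (*callback)(struct simple_req *, void *);
		void *callback_data;
	};

	/* Completion side: clear ->callback first, then wake the submitter. */
	static void simple_wake_up(struct simple_req *req, void *data)
	{
		req->callback = NULL;
		wake_up((wait_queue_head_t *)data);
	}

	static void simple_do_io_sync(wait_queue_head_t *wq, struct simple_req *req)
	{
		req->callback = simple_wake_up;
		req->callback_data = wq;
		/* start the request here; the completion path calls
		 * req->callback(req, req->callback_data) when done */
		wait_event(*wq, req->callback == NULL);
	}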
1043 tape_do_io_interruptible(struct tape_device *device, tape_do_io_interruptible() argument
1048 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_do_io_interruptible()
1051 request->callback_data = &device->wait_queue; tape_do_io_interruptible()
1052 rc = __tape_start_request(device, request); tape_do_io_interruptible()
1053 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_do_io_interruptible()
1057 rc = wait_event_interruptible(device->wait_queue, tape_do_io_interruptible()
1064 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_do_io_interruptible()
1065 rc = __tape_cancel_io(device, request); tape_do_io_interruptible()
1066 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_do_io_interruptible()
1071 device->wait_queue, tape_do_io_interruptible()
1076 DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id); tape_do_io_interruptible()
1086 tape_cancel_io(struct tape_device *device, struct tape_request *request) tape_cancel_io() argument
1090 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_cancel_io()
1091 rc = __tape_cancel_io(device, request); tape_cancel_io()
1092 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_cancel_io()
1102 struct tape_device *device; __tape_do_irq() local
1106 device = dev_get_drvdata(&cdev->dev); __tape_do_irq()
1107 if (device == NULL) { __tape_do_irq()
1112 DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request); __tape_do_irq()
1120 device->cdev_id); __tape_do_irq()
1122 __tape_end_request(device, request, -EIO); __tape_do_irq()
1126 device->cdev_id, PTR_ERR(irb)); __tape_do_irq()
1142 device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl); __tape_do_irq()
1144 schedule_delayed_work(&device->tape_dnr, HZ); __tape_do_irq()
1152 !list_empty(&device->req_queue)) { __tape_do_irq()
1155 req = list_entry(device->req_queue.next, __tape_do_irq()
1158 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); __tape_do_irq()
1159 if (del_timer(&device->lb_timeout)) { __tape_do_irq()
1160 device->lb_timeout.data = 0UL; __tape_do_irq()
1161 tape_put_device(device); __tape_do_irq()
1162 __tape_start_next_request(device); __tape_do_irq()
1170 device->tape_generic_status |= GMT_ONLINE(~0); __tape_do_irq()
1172 device->tape_generic_status &= ~GMT_ONLINE(~0); __tape_do_irq()
1176 * and device end is unusual. Log the sense data. __tape_do_irq()
1179 tape_dump_sense_dbf(device, request, irb); __tape_do_irq()
1181 /* Upon normal completion the device _is_ online */ __tape_do_irq()
1182 device->tape_generic_status |= GMT_ONLINE(~0); __tape_do_irq()
1184 if (device->tape_state == TS_NOT_OPER) { __tape_do_irq()
1185 DBF_EVENT(6, "tape:device is not operational\n"); __tape_do_irq()
1194 __tape_end_request(device, request, -EIO); __tape_do_irq()
1198 rc = device->discipline->irq(device, request, irb); __tape_do_irq()
1208 /* Upon normal completion the device _is_ online */ __tape_do_irq()
1209 device->tape_generic_status |= GMT_ONLINE(~0); __tape_do_irq()
1210 __tape_end_request(device, request, rc); __tape_do_irq()
1215 device->lb_timeout.data = __tape_do_irq()
1216 (unsigned long) tape_get_device(device); __tape_do_irq()
1217 device->lb_timeout.expires = jiffies + __tape_do_irq()
1219 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); __tape_do_irq()
1220 add_timer(&device->lb_timeout); __tape_do_irq()
1224 rc = __tape_start_io(device, request); __tape_do_irq()
1226 __tape_end_request(device, request, rc); __tape_do_irq()
1229 rc = __tape_cancel_io(device, request); __tape_do_irq()
1231 __tape_end_request(device, request, rc); __tape_do_irq()
1236 __tape_end_request(device, request, -EIO); __tape_do_irq()
1238 __tape_end_request(device, request, rc); __tape_do_irq()
1245 * Tape device open function used by tape_char frontend.
1248 tape_open(struct tape_device *device) tape_open() argument
1252 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_open()
1253 if (device->tape_state == TS_NOT_OPER) { tape_open()
1256 } else if (device->tape_state == TS_IN_USE) { tape_open()
1259 } else if (device->tape_state == TS_BLKUSE) { tape_open()
1262 } else if (device->discipline != NULL && tape_open()
1263 !try_module_get(device->discipline->owner)) { tape_open()
1267 tape_state_set(device, TS_IN_USE); tape_open()
1270 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_open()
1275 * Tape device release function used by tape_char frontend.
1278 tape_release(struct tape_device *device) tape_release() argument
1280 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_release()
1281 if (device->tape_state == TS_IN_USE) tape_release()
1282 tape_state_set(device, TS_UNUSED); tape_release()
1283 module_put(device->discipline->owner); tape_release()
1284 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_release()
1292 tape_mtop(struct tape_device *device, int mt_op, int mt_count) tape_mtop() argument
1303 fn = device->discipline->mtop_array[mt_op]; tape_mtop()
1312 if ((rc = fn(device, 500)) != 0) tape_mtop()
1315 rc = fn(device, mt_count); tape_mtop()
1317 rc = fn(device, mt_count); tape_mtop()
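tape_mtop() dispatches through the discipline's mtop_array, a table of per-operation handlers indexed by the MTOP code (the surrounding hits also show certain ops getting a first call with a different count before the real one). The core of such a table dispatcher, with dispatch_mtop and its parameters as hypothetical names:

	#include <linux/errno.h>

	struct tape_device;		/* opaque here; defined in tape.h */
	typedef int (*mtop_fn)(struct tape_device *device, int count);

	static int dispatch_mtop(struct tape_device *device, const mtop_fn *table,
				 int nr_ops, int op, int count)
	{
		mtop_fn fn;

		if (op < 0 || op >= nr_ops)
			return -EINVAL;
		fn = table[op];
		if (!fn)
			return -EINVAL;	/* discipline lacks this operation */
		return fn(device, count);
	}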
1355 MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
887 __tape_end_request( struct tape_device * device, struct tape_request * request, int rc) __tape_end_request() argument
tape_class.h
4 * Tape class device support
7 * Based on simple class device code by Greg K-H
18 #include <linux/device.h>
25 struct device *class_device;
31 * Register a tape device and return a pointer to the tape class device
34 * device
35 * The pointer to the struct device of the physical (base) device.
40 * The pointer to the drivers file operations for the tape device.
42 * Pointer to the logical device name (will also be used as kobject name
44 * device.
47 * name from the physical device to the logical device (class).
50 struct device * device,
56 void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
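Putting the prototypes above to use: a hypothetical caller registers one class device per device node, assuming the usual ERR_PTR return convention on failure (example_fops, the device name, and the mode name are made up):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/fs.h>

	struct tape_class_device;	/* from tape_class.h */
	extern struct tape_class_device *register_tape_dev(struct device *device,
			dev_t dev, const struct file_operations *fops,
			char *device_name, char *mode_name);

	static const struct file_operations example_fops;	/* illustrative */

	static struct tape_class_device *example_register(struct device *parent,
							  int major, int minor)
	{
		struct tape_class_device *tcd;

		tcd = register_tape_dev(parent, MKDEV(major, minor),
					&example_fops, "ntibm0", "non-rewinding");
		if (IS_ERR(tcd))
			return NULL;	/* registration failed */
		return tcd;
	}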
tape_proc.c
2 * tape device driver for S/390 and zSeries tapes.
40 struct tape_device *device; tape_proc_show() local
50 device = tape_find_device(n); tape_proc_show()
51 if (IS_ERR(device)) tape_proc_show()
53 spin_lock_irq(get_ccwdev_lock(device->cdev)); tape_proc_show()
55 seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev)); tape_proc_show()
56 seq_printf(m, "%04X/", device->cdev->id.cu_type); tape_proc_show()
57 seq_printf(m, "%02X\t", device->cdev->id.cu_model); tape_proc_show()
58 seq_printf(m, "%04X/", device->cdev->id.dev_type); tape_proc_show()
59 seq_printf(m, "%02X\t\t", device->cdev->id.dev_model); tape_proc_show()
60 if (device->char_data.block_size == 0) tape_proc_show()
63 seq_printf(m, "%i\t", device->char_data.block_size); tape_proc_show()
64 if (device->tape_state >= 0 && tape_proc_show()
65 device->tape_state < TS_SIZE) tape_proc_show()
66 str = tape_state_verbose[device->tape_state]; tape_proc_show()
70 if (!list_empty(&device->req_queue)) { tape_proc_show()
71 request = list_entry(device->req_queue.next, tape_proc_show()
77 seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]); tape_proc_show()
78 spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_proc_show()
79 tape_put_device(device); tape_proc_show()
tape_class.c
4 * Tape class device support
7 * Based on simple class device code by Greg K-H
27 * Register a tape device and return a pointer to the cdev structure.
29 * device
30 * The pointer to the struct device of the physical (base) device.
37 * The pointer to the drivers file operations for the tape device.
39 * The pointer to the name of the character device.
42 struct device * device, register_tape_dev()
77 tcd->class_device = device_create(tape_class, device, register_tape_dev()
84 &device->kobj, register_tape_dev()
106 void unregister_tape_dev(struct device *device, struct tape_class_device *tcd) unregister_tape_dev() argument
109 sysfs_remove_link(&device->kobj, tcd->mode_name); unregister_tape_dev()
41 register_tape_dev( struct device * device, dev_t dev, const struct file_operations *fops, char * device_name, char * mode_name) register_tape_dev() argument
tape_std.c
2 * standard tape device functions for ibm tapes.
38 struct tape_device * device; tape_std_assign_timeout() local
42 device = request->device; tape_std_assign_timeout()
43 BUG_ON(!device); tape_std_assign_timeout()
46 device->cdev_id); tape_std_assign_timeout()
47 rc = tape_cancel_io(device, request); tape_std_assign_timeout()
50 "%i\n", device->cdev_id, rc); tape_std_assign_timeout()
54 tape_std_assign(struct tape_device *device) tape_std_assign() argument
69 * The assign command sometimes blocks if the device is assigned tape_std_assign()
79 rc = tape_do_io_interruptible(device, request); tape_std_assign()
85 DBF_EVENT(3, "%08x: assign failed - device might be busy\n", tape_std_assign()
86 device->cdev_id); tape_std_assign()
88 DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id); tape_std_assign()
98 tape_std_unassign (struct tape_device *device) tape_std_unassign() argument
103 if (device->tape_state == TS_NOT_OPER) { tape_std_unassign()
104 DBF_EVENT(3, "(%08x): Can't unassign device\n", tape_std_unassign()
105 device->cdev_id); tape_std_unassign()
117 if ((rc = tape_do_io(device, request)) != 0) { tape_std_unassign()
118 DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id); tape_std_unassign()
120 DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id); tape_std_unassign()
130 tape_std_display(struct tape_device *device, struct display_struct *disp) tape_std_display() argument
151 rc = tape_do_io_interruptible(device, request); tape_std_display()
160 tape_std_read_block_id(struct tape_device *device, __u64 *id) tape_std_read_block_id() argument
170 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_std_read_block_id()
174 rc = tape_do_io(device, request); tape_std_read_block_id()
183 tape_std_terminate_write(struct tape_device *device) tape_std_terminate_write() argument
187 if(device->required_tapemarks == 0) tape_std_terminate_write()
190 DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor, tape_std_terminate_write()
191 device->required_tapemarks); tape_std_terminate_write()
193 rc = tape_mtop(device, MTWEOF, device->required_tapemarks); tape_std_terminate_write()
197 device->required_tapemarks = 0; tape_std_terminate_write()
198 return tape_mtop(device, MTBSR, 1); tape_std_terminate_write()
207 tape_std_mtload(struct tape_device *device, int count) tape_std_mtload() argument
209 return wait_event_interruptible(device->state_change_wq, tape_std_mtload()
210 (device->medium_state == MS_LOADED)); tape_std_mtload()
217 tape_std_mtsetblk(struct tape_device *device, int count) tape_std_mtsetblk() argument
228 device->char_data.block_size = 0; tape_std_mtsetblk()
231 if (device->char_data.idal_buf != NULL && tape_std_mtsetblk()
232 device->char_data.idal_buf->size == count) tape_std_mtsetblk()
246 if (device->char_data.idal_buf != NULL) tape_std_mtsetblk()
247 idal_buffer_free(device->char_data.idal_buf); tape_std_mtsetblk()
248 device->char_data.idal_buf = new; tape_std_mtsetblk()
249 device->char_data.block_size = count; tape_std_mtsetblk()
251 DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size); tape_std_mtsetblk()
260 tape_std_mtreset(struct tape_device *device, int count) tape_std_mtreset() argument
263 device->char_data.block_size = 0; tape_std_mtreset()
272 tape_std_mtfsf(struct tape_device *device, int mt_count) tape_std_mtfsf() argument
283 device->modeset_byte); tape_std_mtfsf()
288 return tape_do_io_free(device, request); tape_std_mtfsf()
296 tape_std_mtfsr(struct tape_device *device, int mt_count) tape_std_mtfsr() argument
308 device->modeset_byte); tape_std_mtfsr()
313 rc = tape_do_io(device, request); tape_std_mtfsr()
328 tape_std_mtbsr(struct tape_device *device, int mt_count) tape_std_mtbsr() argument
340 device->modeset_byte); tape_std_mtbsr()
345 rc = tape_do_io(device, request); tape_std_mtbsr()
359 tape_std_mtweof(struct tape_device *device, int mt_count) tape_std_mtweof() argument
370 device->modeset_byte); tape_std_mtweof()
375 return tape_do_io_free(device, request); tape_std_mtweof()
384 tape_std_mtbsfm(struct tape_device *device, int mt_count) tape_std_mtbsfm() argument
395 device->modeset_byte); tape_std_mtbsfm()
400 return tape_do_io_free(device, request); tape_std_mtbsfm()
408 tape_std_mtbsf(struct tape_device *device, int mt_count) tape_std_mtbsf() argument
420 device->modeset_byte); tape_std_mtbsf()
424 rc = tape_do_io_free(device, request); tape_std_mtbsf()
426 rc = tape_mtop(device, MTFSR, 1); tape_std_mtbsf()
439 tape_std_mtfsfm(struct tape_device *device, int mt_count) tape_std_mtfsfm() argument
451 device->modeset_byte); tape_std_mtfsfm()
455 rc = tape_do_io_free(device, request); tape_std_mtfsfm()
457 rc = tape_mtop(device, MTBSR, 1); tape_std_mtfsfm()
469 tape_std_mtrew(struct tape_device *device, int mt_count) tape_std_mtrew() argument
479 device->modeset_byte); tape_std_mtrew()
484 return tape_do_io_free(device, request); tape_std_mtrew()
492 tape_std_mtoffl(struct tape_device *device, int mt_count) tape_std_mtoffl() argument
501 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_std_mtoffl()
506 return tape_do_io_free(device, request); tape_std_mtoffl()
513 tape_std_mtnop(struct tape_device *device, int mt_count) tape_std_mtnop() argument
522 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_std_mtnop()
525 return tape_do_io_free(device, request); tape_std_mtnop()
534 tape_std_mteom(struct tape_device *device, int mt_count) tape_std_mteom() argument
541 if ((rc = tape_mtop(device, MTREW, 1)) < 0) tape_std_mteom()
551 if ((rc = tape_mtop(device, MTFSF, 1)) < 0) tape_std_mteom()
553 if ((rc = tape_mtop(device, MTFSR, 1)) < 0) tape_std_mteom()
557 return tape_mtop(device, MTBSR, 1); tape_std_mteom()
564 tape_std_mtreten(struct tape_device *device, int mt_count) tape_std_mtreten() argument
573 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_std_mtreten()
578 tape_do_io_interruptible(device, request); tape_std_mtreten()
580 return tape_mtop(device, MTREW, 1); tape_std_mtreten()
587 tape_std_mterase(struct tape_device *device, int mt_count) tape_std_mterase() argument
596 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_std_mterase()
604 return tape_do_io_free(device, request); tape_std_mterase()
611 tape_std_mtunload(struct tape_device *device, int mt_count) tape_std_mtunload() argument
613 return tape_mtop(device, MTOFFL, mt_count); tape_std_mtunload()
621 tape_std_mtcompression(struct tape_device *device, int mt_count) tape_std_mtcompression() argument
635 *device->modeset_byte &= ~0x08; tape_std_mtcompression()
637 *device->modeset_byte |= 0x08; tape_std_mtcompression()
638 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_std_mtcompression()
641 return tape_do_io_free(device, request); tape_std_mtcompression()
648 tape_std_read_block(struct tape_device *device, size_t count) tape_std_read_block() argument
662 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_std_read_block()
664 device->char_data.idal_buf); tape_std_read_block()
673 tape_std_read_backward(struct tape_device *device, struct tape_request *request) tape_std_read_backward() argument
681 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_std_read_backward()
683 device->char_data.idal_buf); tape_std_read_backward()
692 tape_std_write_block(struct tape_device *device, size_t count) tape_std_write_block() argument
702 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_std_write_block()
704 device->char_data.idal_buf); tape_std_write_block()
713 tape_std_process_eov(struct tape_device *device) tape_std_process_eov() argument
719 if (tape_mtop(device, MTBSR, 1) == 0 && tape_std_process_eov()
720 tape_mtop(device, MTWEOF, 1) == 0) { tape_std_process_eov()
721 tape_mtop(device, MTBSR, 1); tape_std_process_eov()
tape_3590.c
2 * tape device discipline for 3590 tapes.
102 static int crypt_supported(struct tape_device *device) crypt_supported() argument
104 return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device)); crypt_supported()
107 static int crypt_enabled(struct tape_device *device) crypt_enabled() argument
109 return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device)); crypt_enabled()
200 static int tape_3592_kekl_query(struct tape_device *device, tape_3592_kekl_query() argument
225 rc = tape_do_io(device, request); tape_3592_kekl_query()
241 static int tape_3592_ioctl_kekl_query(struct tape_device *device, tape_3592_ioctl_kekl_query() argument
248 if (!crypt_supported(device)) tape_3592_ioctl_kekl_query()
250 if (!crypt_enabled(device)) tape_3592_ioctl_kekl_query()
255 rc = tape_3592_kekl_query(device, ext_kekls); tape_3592_ioctl_kekl_query()
268 static int tape_3590_mttell(struct tape_device *device, int mt_count);
273 static int tape_3592_kekl_set(struct tape_device *device, tape_3592_kekl_set() argument
284 if (tape_3590_mttell(device, 0) != 0) tape_3592_kekl_set()
298 return tape_do_io_free(device, request); tape_3592_kekl_set()
304 static int tape_3592_ioctl_kekl_set(struct tape_device *device, tape_3592_ioctl_kekl_set() argument
311 if (!crypt_supported(device)) tape_3592_ioctl_kekl_set()
313 if (!crypt_enabled(device)) tape_3592_ioctl_kekl_set()
322 rc = tape_3592_kekl_set(device, ext_kekls); tape_3592_ioctl_kekl_set()
331 static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device) __tape_3592_enable_crypt() argument
337 if (!crypt_supported(device)) __tape_3592_enable_crypt()
359 static int tape_3592_enable_crypt(struct tape_device *device) tape_3592_enable_crypt() argument
363 request = __tape_3592_enable_crypt(device); tape_3592_enable_crypt()
366 return tape_do_io_free(device, request); tape_3592_enable_crypt()
369 static void tape_3592_enable_crypt_async(struct tape_device *device) tape_3592_enable_crypt_async() argument
373 request = __tape_3592_enable_crypt(device); tape_3592_enable_crypt_async()
375 tape_do_io_async_free(device, request); tape_3592_enable_crypt_async()
381 static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device) __tape_3592_disable_crypt() argument
387 if (!crypt_supported(device)) __tape_3592_disable_crypt()
407 static int tape_3592_disable_crypt(struct tape_device *device) tape_3592_disable_crypt() argument
411 request = __tape_3592_disable_crypt(device); tape_3592_disable_crypt()
414 return tape_do_io_free(device, request); tape_3592_disable_crypt()
417 static void tape_3592_disable_crypt_async(struct tape_device *device) tape_3592_disable_crypt_async() argument
421 request = __tape_3592_disable_crypt(device); tape_3592_disable_crypt_async()
423 tape_do_io_async_free(device, request); tape_3592_disable_crypt_async()
429 static int tape_3592_ioctl_crypt_set(struct tape_device *device, tape_3592_ioctl_crypt_set() argument
435 if (!crypt_supported(device)) tape_3592_ioctl_crypt_set()
442 return tape_3592_enable_crypt(device); tape_3592_ioctl_crypt_set()
444 return tape_3592_disable_crypt(device); tape_3592_ioctl_crypt_set()
447 static int tape_3590_sense_medium(struct tape_device *device);
452 static int tape_3592_ioctl_crypt_query(struct tape_device *device, tape_3592_ioctl_crypt_query() argument
456 if (!crypt_supported(device)) tape_3592_ioctl_crypt_query()
458 tape_3590_sense_medium(device); tape_3592_ioctl_crypt_query()
459 if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device), tape_3592_ioctl_crypt_query()
460 sizeof(TAPE_3590_CRYPT_INFO(device)))) tape_3592_ioctl_crypt_query()
470 tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) tape_3590_ioctl() argument
479 return tape_std_display(device, &disp); tape_3590_ioctl()
482 return tape_3592_ioctl_kekl_set(device, arg); tape_3590_ioctl()
484 return tape_3592_ioctl_kekl_query(device, arg); tape_3590_ioctl()
486 return tape_3592_ioctl_crypt_set(device, arg); tape_3590_ioctl()
488 return tape_3592_ioctl_crypt_query(device, arg); tape_3590_ioctl()
497 static int tape_3590_sense_medium(struct tape_device *device) tape_3590_sense_medium() argument
506 return tape_do_io_free(device, request); tape_3590_sense_medium()
509 static void tape_3590_sense_medium_async(struct tape_device *device) tape_3590_sense_medium_async() argument
518 tape_do_io_async_free(device, request); tape_3590_sense_medium_async()
525 tape_3590_mttell(struct tape_device *device, int mt_count) tape_3590_mttell() argument
530 rc = tape_std_read_block_id(device, &block_id); tape_3590_mttell()
540 tape_3590_mtseek(struct tape_device *device, int count) tape_3590_mtseek() argument
549 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_3590_mtseek()
553 return tape_do_io_free(device, request); tape_3590_mtseek()
561 tape_3590_read_opposite(struct tape_device *device, tape_3590_read_opposite() argument
572 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_3590_read_opposite()
573 data = device->discdata; tape_3590_read_opposite()
575 device->char_data.idal_buf); tape_3590_read_opposite()
584 * in device state.
597 static void tape_3590_read_attmsg_async(struct tape_device *device) tape_3590_read_attmsg_async() argument
612 tape_do_io_async_free(device, request); tape_3590_read_attmsg_async()
623 struct tape_device *device; member in struct:work_handler_data
636 tape_3590_sense_medium_async(p->device); tape_3590_work_handler()
639 tape_3590_read_attmsg_async(p->device); tape_3590_work_handler()
642 tape_3592_enable_crypt_async(p->device); tape_3590_work_handler()
645 tape_3592_disable_crypt_async(p->device); tape_3590_work_handler()
651 tape_put_device(p->device); tape_3590_work_handler()
656 tape_3590_schedule_work(struct tape_device *device, enum tape_op op) tape_3590_schedule_work() argument
665 p->device = tape_get_device(device); tape_3590_schedule_work()
672 static void tape_3590_med_state_set(struct tape_device *device, tape_3590_med_state_set() argument
677 c_info = &TAPE_3590_CRYPT_INFO(device); tape_3590_med_state_set()
684 tape_med_state_set(device, MS_UNLOADED); tape_3590_med_state_set()
685 TAPE_3590_CRYPT_INFO(device).medium_status = 0; tape_3590_med_state_set()
689 tape_med_state_set(device, MS_LOADED); tape_3590_med_state_set()
692 tape_med_state_set(device, MS_UNKNOWN); tape_3590_med_state_set()
706 * The done handler is called at device/channel end and wakes up the sleeping
710 tape_3590_done(struct tape_device *device, struct tape_request *request) tape_3590_done() argument
729 tape_med_state_set(device, MS_LOADED); tape_3590_done()
732 tape_med_state_set(device, MS_UNLOADED); tape_3590_done()
733 tape_3590_schedule_work(device, TO_CRYPT_OFF); tape_3590_done()
736 tape_3590_med_state_set(device, request->cpdata); tape_3590_done()
739 TAPE_3590_CRYPT_INFO(device).status tape_3590_done()
741 *(device->modeset_byte) |= 0x03; tape_3590_done()
744 TAPE_3590_CRYPT_INFO(device).status tape_3590_done()
746 *(device->modeset_byte) &= ~0x03; tape_3590_done()
768 tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) tape_3590_erp_succeded() argument
772 return tape_3590_done(device, request); tape_3590_erp_succeded()
779 tape_3590_erp_failed(struct tape_device *device, struct tape_request *request, tape_3590_erp_failed() argument
784 tape_dump_sense_dbf(device, request, irb); tape_3590_erp_failed()
792 tape_3590_erp_retry(struct tape_device *device, struct tape_request *request, tape_3590_erp_retry() argument
796 tape_dump_sense_dbf(device, request, irb); tape_3590_erp_retry()
804 tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) tape_3590_unsolicited_irq() argument
811 DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); tape_3590_unsolicited_irq()
813 tape_3590_schedule_work(device, TO_READ_ATTMSG); tape_3590_unsolicited_irq()
815 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); tape_3590_unsolicited_irq()
816 tape_dump_sense_dbf(device, NULL, irb); tape_3590_unsolicited_irq()
819 tape_3590_schedule_work(device, TO_MSEN); tape_3590_unsolicited_irq()
827 tape_3590_erp_basic(struct tape_device *device, struct tape_request *request, tape_3590_erp_basic() argument
836 return tape_3590_erp_failed(device, request, irb, rc); tape_3590_erp_basic()
838 return tape_3590_erp_succeded(device, request); tape_3590_erp_basic()
840 return tape_3590_erp_retry(device, request, irb); tape_3590_erp_basic()
842 return tape_3590_erp_failed(device, request, irb, rc); tape_3590_erp_basic()
853 tape_3590_erp_read_buf_log(struct tape_device *device, tape_3590_erp_read_buf_log() argument
860 return tape_3590_erp_basic(device, request, irb, -EIO); tape_3590_erp_read_buf_log()
867 tape_3590_erp_swap(struct tape_device *device, struct tape_request *request, tape_3590_erp_swap() argument
876 dev_warn (&device->cdev->dev, "The tape medium must be loaded into a " tape_3590_erp_swap()
878 return tape_3590_erp_basic(device, request, irb, -EIO); tape_3590_erp_swap()
885 tape_3590_erp_long_busy(struct tape_device *device, tape_3590_erp_long_busy() argument
896 tape_3590_erp_special_interrupt(struct tape_device *device, tape_3590_erp_special_interrupt() argument
899 return tape_3590_erp_basic(device, request, irb, -EIO); tape_3590_erp_special_interrupt()
906 tape_3590_erp_read_alternate(struct tape_device *device, tape_3590_erp_read_alternate() argument
913 * supported by the device tape_3590_erp_read_alternate()
918 data = device->discdata; tape_3590_erp_read_alternate()
921 device->cdev_id); tape_3590_erp_read_alternate()
925 device->cdev_id); tape_3590_erp_read_alternate()
928 tape_3590_read_opposite(device, request); tape_3590_erp_read_alternate()
929 return tape_3590_erp_retry(device, request, irb); tape_3590_erp_read_alternate()
936 tape_3590_erp_read_opposite(struct tape_device *device, tape_3590_erp_read_opposite() argument
945 tape_3590_read_opposite(device, request); tape_3590_erp_read_opposite()
946 return tape_3590_erp_retry(device, request, irb); tape_3590_erp_read_opposite()
949 return tape_3590_erp_failed(device, request, irb, -EIO); tape_3590_erp_read_opposite()
952 return tape_3590_erp_failed(device, request, irb, -EIO); tape_3590_erp_read_opposite()
960 tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb) tape_3590_print_mim_msg_f0() argument
1012 dev_warn (&device->cdev->dev, "Tape media information: exception %s, " tape_3590_print_mim_msg_f0()
1024 tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb) tape_3590_print_io_sim_msg_f1() argument
1050 snprintf(exception, BUFSIZE, "CU Exception on device path " tape_3590_print_io_sim_msg_f1()
1099 snprintf(service, BUFSIZE, "Repair will disable device" tape_3590_print_io_sim_msg_f1()
1102 snprintf(service, BUFSIZE, "Repair will disable device" tape_3590_print_io_sim_msg_f1()
1124 dev_warn (&device->cdev->dev, "I/O subsystem information: exception" tape_3590_print_io_sim_msg_f1()
1135 tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb) tape_3590_print_dev_sim_msg_f2() argument
1184 snprintf(service, BUFSIZE, "Repair will not impact device " tape_3590_print_dev_sim_msg_f2()
1236 dev_warn (&device->cdev->dev, "Device subsystem information: exception" tape_3590_print_dev_sim_msg_f2()
1247 tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) tape_3590_print_era_msg() argument
1256 dev_warn (&device->cdev->dev, "The tape unit has " tape_3590_print_era_msg()
1260 dev_warn (&device->cdev->dev, "The tape unit has " tape_3590_print_era_msg()
1267 dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, " tape_3590_print_era_msg()
1272 tape_3590_print_mim_msg_f0(device, irb); tape_3590_print_era_msg()
1277 dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x," tape_3590_print_era_msg()
1279 sense->fmt.f71.sev, device->cdev->id.dev_model, tape_3590_print_era_msg()
1283 tape_3590_print_io_sim_msg_f1(device, irb); tape_3590_print_era_msg()
1288 dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x" tape_3590_print_era_msg()
1290 sense->fmt.f71.sev, device->cdev->id.dev_model, tape_3590_print_era_msg()
1294 tape_3590_print_dev_sim_msg_f2(device, irb); tape_3590_print_era_msg()
1301 dev_warn (&device->cdev->dev, "The tape unit has issued an unknown " tape_3590_print_era_msg()
1305 static int tape_3590_crypt_error(struct tape_device *device, tape_3590_crypt_error() argument
1317 return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED); tape_3590_crypt_error()
1320 return tape_3590_erp_basic(device, request, irb, -ENOTCONN); tape_3590_crypt_error()
1322 dev_err (&device->cdev->dev, "The tape unit failed to obtain the " tape_3590_crypt_error()
1325 return tape_3590_erp_basic(device, request, irb, -ENOKEY); tape_3590_crypt_error()
1334 tape_3590_unit_check(struct tape_device *device, struct tape_request *request, tape_3590_unit_check() argument
1351 tape_3590_print_era_msg(device, irb); tape_3590_unit_check()
1352 return tape_3590_erp_read_buf_log(device, request, irb); tape_3590_unit_check()
1355 tape_3590_print_era_msg(device, irb); tape_3590_unit_check()
1356 return tape_3590_erp_read_alternate(device, request, irb); tape_3590_unit_check()
1360 tape_3590_print_era_msg(device, irb); tape_3590_unit_check()
1361 return tape_3590_erp_special_interrupt(device, request, irb); tape_3590_unit_check()
1363 return tape_3590_crypt_error(device, request, irb); tape_3590_unit_check()
1367 device->cdev_id); tape_3590_unit_check()
1368 return tape_3590_erp_basic(device, request, irb, -ENOSPC); tape_3590_unit_check()
1371 device->cdev_id); tape_3590_unit_check()
1372 return tape_3590_erp_basic(device, request, irb, -ENOSPC); tape_3590_unit_check()
1374 DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id); tape_3590_unit_check()
1375 return tape_3590_erp_basic(device, request, irb, -ENOSPC); tape_3590_unit_check()
1379 device->cdev_id); tape_3590_unit_check()
1380 return tape_3590_erp_basic(device, request, irb, -EIO); tape_3590_unit_check()
1383 device->cdev_id); tape_3590_unit_check()
1384 tape_med_state_set(device, MS_UNLOADED); tape_3590_unit_check()
1385 tape_3590_schedule_work(device, TO_CRYPT_OFF); tape_3590_unit_check()
1386 return tape_3590_erp_basic(device, request, irb, 0); tape_3590_unit_check()
1391 * "device intervention" is not very meaningfull tape_3590_unit_check()
1393 tape_med_state_set(device, MS_UNLOADED); tape_3590_unit_check()
1394 tape_3590_schedule_work(device, TO_CRYPT_OFF); tape_3590_unit_check()
1395 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); tape_3590_unit_check()
1398 DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id); tape_3590_unit_check()
1399 tape_3590_print_era_msg(device, irb); tape_3590_unit_check()
1400 return tape_3590_erp_basic(device, request, irb, -EBUSY); tape_3590_unit_check()
1402 DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id); tape_3590_unit_check()
1403 return tape_3590_erp_long_busy(device, request, irb); tape_3590_unit_check()
1408 tape_3590_print_era_msg(device, irb); tape_3590_unit_check()
1409 return tape_3590_erp_swap(device, request, irb); tape_3590_unit_check()
1413 tape_3590_print_era_msg(device, irb); tape_3590_unit_check()
1414 return tape_3590_erp_read_opposite(device, request, tape_3590_unit_check()
1417 return tape_3590_erp_basic(device, request, irb, -EIO); tape_3590_unit_check()
1424 tape_3590_print_era_msg(device, irb); tape_3590_unit_check()
1425 return tape_3590_erp_swap(device, request, irb); tape_3590_unit_check()
1429 return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); tape_3590_unit_check()
1433 tape_med_state_set(device, MS_UNLOADED); tape_3590_unit_check()
1434 tape_3590_schedule_work(device, TO_CRYPT_OFF); tape_3590_unit_check()
1435 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); tape_3590_unit_check()
1438 return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); tape_3590_unit_check()
1441 return tape_3590_erp_basic(device, request, irb, -EPERM); tape_3590_unit_check()
1443 dev_warn (&device->cdev->dev, "A different host has privileged" tape_3590_unit_check()
1445 return tape_3590_erp_basic(device, request, irb, -EPERM); tape_3590_unit_check()
1447 return tape_3590_erp_basic(device, request, irb, -EIO); tape_3590_unit_check()
1455 tape_3590_irq(struct tape_device *device, struct tape_request *request, tape_3590_irq() argument
1459 return tape_3590_unsolicited_irq(device, irb); tape_3590_irq()
1466 return tape_3590_erp_failed(device, request, irb, -ENOSPC); tape_3590_irq()
1470 return tape_3590_unit_check(device, request, irb); tape_3590_irq()
1480 return tape_3590_done(device, request); tape_3590_irq()
1494 tape_dump_sense_dbf(device, request, irb); tape_3590_irq()
1499 static int tape_3590_read_dev_chars(struct tape_device *device, tape_3590_read_dev_chars() argument
1511 rc = tape_do_io(device, request); tape_3590_read_dev_chars()
1519 * Setup device function
1522 tape_3590_setup_device(struct tape_device *device) tape_3590_setup_device() argument
1528 DBF_EVENT(6, "3590 device setup\n"); tape_3590_setup_device()
1533 device->discdata = data; tape_3590_setup_device()
1540 rc = tape_3590_read_dev_chars(device, rdc_data); tape_3590_setup_device()
1542 DBF_LH(3, "Read device characteristics failed!\n"); tape_3590_setup_device()
1545 rc = tape_std_assign(device); tape_3590_setup_device()
1550 tape_3592_disable_crypt(device); tape_3590_setup_device()
1555 rc = tape_3590_sense_medium(device); tape_3590_setup_device()
1570 * Cleanup device function
1573 tape_3590_cleanup_device(struct tape_device *device) tape_3590_cleanup_device() argument
1576 tape_std_unassign(device); tape_3590_cleanup_device()
1578 kfree(device->discdata); tape_3590_cleanup_device()
1579 device->discdata = NULL; tape_3590_cleanup_device()
1702 MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver");
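
The tape_3590_unit_check() excerpt above resolves each sense/ERA condition to an errno-style result fed into tape_3590_erp_basic(). Below is a minimal, self-contained C sketch of that dispatch pattern; the struct and the ERA case values are illustrative stand-ins, not the real s390 tape definitions (note that ENOMEDIUM is a Linux extension to errno.h).

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-in for the interrupt response block; the real
 * struct irb and its sense data live in the s390 I/O layer. */
struct my_irb { unsigned char era; };

/* Map an ERA (error recovery action) code to an errno-style result,
 * mirroring the dispatch style of tape_3590_unit_check().  The case
 * values here are invented for the sketch. */
static int unit_check_to_errno(const struct my_irb *irb)
{
    switch (irb->era) {
    case 0x52: /* end of data */       return -ENOSPC;
    case 0x55: /* write protected */   return -EPERM;
    case 0x5e: /* no medium loaded */  return -ENOMEDIUM;
    default:   /* unrecognized check */ return -EIO;
    }
}

int main(void)
{
    struct my_irb irb = { .era = 0x55 };
    printf("erp result: %d\n", unit_check_to_errno(&irb));
    return 0;
}
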
/linux-4.4.14/drivers/hid/
H A Dhid-roccat.c15 * Module roccat is a char device used to report special events of roccat
18 * not stored in device. The information in these events depends on hid device
48 struct device *dev;
65 struct roccat_device *device; member in struct:roccat_reader
80 struct roccat_device *device = reader->device; roccat_read() local
85 mutex_lock(&device->cbuf_lock); roccat_read()
88 if (reader->cbuf_start == device->cbuf_end) { roccat_read()
89 add_wait_queue(&device->wait, &wait); roccat_read()
93 while (reader->cbuf_start == device->cbuf_end) { roccat_read()
102 if (!device->exist) { roccat_read()
107 mutex_unlock(&device->cbuf_lock); roccat_read()
109 mutex_lock(&device->cbuf_lock); roccat_read()
114 remove_wait_queue(&device->wait, &wait); roccat_read()
121 report = &device->cbuf[reader->cbuf_start]; roccat_read()
126 len = device->report_size > count ? count : device->report_size; roccat_read()
136 mutex_unlock(&device->cbuf_lock); roccat_read()
143 poll_wait(file, &reader->device->wait, wait); roccat_poll()
144 if (reader->cbuf_start != reader->device->cbuf_end) roccat_poll()
146 if (!reader->device->exist) roccat_poll()
155 struct roccat_device *device; roccat_open() local
164 device = devices[minor]; roccat_open()
166 if (!device) { roccat_open()
167 pr_emerg("roccat device with minor %d doesn't exist\n", minor); roccat_open()
172 mutex_lock(&device->readers_lock); roccat_open()
174 if (!device->open++) { roccat_open()
175 /* power on device on adding first reader */ roccat_open()
176 error = hid_hw_power(device->hid, PM_HINT_FULLON); roccat_open()
178 --device->open; roccat_open()
182 error = hid_hw_open(device->hid); roccat_open()
184 hid_hw_power(device->hid, PM_HINT_NORMAL); roccat_open()
185 --device->open; roccat_open()
190 reader->device = device; roccat_open()
192 reader->cbuf_start = device->cbuf_end; roccat_open()
194 list_add_tail(&reader->node, &device->readers); roccat_open()
198 mutex_unlock(&device->readers_lock); roccat_open()
210 struct roccat_device *device; roccat_release() local
214 device = devices[minor]; roccat_release()
215 if (!device) { roccat_release()
217 pr_emerg("roccat device with minor %d doesn't exist\n", minor); roccat_release()
221 mutex_lock(&device->readers_lock); roccat_release()
223 mutex_unlock(&device->readers_lock); roccat_release()
226 if (!--device->open) { roccat_release()
228 if (device->exist) { roccat_release()
229 hid_hw_power(device->hid, PM_HINT_NORMAL); roccat_release()
230 hid_hw_close(device->hid); roccat_release()
232 kfree(device); roccat_release()
243 * @minor: minor device number returned by roccat_connect()
252 struct roccat_device *device; roccat_report_event() local
257 device = devices[minor]; roccat_report_event()
259 new_value = kmemdup(data, device->report_size, GFP_ATOMIC); roccat_report_event()
263 report = &device->cbuf[device->cbuf_end]; roccat_report_event()
269 device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE; roccat_report_event()
271 list_for_each_entry(reader, &device->readers, node) { roccat_report_event()
278 if (reader->cbuf_start == device->cbuf_end) roccat_report_event()
282 wake_up_interruptible(&device->wait); roccat_report_event()
288 * roccat_connect() - create a char device for special event output
289 * @class: the class that's used to create the device. Meant to hold device roccat_connect()
291 * @hid: the hid device the char device should be connected to.
294 * Return value is the minor device number in range [0, ROCCAT_MAX_DEVICES] on roccat_connect()
300 struct roccat_device *device; roccat_connect() local
303 device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL); roccat_connect()
304 if (!device) roccat_connect()
316 devices[minor] = device; roccat_connect()
319 kfree(device); roccat_connect()
323 device->dev = device_create(klass, &hid->dev, roccat_connect()
327 if (IS_ERR(device->dev)) { roccat_connect()
330 temp = PTR_ERR(device->dev); roccat_connect()
331 kfree(device); roccat_connect()
337 init_waitqueue_head(&device->wait); roccat_connect()
338 INIT_LIST_HEAD(&device->readers); roccat_connect()
339 mutex_init(&device->readers_lock); roccat_connect()
340 mutex_init(&device->cbuf_lock); roccat_connect()
341 device->minor = minor; roccat_connect()
342 device->hid = hid; roccat_connect()
343 device->exist = 1; roccat_connect()
344 device->cbuf_end = 0; roccat_connect()
345 device->report_size = report_size; roccat_connect()
351 /* roccat_disconnect() - remove char device from hid device
352 * @minor: the minor device number returned by roccat_connect()
356 struct roccat_device *device; roccat_disconnect() local
359 device = devices[minor]; roccat_disconnect()
362 device->exist = 0; /* TODO exist maybe not needed */ roccat_disconnect()
364 device_destroy(device->dev->class, MKDEV(roccat_major, minor)); roccat_disconnect()
370 if (device->open) { roccat_disconnect()
371 hid_hw_close(device->hid); roccat_disconnect()
372 wake_up_interruptible(&device->wait); roccat_disconnect()
374 kfree(device); roccat_disconnect()
382 struct roccat_device *device; roccat_ioctl() local
388 device = devices[minor]; roccat_ioctl()
389 if (!device) { roccat_ioctl()
396 if (put_user(device->report_size, (int __user *)arg)) roccat_ioctl()
460 MODULE_DESCRIPTION("USB Roccat char device");
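
hid-roccat.c above implements a single-writer event ring: the device owns one write index (cbuf_end) and every reader keeps a private read index (cbuf_start), with start == end meaning "caught up". A stripped-down single-threaded sketch of that indexing scheme follows; names are illustrative, and the real driver additionally serializes with cbuf_lock, parks waiting readers on a wait queue, and nudges lapped readers forward.

#include <stdio.h>

#define CBUF_SIZE 16 /* analogue of ROCCAT_CBUF_SIZE */

struct dev_buf { int cbuf[CBUF_SIZE]; int cbuf_end; };
struct reader  { int cbuf_start; };

static void dev_push(struct dev_buf *d, int event)
{
    d->cbuf[d->cbuf_end] = event;
    d->cbuf_end = (d->cbuf_end + 1) % CBUF_SIZE; /* may lap slow readers */
}

/* Returns 1 and stores an event if one is pending, 0 if the reader
 * has caught up with the writer (start == end means "empty"). */
static int reader_pop(struct reader *r, const struct dev_buf *d, int *out)
{
    if (r->cbuf_start == d->cbuf_end)
        return 0;
    *out = d->cbuf[r->cbuf_start];
    r->cbuf_start = (r->cbuf_start + 1) % CBUF_SIZE;
    return 1;
}

int main(void)
{
    struct dev_buf d = { .cbuf_end = 0 };
    struct reader r = { .cbuf_start = d.cbuf_end }; /* new readers start "caught up" */
    int ev;

    dev_push(&d, 42);
    while (reader_pop(&r, &d, &ev))
        printf("event %d\n", ev);
    return 0;
}
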
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/
H A Dgf100.c29 struct nvkm_device *device = ibus->device; gf100_ibus_intr_hub() local
30 u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400)); gf100_ibus_intr_hub()
31 u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400)); gf100_ibus_intr_hub()
32 u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400)); gf100_ibus_intr_hub()
34 nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000); gf100_ibus_intr_hub()
40 struct nvkm_device *device = ibus->device; gf100_ibus_intr_rop() local
41 u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400)); gf100_ibus_intr_rop()
42 u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400)); gf100_ibus_intr_rop()
43 u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400)); gf100_ibus_intr_rop()
45 nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000); gf100_ibus_intr_rop()
51 struct nvkm_device *device = ibus->device; gf100_ibus_intr_gpc() local
52 u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400)); gf100_ibus_intr_gpc()
53 u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400)); gf100_ibus_intr_gpc()
54 u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400)); gf100_ibus_intr_gpc()
56 nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000); gf100_ibus_intr_gpc()
62 struct nvkm_device *device = ibus->device; gf100_ibus_intr() local
63 u32 intr0 = nvkm_rd32(device, 0x121c58); gf100_ibus_intr()
64 u32 intr1 = nvkm_rd32(device, 0x121c5c); gf100_ibus_intr()
65 u32 hubnr = nvkm_rd32(device, 0x121c70); gf100_ibus_intr()
66 u32 ropnr = nvkm_rd32(device, 0x121c74); gf100_ibus_intr()
67 u32 gpcnr = nvkm_rd32(device, 0x121c78); gf100_ibus_intr()
98 struct nvkm_device *device = ibus->device; gf100_ibus_init() local
99 nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800); gf100_ibus_init()
100 nvkm_wr32(device, 0x12232c, 0x00100064); gf100_ibus_init()
101 nvkm_wr32(device, 0x122330, 0x00100064); gf100_ibus_init()
102 nvkm_wr32(device, 0x122334, 0x00100064); gf100_ibus_init()
103 nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100); gf100_ibus_init()
114 gf100_ibus_new(struct nvkm_device *device, int index, gf100_ibus_new() argument
120 nvkm_subdev_ctor(&gf100_ibus, device, index, 0, ibus); gf100_ibus_new()
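
The ibus interrupt handlers above all follow one register-window pattern: each HUB/ROP/GPC unit owns a fixed-stride block (0x0400 bytes on gf100; gk104 below widens it to 0x0800), and the handler reads addr/data/stat from that block, then acknowledges with a read-modify-write. A self-contained sketch against a fake MMIO window; rd32/wr32/mask32 are local stand-ins for nvkm_rd32/nvkm_wr32/nvkm_mask, which really go through the nvkm_device.

#include <stdint.h>
#include <stdio.h>

/* Fake 2 MiB MMIO window standing in for the real register file. */
static uint32_t mmio[0x80000];

static uint32_t rd32(uint32_t addr)             { return mmio[addr >> 2]; }
static void     wr32(uint32_t addr, uint32_t v) { mmio[addr >> 2] = v; }

/* Read-modify-write like nvkm_mask(): clear `mask` bits, then set `val`. */
static uint32_t mask32(uint32_t addr, uint32_t mask, uint32_t val)
{
    uint32_t old = rd32(addr);
    wr32(addr, (old & ~mask) | val);
    return old;
}

/* gf100-style per-unit decode; unit i owns a 0x0400-byte block. */
static void hub_intr(int i)
{
    uint32_t addr = rd32(0x122120 + i * 0x0400);
    uint32_t data = rd32(0x122124 + i * 0x0400);
    uint32_t stat = rd32(0x122128 + i * 0x0400);

    printf("hub%d: addr=%08x data=%08x stat=%08x\n",
           i, (unsigned)addr, (unsigned)data, (unsigned)stat);
    mask32(0x122128 + i * 0x0400, 0x00000200, 0x00000000); /* ack */
}

int main(void)
{
    wr32(0x122128, 0x00000200); /* pretend unit 0 latched an interrupt */
    hub_intr(0);
    return 0;
}
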
H A Dgk20a.c28 struct nvkm_device *device = ibus->device; gk20a_ibus_init_ibus_ring() local
29 nvkm_mask(device, 0x137250, 0x3f, 0); gk20a_ibus_init_ibus_ring()
31 nvkm_mask(device, 0x000200, 0x20, 0); gk20a_ibus_init_ibus_ring()
33 nvkm_mask(device, 0x000200, 0x20, 0x20); gk20a_ibus_init_ibus_ring()
35 nvkm_wr32(device, 0x12004c, 0x4); gk20a_ibus_init_ibus_ring()
36 nvkm_wr32(device, 0x122204, 0x2); gk20a_ibus_init_ibus_ring()
37 nvkm_rd32(device, 0x122204); gk20a_ibus_init_ibus_ring()
43 nvkm_wr32(device, 0x122354, 0x800); gk20a_ibus_init_ibus_ring()
44 nvkm_wr32(device, 0x128328, 0x800); gk20a_ibus_init_ibus_ring()
45 nvkm_wr32(device, 0x124320, 0x800); gk20a_ibus_init_ibus_ring()
51 struct nvkm_device *device = ibus->device; gk20a_ibus_intr() local
52 u32 status0 = nvkm_rd32(device, 0x120058); gk20a_ibus_intr()
60 nvkm_mask(device, 0x12004c, 0x2, 0x2); gk20a_ibus_intr()
61 nvkm_msec(device, 2000, gk20a_ibus_intr()
62 if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f)) gk20a_ibus_intr()
81 gk20a_ibus_new(struct nvkm_device *device, int index, gk20a_ibus_new() argument
87 nvkm_subdev_ctor(&gk20a_ibus, device, index, 0, ibus); gk20a_ibus_new()
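
gk20a_ibus_intr() above uses the nvkm_msec() idiom: poll a register until the given bits clear, bounded by a timeout. A hedged userspace sketch of the same loop; rd32() here is a fake that self-clears after a few reads, and the kernel helper interleaves its delays differently.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_reg = 0x3f;
static uint32_t rd32(uint32_t addr) { (void)addr; return fake_reg ? fake_reg-- : 0; }

/* Poll until `bits` clear in the register at `addr`, or time out. */
static int poll_clear(uint32_t addr, uint32_t bits, int timeout_ms)
{
    struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 }; /* 1 ms */

    while (timeout_ms-- > 0) {
        if (!(rd32(addr) & bits))
            return 0;          /* bits cleared: e.g. ring reset finished */
        nanosleep(&ts, NULL);  /* stand-in for the kernel's delay logic */
    }
    return -1;                 /* timed out, as nvkm_msec() reports */
}

int main(void)
{
    printf("poll result: %d\n", poll_clear(0x12004c, 0x0000003f, 2000));
    return 0;
}
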
H A Dgk104.c29 struct nvkm_device *device = ibus->device; gk104_ibus_intr_hub() local
30 u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800)); gk104_ibus_intr_hub()
31 u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800)); gk104_ibus_intr_hub()
32 u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800)); gk104_ibus_intr_hub()
34 nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000); gk104_ibus_intr_hub()
40 struct nvkm_device *device = ibus->device; gk104_ibus_intr_rop() local
41 u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800)); gk104_ibus_intr_rop()
42 u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800)); gk104_ibus_intr_rop()
43 u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800)); gk104_ibus_intr_rop()
45 nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000); gk104_ibus_intr_rop()
51 struct nvkm_device *device = ibus->device; gk104_ibus_intr_gpc() local
52 u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800)); gk104_ibus_intr_gpc()
53 u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800)); gk104_ibus_intr_gpc()
54 u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800)); gk104_ibus_intr_gpc()
56 nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000); gk104_ibus_intr_gpc()
62 struct nvkm_device *device = ibus->device; gk104_ibus_intr() local
63 u32 intr0 = nvkm_rd32(device, 0x120058); gk104_ibus_intr()
64 u32 intr1 = nvkm_rd32(device, 0x12005c); gk104_ibus_intr()
65 u32 hubnr = nvkm_rd32(device, 0x120070); gk104_ibus_intr()
66 u32 ropnr = nvkm_rd32(device, 0x120074); gk104_ibus_intr()
67 u32 gpcnr = nvkm_rd32(device, 0x120078); gk104_ibus_intr()
98 struct nvkm_device *device = ibus->device; gk104_ibus_init() local
99 nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000); gk104_ibus_init()
100 nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200); gk104_ibus_init()
101 nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800); gk104_ibus_init()
102 nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100); gk104_ibus_init()
103 nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff); gk104_ibus_init()
104 nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000200); gk104_ibus_init()
105 nvkm_mask(device, 0x122358, 0x0003ffff, 0x00002880); gk104_ibus_init()
117 gk104_ibus_new(struct nvkm_device *device, int index, gk104_ibus_new() argument
123 nvkm_subdev_ctor(&gk104_ibus, device, index, 0, ibus); gk104_ibus_new()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/device/
H A Dctrl.h4 #include <core/device.h>
8 struct nvkm_device *device; member in struct:nvkm_control
H A Duser.c38 struct nvkm_device *device; member in struct:nvkm_udevice
45 struct nvkm_device *device = udev->device; nvkm_udevice_info() local
46 struct nvkm_fb *fb = device->fb; nvkm_udevice_info()
47 struct nvkm_instmem *imem = device->imem; nvkm_udevice_info()
53 nvif_ioctl(object, "device info size %d\n", size); nvkm_udevice_info()
55 nvif_ioctl(object, "device info vers %d\n", args->v0.version); nvkm_udevice_info()
59 switch (device->chipset) { nvkm_udevice_info()
73 switch (device->type) { nvkm_udevice_info()
93 switch (device->card_type) { nvkm_udevice_info()
109 args->v0.chipset = device->chipset; nvkm_udevice_info()
110 args->v0.revision = device->chiprev; nvkm_udevice_info()
118 strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip)); nvkm_udevice_info()
119 strncpy(args->v0.name, device->name, sizeof(args->v0.name)); nvkm_udevice_info()
126 struct nvkm_device *device = udev->device; nvkm_udevice_time() local
133 args->v0.time = nvkm_timer_read(device->timer); nvkm_udevice_time()
158 *data = nvkm_rd08(udev->device, addr); nvkm_udevice_rd08()
166 *data = nvkm_rd16(udev->device, addr); nvkm_udevice_rd16()
174 *data = nvkm_rd32(udev->device, addr); nvkm_udevice_rd32()
182 nvkm_wr08(udev->device, addr, data); nvkm_udevice_wr08()
190 nvkm_wr16(udev->device, addr, data); nvkm_udevice_wr16()
198 nvkm_wr32(udev->device, addr, data); nvkm_udevice_wr32()
206 struct nvkm_device *device = udev->device; nvkm_udevice_map() local
207 *addr = device->func->resource_addr(device, 0); nvkm_udevice_map()
208 *size = device->func->resource_size(device, 0); nvkm_udevice_map()
216 struct nvkm_device *device = udev->device; nvkm_udevice_fini() local
219 mutex_lock(&device->mutex); nvkm_udevice_fini()
220 if (!--device->refcount) { nvkm_udevice_fini()
221 ret = nvkm_device_fini(device, suspend); nvkm_udevice_fini()
223 device->refcount++; nvkm_udevice_fini()
229 mutex_unlock(&device->mutex); nvkm_udevice_fini()
237 struct nvkm_device *device = udev->device; nvkm_udevice_init() local
240 mutex_lock(&device->mutex); nvkm_udevice_init()
241 if (!device->refcount++) { nvkm_udevice_init()
242 ret = nvkm_device_init(device); nvkm_udevice_init()
244 device->refcount--; nvkm_udevice_init()
250 mutex_unlock(&device->mutex); nvkm_udevice_init()
260 return sclass->ctor(udev->device, oclass, data, size, pobject); nvkm_udevice_child_new()
268 struct nvkm_device *device = udev->device; nvkm_udevice_child_get() local
278 if (!(engine = nvkm_device_engine(device, i)) || nvkm_udevice_child_get()
336 nvif_ioctl(parent, "create device size %d\n", size); nvkm_udevice_new()
338 nvif_ioctl(parent, "create device v%d device %016llx\n", nvkm_udevice_new()
339 args->v0.version, args->v0.device); nvkm_udevice_new()
354 /* find the device that matches what the client requested */ nvkm_udevice_new()
355 if (args->v0.device != ~0) nvkm_udevice_new()
356 udev->device = nvkm_device_find(args->v0.device); nvkm_udevice_new()
358 udev->device = nvkm_device_find(client->device); nvkm_udevice_new()
359 if (!udev->device) nvkm_udevice_new()
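
nvkm_udevice_init()/nvkm_udevice_fini() above gate the expensive device init/fini on a refcount under device->mutex: the first opener really initializes, the last closer really shuts down, and a failed transition rolls the counter back. A self-contained pthread sketch of that discipline, with hw_init/hw_fini standing in for nvkm_device_init/nvkm_device_fini; compile with -pthread.

#include <pthread.h>
#include <stdio.h>

struct dev {
    pthread_mutex_t mutex;
    int refcount;
};

static int hw_init(void) { puts("hw init"); return 0; }
static int hw_fini(void) { puts("hw fini"); return 0; }

static int dev_open(struct dev *d)
{
    int ret = 0;

    pthread_mutex_lock(&d->mutex);
    if (!d->refcount++) {          /* 0 -> 1: really initialize */
        ret = hw_init();
        if (ret)
            d->refcount--;         /* roll back on failure */
    }
    pthread_mutex_unlock(&d->mutex);
    return ret;
}

static int dev_close(struct dev *d)
{
    int ret = 0;

    pthread_mutex_lock(&d->mutex);
    if (!--d->refcount) {          /* 1 -> 0: really shut down */
        ret = hw_fini();
        if (ret)
            d->refcount++;         /* stay "open" if fini failed */
    }
    pthread_mutex_unlock(&d->mutex);
    return ret;
}

int main(void)
{
    struct dev d = { PTHREAD_MUTEX_INITIALIZER, 0 };
    dev_open(&d); dev_open(&d);    /* second open is refcount-only */
    dev_close(&d); dev_close(&d);  /* second close powers down */
    return 0;
}
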
H A Dbase.c38 struct nvkm_device *device; nvkm_device_find_locked() local
39 list_for_each_entry(device, &nv_devices, head) { nvkm_device_find_locked()
40 if (device->handle == handle) nvkm_device_find_locked()
41 return device; nvkm_device_find_locked()
49 struct nvkm_device *device; nvkm_device_find() local
51 device = nvkm_device_find_locked(handle); nvkm_device_find()
53 return device; nvkm_device_find()
59 struct nvkm_device *device; nvkm_device_list() local
62 list_for_each_entry(device, &nv_devices, head) { nvkm_device_list()
64 name[nr - 1] = device->handle; nvkm_device_list()
2066 nvkm_device_subdev(struct nvkm_device *device, int index) nvkm_device_subdev() argument
2070 if (device->disable_mask & (1ULL << index)) nvkm_device_subdev()
2075 _(BAR , device->bar , &device->bar->subdev); nvkm_device_subdev()
2076 _(VBIOS , device->bios , &device->bios->subdev); nvkm_device_subdev()
2077 _(BUS , device->bus , &device->bus->subdev); nvkm_device_subdev()
2078 _(CLK , device->clk , &device->clk->subdev); nvkm_device_subdev()
2079 _(DEVINIT, device->devinit, &device->devinit->subdev); nvkm_device_subdev()
2080 _(FB , device->fb , &device->fb->subdev); nvkm_device_subdev()
2081 _(FUSE , device->fuse , &device->fuse->subdev); nvkm_device_subdev()
2082 _(GPIO , device->gpio , &device->gpio->subdev); nvkm_device_subdev()
2083 _(I2C , device->i2c , &device->i2c->subdev); nvkm_device_subdev()
2084 _(IBUS , device->ibus , device->ibus); nvkm_device_subdev()
2085 _(INSTMEM, device->imem , &device->imem->subdev); nvkm_device_subdev()
2086 _(LTC , device->ltc , &device->ltc->subdev); nvkm_device_subdev()
2087 _(MC , device->mc , &device->mc->subdev); nvkm_device_subdev()
2088 _(MMU , device->mmu , &device->mmu->subdev); nvkm_device_subdev()
2089 _(MXM , device->mxm , device->mxm); nvkm_device_subdev()
2090 _(PCI , device->pci , &device->pci->subdev); nvkm_device_subdev()
2091 _(PMU , device->pmu , &device->pmu->subdev); nvkm_device_subdev()
2092 _(THERM , device->therm , &device->therm->subdev); nvkm_device_subdev()
2093 _(TIMER , device->timer , &device->timer->subdev); nvkm_device_subdev()
2094 _(VOLT , device->volt , &device->volt->subdev); nvkm_device_subdev()
2097 engine = nvkm_device_engine(device, index); nvkm_device_subdev()
2106 nvkm_device_engine(struct nvkm_device *device, int index) nvkm_device_engine() argument
2108 if (device->disable_mask & (1ULL << index)) nvkm_device_engine()
2113 _(BSP , device->bsp , device->bsp); nvkm_device_engine()
2114 _(CE0 , device->ce[0] , device->ce[0]); nvkm_device_engine()
2115 _(CE1 , device->ce[1] , device->ce[1]); nvkm_device_engine()
2116 _(CE2 , device->ce[2] , device->ce[2]); nvkm_device_engine()
2117 _(CIPHER , device->cipher , device->cipher); nvkm_device_engine()
2118 _(DISP , device->disp , &device->disp->engine); nvkm_device_engine()
2119 _(DMAOBJ , device->dma , &device->dma->engine); nvkm_device_engine()
2120 _(FIFO , device->fifo , &device->fifo->engine); nvkm_device_engine()
2121 _(GR , device->gr , &device->gr->engine); nvkm_device_engine()
2122 _(IFB , device->ifb , device->ifb); nvkm_device_engine()
2123 _(ME , device->me , device->me); nvkm_device_engine()
2124 _(MPEG , device->mpeg , device->mpeg); nvkm_device_engine()
2125 _(MSENC , device->msenc , device->msenc); nvkm_device_engine()
2126 _(MSPDEC , device->mspdec , device->mspdec); nvkm_device_engine()
2127 _(MSPPP , device->msppp , device->msppp); nvkm_device_engine()
2128 _(MSVLD , device->msvld , device->msvld); nvkm_device_engine()
2129 _(PM , device->pm , &device->pm->engine); nvkm_device_engine()
2130 _(SEC , device->sec , device->sec); nvkm_device_engine()
2131 _(SW , device->sw , &device->sw->engine); nvkm_device_engine()
2132 _(VIC , device->vic , device->vic); nvkm_device_engine()
2133 _(VP , device->vp , device->vp); nvkm_device_engine()
2143 nvkm_device_fini(struct nvkm_device *device, bool suspend) nvkm_device_fini() argument
2150 nvdev_trace(device, "%s running...\n", action); nvkm_device_fini()
2153 nvkm_acpi_fini(device); nvkm_device_fini()
2156 if ((subdev = nvkm_device_subdev(device, i))) { nvkm_device_fini()
2164 if (device->func->fini) nvkm_device_fini()
2165 device->func->fini(device, suspend); nvkm_device_fini()
2168 nvdev_trace(device, "%s completed in %lldus...\n", action, time); nvkm_device_fini()
2173 if ((subdev = nvkm_device_subdev(device, i))) { nvkm_device_fini()
2180 nvdev_trace(device, "%s failed with %d\n", action, ret); nvkm_device_fini()
2185 nvkm_device_preinit(struct nvkm_device *device) nvkm_device_preinit() argument
2191 nvdev_trace(device, "preinit running...\n"); nvkm_device_preinit()
2194 if (device->func->preinit) { nvkm_device_preinit()
2195 ret = device->func->preinit(device); nvkm_device_preinit()
2201 if ((subdev = nvkm_device_subdev(device, i))) { nvkm_device_preinit()
2208 ret = nvkm_devinit_post(device->devinit, &device->disable_mask); nvkm_device_preinit()
2213 nvdev_trace(device, "preinit completed in %lldus\n", time); nvkm_device_preinit()
2217 nvdev_error(device, "preinit failed with %d\n", ret); nvkm_device_preinit()
2222 nvkm_device_init(struct nvkm_device *device) nvkm_device_init() argument
2228 ret = nvkm_device_preinit(device); nvkm_device_init()
2232 nvkm_device_fini(device, false); nvkm_device_init()
2234 nvdev_trace(device, "init running...\n"); nvkm_device_init()
2237 if (device->func->init) { nvkm_device_init()
2238 ret = device->func->init(device); nvkm_device_init()
2244 if ((subdev = nvkm_device_subdev(device, i))) { nvkm_device_init()
2251 nvkm_acpi_init(device); nvkm_device_init()
2254 nvdev_trace(device, "init completed in %lldus\n", time); nvkm_device_init()
2259 if ((subdev = nvkm_device_subdev(device, i))) nvkm_device_init()
2264 nvdev_error(device, "init failed with %d\n", ret); nvkm_device_init()
2271 struct nvkm_device *device = *pdevice; nvkm_device_del() local
2273 if (device) { nvkm_device_del()
2275 device->disable_mask = 0; nvkm_device_del()
2278 nvkm_device_subdev(device, i); nvkm_device_del()
2282 nvkm_event_fini(&device->event); nvkm_device_del()
2284 if (device->pri) nvkm_device_del()
2285 iounmap(device->pri); nvkm_device_del()
2286 list_del(&device->head); nvkm_device_del()
2288 if (device->func->dtor) nvkm_device_del()
2289 *pdevice = device->func->dtor(device); nvkm_device_del()
2300 struct device *dev, enum nvkm_device_type type, u64 handle,
2303 struct nvkm_device *device)
2316 device->func = func;
2317 device->quirk = quirk;
2318 device->dev = dev;
2319 device->type = type;
2320 device->handle = handle;
2321 device->cfgopt = cfg;
2322 device->dbgopt = dbg;
2323 device->name = name;
2324 list_add_tail(&device->head, &nv_devices);
2325 device->debug = nvkm_dbgopt(device->dbgopt, "device");
2327 ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
2331 mmio_base = device->func->resource_addr(device, 0);
2332 mmio_size = device->func->resource_size(device, 0);
2357 device->chipset = (boot0 & 0x1ff00000) >> 20;
2358 device->chiprev = (boot0 & 0x000000ff);
2359 switch (device->chipset & 0x1f0) {
2361 if (0x461 & (1 << (device->chipset & 0xf)))
2362 device->card_type = NV_10;
2364 device->card_type = NV_11;
2365 device->chiprev = 0x00;
2368 case 0x020: device->card_type = NV_20; break;
2369 case 0x030: device->card_type = NV_30; break;
2371 case 0x060: device->card_type = NV_40; break;
2375 case 0x0a0: device->card_type = NV_50; break;
2377 case 0x0d0: device->card_type = NV_C0; break;
2380 case 0x100: device->card_type = NV_E0; break;
2382 case 0x120: device->card_type = GM100; break;
2389 device->chipset = 0x05;
2391 device->chipset = 0x04;
2392 device->card_type = NV_04;
2395 switch (device->chipset) {
2396 case 0x004: device->chip = &nv4_chipset; break;
2397 case 0x005: device->chip = &nv5_chipset; break;
2398 case 0x010: device->chip = &nv10_chipset; break;
2399 case 0x011: device->chip = &nv11_chipset; break;
2400 case 0x015: device->chip = &nv15_chipset; break;
2401 case 0x017: device->chip = &nv17_chipset; break;
2402 case 0x018: device->chip = &nv18_chipset; break;
2403 case 0x01a: device->chip = &nv1a_chipset; break;
2404 case 0x01f: device->chip = &nv1f_chipset; break;
2405 case 0x020: device->chip = &nv20_chipset; break;
2406 case 0x025: device->chip = &nv25_chipset; break;
2407 case 0x028: device->chip = &nv28_chipset; break;
2408 case 0x02a: device->chip = &nv2a_chipset; break;
2409 case 0x030: device->chip = &nv30_chipset; break;
2410 case 0x031: device->chip = &nv31_chipset; break;
2411 case 0x034: device->chip = &nv34_chipset; break;
2412 case 0x035: device->chip = &nv35_chipset; break;
2413 case 0x036: device->chip = &nv36_chipset; break;
2414 case 0x040: device->chip = &nv40_chipset; break;
2415 case 0x041: device->chip = &nv41_chipset; break;
2416 case 0x042: device->chip = &nv42_chipset; break;
2417 case 0x043: device->chip = &nv43_chipset; break;
2418 case 0x044: device->chip = &nv44_chipset; break;
2419 case 0x045: device->chip = &nv45_chipset; break;
2420 case 0x046: device->chip = &nv46_chipset; break;
2421 case 0x047: device->chip = &nv47_chipset; break;
2422 case 0x049: device->chip = &nv49_chipset; break;
2423 case 0x04a: device->chip = &nv4a_chipset; break;
2424 case 0x04b: device->chip = &nv4b_chipset; break;
2425 case 0x04c: device->chip = &nv4c_chipset; break;
2426 case 0x04e: device->chip = &nv4e_chipset; break;
2427 case 0x050: device->chip = &nv50_chipset; break;
2428 case 0x063: device->chip = &nv63_chipset; break;
2429 case 0x067: device->chip = &nv67_chipset; break;
2430 case 0x068: device->chip = &nv68_chipset; break;
2431 case 0x084: device->chip = &nv84_chipset; break;
2432 case 0x086: device->chip = &nv86_chipset; break;
2433 case 0x092: device->chip = &nv92_chipset; break;
2434 case 0x094: device->chip = &nv94_chipset; break;
2435 case 0x096: device->chip = &nv96_chipset; break;
2436 case 0x098: device->chip = &nv98_chipset; break;
2437 case 0x0a0: device->chip = &nva0_chipset; break;
2438 case 0x0a3: device->chip = &nva3_chipset; break;
2439 case 0x0a5: device->chip = &nva5_chipset; break;
2440 case 0x0a8: device->chip = &nva8_chipset; break;
2441 case 0x0aa: device->chip = &nvaa_chipset; break;
2442 case 0x0ac: device->chip = &nvac_chipset; break;
2443 case 0x0af: device->chip = &nvaf_chipset; break;
2444 case 0x0c0: device->chip = &nvc0_chipset; break;
2445 case 0x0c1: device->chip = &nvc1_chipset; break;
2446 case 0x0c3: device->chip = &nvc3_chipset; break;
2447 case 0x0c4: device->chip = &nvc4_chipset; break;
2448 case 0x0c8: device->chip = &nvc8_chipset; break;
2449 case 0x0ce: device->chip = &nvce_chipset; break;
2450 case 0x0cf: device->chip = &nvcf_chipset; break;
2451 case 0x0d7: device->chip = &nvd7_chipset; break;
2452 case 0x0d9: device->chip = &nvd9_chipset; break;
2453 case 0x0e4: device->chip = &nve4_chipset; break;
2454 case 0x0e6: device->chip = &nve6_chipset; break;
2455 case 0x0e7: device->chip = &nve7_chipset; break;
2456 case 0x0ea: device->chip = &nvea_chipset; break;
2457 case 0x0f0: device->chip = &nvf0_chipset; break;
2458 case 0x0f1: device->chip = &nvf1_chipset; break;
2459 case 0x106: device->chip = &nv106_chipset; break;
2460 case 0x108: device->chip = &nv108_chipset; break;
2461 case 0x117: device->chip = &nv117_chipset; break;
2462 case 0x124: device->chip = &nv124_chipset; break;
2463 case 0x126: device->chip = &nv126_chipset; break;
2464 case 0x12b: device->chip = &nv12b_chipset; break;
2466 nvdev_error(device, "unknown chipset (%08x)\n", boot0);
2470 nvdev_info(device, "NVIDIA %s (%08x)\n",
2471 device->chip->name, boot0);
2474 if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
2475 (device->chipset >= 0x20 && device->chipset < 0x25))
2481 case 0x00000000: device->crystal = 13500; break;
2482 case 0x00000040: device->crystal = 14318; break;
2483 case 0x00400000: device->crystal = 27000; break;
2484 case 0x00400040: device->crystal = 25000; break;
2487 device->chip = &null_chipset;
2490 if (!device->name)
2491 device->name = device->chip->name;
2494 device->pri = ioremap(mmio_base, mmio_size);
2495 if (!device->pri) {
2496 nvdev_error(device, "unable to map PRI\n");
2501 mutex_init(&device->mutex);
2505 if (device->chip->m && (subdev_mask & (1ULL << (s)))) { \
2506 ret = device->chip->m(device, (s), &device->m); \
2508 subdev = nvkm_device_subdev(device, (s)); \
2510 device->m = NULL; \
2512 nvdev_error(device, "%s ctor failed, %d\n", \
2298 nvkm_device_ctor(const struct nvkm_device_func *func, const struct nvkm_device_quirk *quirk, struct device *dev, enum nvkm_device_type type, u64 handle, const char *name, const char *cfg, const char *dbg, bool detect, bool mmio, u64 subdev_mask, struct nvkm_device *device) nvkm_device_ctor() argument
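
nvkm_device_ctor() above derives the chipset from the BOOT_0 register: bits 20..28 hold the chipset id, bits 0..7 the revision, and (chipset & 0x1f0) buckets it into a card generation before the big per-chipset switch selects the exact chipset table. A reduced sketch of that decode; the family buckets are a small subset of the real switch, and the sample boot0 value is made up.

#include <stdint.h>
#include <stdio.h>

static void decode_boot0(uint32_t boot0)
{
    uint32_t chipset = (boot0 & 0x1ff00000) >> 20;
    uint32_t chiprev =  boot0 & 0x000000ff;
    const char *family;

    switch (chipset & 0x1f0) {
    case 0x0c0:
    case 0x0d0: family = "NV_C0 (Fermi)";   break;
    case 0x0e0:
    case 0x0f0:
    case 0x100: family = "NV_E0 (Kepler)";  break;
    case 0x110:
    case 0x120: family = "GM100 (Maxwell)"; break;
    default:    family = "other";           break;
    }
    printf("chipset %03x rev %02x -> %s\n",
           (unsigned)chipset, (unsigned)chiprev, family);
}

int main(void)
{
    decode_boot0(0x0e4000a1); /* invented boot0 value for illustration */
    return 0;
}
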
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
H A Dnv04.c52 struct nvkm_device *device = fifo->base.engine.subdev.device; variable in typeref:struct:nvkm_device
58 nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
59 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
70 nvkm_msec(device, 2000,
71 u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
76 if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
78 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
80 nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
88 struct nvkm_device *device = fifo->base.engine.subdev.device; variable in typeref:struct:nvkm_device
91 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
92 nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
108 nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data) nv04_fifo_swmthd() argument
110 struct nvkm_sw *sw = device->sw; nv04_fifo_swmthd()
114 u32 engine = nvkm_rd32(device, 0x003280); nv04_fifo_swmthd()
119 nvkm_wr32(device, 0x003280, (engine &= ~mask)); nv04_fifo_swmthd()
121 data = nvkm_rd32(device, 0x003258) & 0x0000ffff; nv04_fifo_swmthd()
138 struct nvkm_device *device = subdev->device; nv04_fifo_cache_error() local
141 u32 pull0 = nvkm_rd32(device, 0x003250); nv04_fifo_cache_error()
152 if (device->card_type < NV_40) { nv04_fifo_cache_error()
153 mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr)); nv04_fifo_cache_error()
154 data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr)); nv04_fifo_cache_error()
156 mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr)); nv04_fifo_cache_error()
157 data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr)); nv04_fifo_cache_error()
161 !nv04_fifo_swmthd(device, chid, mthd, data)) { nv04_fifo_cache_error()
170 nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0); nv04_fifo_cache_error()
171 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); nv04_fifo_cache_error()
173 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, nv04_fifo_cache_error()
174 nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1); nv04_fifo_cache_error()
175 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4); nv04_fifo_cache_error()
176 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, nv04_fifo_cache_error()
177 nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1); nv04_fifo_cache_error()
178 nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0); nv04_fifo_cache_error()
180 nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, nv04_fifo_cache_error()
181 nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); nv04_fifo_cache_error()
182 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); nv04_fifo_cache_error()
189 struct nvkm_device *device = subdev->device; nv04_fifo_dma_pusher() local
190 u32 dma_get = nvkm_rd32(device, 0x003244); nv04_fifo_dma_pusher()
191 u32 dma_put = nvkm_rd32(device, 0x003240); nv04_fifo_dma_pusher()
192 u32 push = nvkm_rd32(device, 0x003220); nv04_fifo_dma_pusher()
193 u32 state = nvkm_rd32(device, 0x003228); nv04_fifo_dma_pusher()
200 if (device->card_type == NV_50) { nv04_fifo_dma_pusher()
201 u32 ho_get = nvkm_rd32(device, 0x003328); nv04_fifo_dma_pusher()
202 u32 ho_put = nvkm_rd32(device, 0x003320); nv04_fifo_dma_pusher()
203 u32 ib_get = nvkm_rd32(device, 0x003334); nv04_fifo_dma_pusher()
204 u32 ib_put = nvkm_rd32(device, 0x003330); nv04_fifo_dma_pusher()
214 nvkm_wr32(device, 0x003364, 0x00000000); nv04_fifo_dma_pusher()
216 nvkm_wr32(device, 0x003244, dma_put); nv04_fifo_dma_pusher()
217 nvkm_wr32(device, 0x003328, ho_put); nv04_fifo_dma_pusher()
220 nvkm_wr32(device, 0x003334, ib_put); nv04_fifo_dma_pusher()
228 nvkm_wr32(device, 0x003244, dma_put); nv04_fifo_dma_pusher()
232 nvkm_wr32(device, 0x003228, 0x00000000); nv04_fifo_dma_pusher()
233 nvkm_wr32(device, 0x003220, 0x00000001); nv04_fifo_dma_pusher()
234 nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); nv04_fifo_dma_pusher()
242 struct nvkm_device *device = subdev->device; nv04_fifo_intr() local
243 u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0); nv04_fifo_intr()
244 u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask; nv04_fifo_intr()
247 reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1; nv04_fifo_intr()
248 nvkm_wr32(device, NV03_PFIFO_CACHES, 0); nv04_fifo_intr()
250 chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1); nv04_fifo_intr()
251 get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET); nv04_fifo_intr()
265 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); nv04_fifo_intr()
267 sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE); nv04_fifo_intr()
268 nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); nv04_fifo_intr()
270 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4); nv04_fifo_intr()
271 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); nv04_fifo_intr()
274 if (device->card_type == NV_50) { nv04_fifo_intr()
277 nvkm_wr32(device, 0x002100, 0x00000010); nv04_fifo_intr()
281 nvkm_wr32(device, 0x002100, 0x40000000); nv04_fifo_intr()
289 nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); nv04_fifo_intr()
290 nvkm_wr32(device, NV03_PFIFO_INTR_0, stat); nv04_fifo_intr()
293 nvkm_wr32(device, NV03_PFIFO_CACHES, reassign); nv04_fifo_intr()
300 struct nvkm_device *device = fifo->base.engine.subdev.device; nv04_fifo_init() local
301 struct nvkm_instmem *imem = device->imem; nv04_fifo_init()
306 nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff); nv04_fifo_init()
307 nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); nv04_fifo_init()
309 nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | nv04_fifo_init()
312 nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8); nv04_fifo_init()
313 nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8); nv04_fifo_init()
315 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1); nv04_fifo_init()
317 nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); nv04_fifo_init()
318 nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); nv04_fifo_init()
320 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1); nv04_fifo_init()
321 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); nv04_fifo_init()
322 nvkm_wr32(device, NV03_PFIFO_CACHES, 1); nv04_fifo_init()
326 nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, nv04_fifo_new_() argument
338 ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base); nv04_fifo_new_()
359 nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) nv04_fifo_new() argument
361 return nv04_fifo_new_(&nv04_fifo, device, index, 16, nv04_fifo_new()
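
nv04_fifo_intr() above shows the PFIFO interrupt discipline reused across generations (see also the gf100/gk104 handlers below): read the status register AND'd with the enable mask, acknowledge each handled source by writing its bit back, and finally mask off whatever remains so an unknown source cannot wedge the machine in an interrupt storm. A minimal sketch with the two registers modelled as plain variables:

#include <stdint.h>
#include <stdio.h>

static uint32_t intr_0  = 0x00000101;  /* pretend: cache error + unknown bit */
static uint32_t intr_en = 0xffffffff;

#define INTR_CACHE_ERROR 0x00000001

static void fifo_intr(void)
{
    uint32_t stat = intr_0 & intr_en;

    if (stat & INTR_CACHE_ERROR) {
        puts("cache error: recovered");
        intr_0 &= ~INTR_CACHE_ERROR;   /* ack: wr32(INTR_0, bit) */
        stat   &= ~INTR_CACHE_ERROR;
    }

    if (stat) {
        printf("unhandled status %08x, masking off\n", (unsigned)stat);
        intr_en &= ~stat;              /* nvkm_mask(INTR_EN_0, stat, 0) */
        intr_0  &= ~stat;              /* wr32(INTR_0, stat) */
    }
}

int main(void)
{
    fifo_intr();
    return 0;
}
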
H A Dnv40.c63 struct nvkm_device *device = fifo->base.engine.subdev.device; nv40_fifo_init() local
64 struct nvkm_fb *fb = device->fb; nv40_fifo_init()
65 struct nvkm_instmem *imem = device->imem; nv40_fifo_init()
70 nvkm_wr32(device, 0x002040, 0x000000ff); nv40_fifo_init()
71 nvkm_wr32(device, 0x002044, 0x2101ffff); nv40_fifo_init()
72 nvkm_wr32(device, 0x002058, 0x00000001); nv40_fifo_init()
74 nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | nv40_fifo_init()
77 nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8); nv40_fifo_init()
79 switch (device->chipset) { nv40_fifo_init()
83 nvkm_wr32(device, 0x002230, 0x00000001); nv40_fifo_init()
90 nvkm_wr32(device, 0x002220, 0x00030002); nv40_fifo_init()
93 nvkm_wr32(device, 0x002230, 0x00000000); nv40_fifo_init()
94 nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 + nv40_fifo_init()
100 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1); nv40_fifo_init()
102 nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); nv40_fifo_init()
103 nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); nv40_fifo_init()
105 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1); nv40_fifo_init()
106 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); nv40_fifo_init()
107 nvkm_wr32(device, NV03_PFIFO_CACHES, 1); nv40_fifo_init()
123 nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) nv40_fifo_new() argument
125 return nv04_fifo_new_(&nv40_fifo, device, index, 32, nv40_fifo_new()
H A Dgk104.c38 struct nvkm_device *device = fifo->engine.subdev.device; gk104_fifo_uevent_fini() local
39 nvkm_mask(device, 0x002140, 0x80000000, 0x00000000); gk104_fifo_uevent_fini()
45 struct nvkm_device *device = fifo->engine.subdev.device; gk104_fifo_uevent_init() local
46 nvkm_mask(device, 0x002140, 0x80000000, 0x80000000); gk104_fifo_uevent_init()
55 struct nvkm_device *device = subdev->device; gk104_fifo_runlist_update() local
71 nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); gk104_fifo_runlist_update()
72 nvkm_wr32(device, 0x002274, (engine << 20) | nr); gk104_fifo_runlist_update()
74 if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 + gk104_fifo_runlist_update()
84 struct nvkm_device *device = fifo->base.engine.subdev.device; gk104_fifo_engine() local
87 return nvkm_device_engine(device, __ffs(subdevs)); gk104_fifo_engine()
95 struct nvkm_device *device = fifo->base.engine.subdev.device; gk104_fifo_recover_work() local
108 nvkm_mask(device, 0x002630, engm, engm); gk104_fifo_recover_work()
111 if ((engine = nvkm_device_engine(device, engn))) { gk104_fifo_recover_work()
118 nvkm_wr32(device, 0x00262c, engm); gk104_fifo_recover_work()
119 nvkm_mask(device, 0x002630, engm, 0x00000000); gk104_fifo_recover_work()
127 struct nvkm_device *device = subdev->device; gk104_fifo_recover() local
134 nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); gk104_fifo_recover()
157 struct nvkm_device *device = subdev->device; gk104_fifo_intr_bind() local
158 u32 intr = nvkm_rd32(device, 0x00252c); gk104_fifo_intr_bind()
175 struct nvkm_device *device = fifo->base.engine.subdev.device; gk104_fifo_intr_sched_ctxsw() local
183 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); gk104_fifo_intr_sched_ctxsw()
212 struct nvkm_device *device = subdev->device; gk104_fifo_intr_sched() local
213 u32 intr = nvkm_rd32(device, 0x00254c); gk104_fifo_intr_sched()
233 struct nvkm_device *device = subdev->device; gk104_fifo_intr_chsw() local
234 u32 stat = nvkm_rd32(device, 0x00256c); gk104_fifo_intr_chsw()
236 nvkm_wr32(device, 0x00256c, stat); gk104_fifo_intr_chsw()
243 struct nvkm_device *device = subdev->device; gk104_fifo_intr_dropped_fault() local
244 u32 stat = nvkm_rd32(device, 0x00259c); gk104_fifo_intr_dropped_fault()
357 struct nvkm_device *device = subdev->device; gk104_fifo_intr_fault() local
358 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); gk104_fifo_intr_fault()
359 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); gk104_fifo_intr_fault()
360 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); gk104_fifo_intr_fault()
361 u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10)); gk104_fifo_intr_fault()
385 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000); gk104_fifo_intr_fault()
388 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000); gk104_fifo_intr_fault()
391 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); gk104_fifo_intr_fault()
394 engine = nvkm_device_engine(device, eu->data2); gk104_fifo_intr_fault()
453 struct nvkm_device *device = subdev->device; gk104_fifo_intr_pbdma_0() local
454 u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000)); gk104_fifo_intr_pbdma_0()
455 u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask; gk104_fifo_intr_pbdma_0()
456 u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000)); gk104_fifo_intr_pbdma_0()
457 u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000)); gk104_fifo_intr_pbdma_0()
458 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; gk104_fifo_intr_pbdma_0()
467 if (device->sw) { gk104_fifo_intr_pbdma_0()
468 if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data)) gk104_fifo_intr_pbdma_0()
471 nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); gk104_fifo_intr_pbdma_0()
485 nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); gk104_fifo_intr_pbdma_0()
501 struct nvkm_device *device = subdev->device; gk104_fifo_intr_pbdma_1() local
502 u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000)); gk104_fifo_intr_pbdma_1()
503 u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask; gk104_fifo_intr_pbdma_1()
504 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; gk104_fifo_intr_pbdma_1()
511 nvkm_rd32(device, 0x040150 + (unit * 0x2000)), gk104_fifo_intr_pbdma_1()
512 nvkm_rd32(device, 0x040154 + (unit * 0x2000))); gk104_fifo_intr_pbdma_1()
515 nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat); gk104_fifo_intr_pbdma_1()
521 struct nvkm_device *device = fifo->base.engine.subdev.device; gk104_fifo_intr_runlist() local
522 u32 mask = nvkm_rd32(device, 0x002a00); gk104_fifo_intr_runlist()
526 nvkm_wr32(device, 0x002a00, 1 << engn); gk104_fifo_intr_runlist()
542 struct nvkm_device *device = subdev->device; gk104_fifo_intr() local
543 u32 mask = nvkm_rd32(device, 0x002140); gk104_fifo_intr()
544 u32 stat = nvkm_rd32(device, 0x002100) & mask; gk104_fifo_intr()
548 nvkm_wr32(device, 0x002100, 0x00000001); gk104_fifo_intr()
554 nvkm_wr32(device, 0x002100, 0x00000010); gk104_fifo_intr()
560 nvkm_wr32(device, 0x002100, 0x00000100); gk104_fifo_intr()
566 nvkm_wr32(device, 0x002100, 0x00010000); gk104_fifo_intr()
572 nvkm_wr32(device, 0x002100, 0x00800000); gk104_fifo_intr()
578 nvkm_wr32(device, 0x002100, 0x01000000); gk104_fifo_intr()
584 nvkm_wr32(device, 0x002100, 0x08000000); gk104_fifo_intr()
589 u32 mask = nvkm_rd32(device, 0x00259c); gk104_fifo_intr()
593 nvkm_wr32(device, 0x00259c, (1 << unit)); gk104_fifo_intr()
600 u32 mask = nvkm_rd32(device, 0x0025a0); gk104_fifo_intr()
605 nvkm_wr32(device, 0x0025a0, (1 << unit)); gk104_fifo_intr()
617 nvkm_wr32(device, 0x002100, 0x80000000); gk104_fifo_intr()
624 nvkm_mask(device, 0x002140, stat, 0x00000000); gk104_fifo_intr()
625 nvkm_wr32(device, 0x002100, stat); gk104_fifo_intr()
633 struct nvkm_device *device = fifo->base.engine.subdev.device; gk104_fifo_fini() local
636 nvkm_mask(device, 0x002140, 0x10000000, 0x10000000); gk104_fifo_fini()
643 struct nvkm_device *device = fifo->base.engine.subdev.device; gk104_fifo_oneinit() local
647 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gk104_fifo_oneinit()
653 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gk104_fifo_oneinit()
663 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gk104_fifo_oneinit()
669 ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12, gk104_fifo_oneinit()
683 struct nvkm_device *device = subdev->device; gk104_fifo_init() local
687 nvkm_wr32(device, 0x000204, 0xffffffff); gk104_fifo_init()
688 fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204)); gk104_fifo_init()
693 nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); gk104_fifo_init()
694 nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ gk104_fifo_init()
695 nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ gk104_fifo_init()
700 nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ gk104_fifo_init()
701 nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ gk104_fifo_init()
704 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12); gk104_fifo_init()
706 nvkm_wr32(device, 0x002100, 0xffffffff); gk104_fifo_init()
707 nvkm_wr32(device, 0x002140, 0x7fffffff); gk104_fifo_init()
728 gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, gk104_fifo_new_() argument
738 return nvkm_fifo_ctor(func, device, index, nr, &fifo->base); gk104_fifo_new_()
757 gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) gk104_fifo_new() argument
759 return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo); gk104_fifo_new()
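
gk104_fifo_runlist_update() above rebuilds the channel runlist into the spare half of a double buffer, then points the hardware at it by writing the page-shifted address and entry count, and waits for the commit-pending bit. A simplified sketch of the double-buffer flip; the entry layout and the wait are elided, and real entries carry a chid plus flags rather than a bare integer.

#include <stdint.h>
#include <stdio.h>

#define RUNLIST_ENTRIES 8

struct runlist {
    uint64_t mem[2][RUNLIST_ENTRIES];  /* double buffer */
    int next;                          /* which buffer to build into */
};

static void runlist_commit(uint64_t addr, int nr)
{
    /* The driver does wr32(0x002270, addr >> 12) and wr32(0x002274, nr),
     * then wait_event_timeout()s on the commit-pending bit. */
    printf("commit runlist @%#llx, %d entries\n",
           (unsigned long long)addr, nr);
}

static void runlist_update(struct runlist *rl, const int *chids, int nr)
{
    uint64_t *cur = rl->mem[rl->next];
    int i;

    rl->next = !rl->next;              /* flip for the next update */
    for (i = 0; i < nr; i++)
        cur[i] = (uint64_t)chids[i];

    runlist_commit((uint64_t)(uintptr_t)cur, nr);
}

int main(void)
{
    struct runlist rl = { .next = 0 };
    int chans[] = { 0, 3, 7 };
    runlist_update(&rl, chans, 3);
    return 0;
}
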
H A Dgf100.c38 struct nvkm_device *device = fifo->engine.subdev.device; gf100_fifo_uevent_init() local
39 nvkm_mask(device, 0x002140, 0x80000000, 0x80000000); gf100_fifo_uevent_init()
45 struct nvkm_device *device = fifo->engine.subdev.device; gf100_fifo_uevent_fini() local
46 nvkm_mask(device, 0x002140, 0x80000000, 0x00000000); gf100_fifo_uevent_fini()
54 struct nvkm_device *device = subdev->device; gf100_fifo_runlist_update() local
70 nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); gf100_fifo_runlist_update()
71 nvkm_wr32(device, 0x002274, 0x01f00000 | nr); gf100_fifo_runlist_update()
74 !(nvkm_rd32(device, 0x00227c) & 0x00100000), gf100_fifo_runlist_update()
100 struct nvkm_device *device = fifo->base.engine.subdev.device; gf100_fifo_engine() local
113 return nvkm_device_engine(device, engn); gf100_fifo_engine()
120 struct nvkm_device *device = fifo->base.engine.subdev.device; gf100_fifo_recover_work() local
133 nvkm_mask(device, 0x002630, engm, engm); gf100_fifo_recover_work()
136 if ((engine = nvkm_device_engine(device, engn))) { gf100_fifo_recover_work()
143 nvkm_wr32(device, 0x00262c, engm); gf100_fifo_recover_work()
144 nvkm_mask(device, 0x002630, engm, 0x00000000); gf100_fifo_recover_work()
152 struct nvkm_device *device = subdev->device; gf100_fifo_recover() local
159 nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000); gf100_fifo_recover()
176 struct nvkm_device *device = fifo->base.engine.subdev.device; gf100_fifo_intr_sched_ctxsw() local
184 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); gf100_fifo_intr_sched_ctxsw()
211 struct nvkm_device *device = subdev->device; gf100_fifo_intr_sched() local
212 u32 intr = nvkm_rd32(device, 0x00254c); gf100_fifo_intr_sched()
292 struct nvkm_device *device = subdev->device; gf100_fifo_intr_fault() local
293 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); gf100_fifo_intr_fault()
294 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); gf100_fifo_intr_fault()
295 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); gf100_fifo_intr_fault()
296 u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10)); gf100_fifo_intr_fault()
320 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000); gf100_fifo_intr_fault()
323 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000); gf100_fifo_intr_fault()
326 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); gf100_fifo_intr_fault()
329 engine = nvkm_device_engine(device, eu->data2); gf100_fifo_intr_fault()
362 struct nvkm_device *device = subdev->device; gf100_fifo_intr_pbdma() local
363 u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)); gf100_fifo_intr_pbdma()
364 u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000)); gf100_fifo_intr_pbdma()
365 u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000)); gf100_fifo_intr_pbdma()
366 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f; gf100_fifo_intr_pbdma()
375 if (device->sw) { gf100_fifo_intr_pbdma()
376 if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data)) gf100_fifo_intr_pbdma()
392 nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); gf100_fifo_intr_pbdma()
393 nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); gf100_fifo_intr_pbdma()
400 struct nvkm_device *device = subdev->device; gf100_fifo_intr_runlist() local
401 u32 intr = nvkm_rd32(device, 0x002a00); gf100_fifo_intr_runlist()
405 nvkm_wr32(device, 0x002a00, 0x10000000); gf100_fifo_intr_runlist()
411 nvkm_wr32(device, 0x002a00, intr); gf100_fifo_intr_runlist()
419 struct nvkm_device *device = subdev->device; gf100_fifo_intr_engine_unit() local
420 u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04)); gf100_fifo_intr_engine_unit()
421 u32 inte = nvkm_rd32(device, 0x002628); gf100_fifo_intr_engine_unit()
424 nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr); gf100_fifo_intr_engine_unit()
435 nvkm_mask(device, 0x002628, ints, 0); gf100_fifo_intr_engine_unit()
443 struct nvkm_device *device = fifo->base.engine.subdev.device; gf100_fifo_intr_engine() local
444 u32 mask = nvkm_rd32(device, 0x0025a4); gf100_fifo_intr_engine()
457 struct nvkm_device *device = subdev->device; gf100_fifo_intr() local
458 u32 mask = nvkm_rd32(device, 0x002140); gf100_fifo_intr()
459 u32 stat = nvkm_rd32(device, 0x002100) & mask; gf100_fifo_intr()
462 u32 intr = nvkm_rd32(device, 0x00252c); gf100_fifo_intr()
464 nvkm_wr32(device, 0x002100, 0x00000001); gf100_fifo_intr()
470 nvkm_wr32(device, 0x002100, 0x00000100); gf100_fifo_intr()
475 u32 intr = nvkm_rd32(device, 0x00256c); gf100_fifo_intr()
477 nvkm_wr32(device, 0x002100, 0x00010000); gf100_fifo_intr()
482 u32 intr = nvkm_rd32(device, 0x00258c); gf100_fifo_intr()
484 nvkm_wr32(device, 0x002100, 0x01000000); gf100_fifo_intr()
489 u32 mask = nvkm_rd32(device, 0x00259c); gf100_fifo_intr()
493 nvkm_wr32(device, 0x00259c, (1 << unit)); gf100_fifo_intr()
500 u32 mask = nvkm_rd32(device, 0x0025a0); gf100_fifo_intr()
504 nvkm_wr32(device, 0x0025a0, (1 << unit)); gf100_fifo_intr()
522 nvkm_mask(device, 0x002140, stat, 0x00000000); gf100_fifo_intr()
523 nvkm_wr32(device, 0x002100, stat); gf100_fifo_intr()
531 struct nvkm_device *device = fifo->base.engine.subdev.device; gf100_fifo_oneinit() local
534 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, gf100_fifo_oneinit()
539 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, gf100_fifo_oneinit()
546 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000, gf100_fifo_oneinit()
551 ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar); gf100_fifo_oneinit()
571 struct nvkm_device *device = subdev->device; gf100_fifo_init() local
574 nvkm_wr32(device, 0x000204, 0xffffffff); gf100_fifo_init()
575 nvkm_wr32(device, 0x002204, 0xffffffff); gf100_fifo_init()
577 fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204)); gf100_fifo_init()
582 nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */ gf100_fifo_init()
583 nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */ gf100_fifo_init()
584 nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */ gf100_fifo_init()
585 nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */ gf100_fifo_init()
586 nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */ gf100_fifo_init()
587 nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */ gf100_fifo_init()
592 nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); gf100_fifo_init()
593 nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ gf100_fifo_init()
594 nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ gf100_fifo_init()
597 nvkm_mask(device, 0x002200, 0x00000001, 0x00000001); gf100_fifo_init()
598 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12); gf100_fifo_init()
600 nvkm_wr32(device, 0x002100, 0xffffffff); gf100_fifo_init()
601 nvkm_wr32(device, 0x002140, 0x7fffffff); gf100_fifo_init()
602 nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */ gf100_fifo_init()
632 gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) gf100_fifo_new() argument
642 return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base); gf100_fifo_new()
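
gf100_fifo_recover() and its gk104 counterpart above never reset engines from the interrupt path: they OR the faulting engine into a mask, quiesce it via 0x002630, and schedule a work item that later walks the mask and resets each engine from process context. A single-threaded sketch of that split; the bit walk uses the GCC builtin __builtin_ctzll as an __ffs() analogue, and the real code also takes a lock around the mask.

#include <stdint.h>
#include <stdio.h>

static uint64_t pending_mask;

static void recover(int engn)          /* interrupt context */
{
    pending_mask |= 1ULL << engn;
    /* ...then schedule_work(&fifo->fault) in the driver */
}

static void recover_work(void)         /* process context */
{
    uint64_t todo = pending_mask;
    int engn;

    pending_mask = 0;
    while (todo) {
        engn = __builtin_ctzll(todo);
        printf("resetting engine %d\n", engn);
        todo &= ~(1ULL << engn);
    }
}

int main(void)
{
    recover(2);
    recover(5);
    recover_work();
    return 0;
}
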
H A Dnv50.c32 struct nvkm_device *device = fifo->base.engine.subdev.device; nv50_fifo_runlist_update_locked() local
41 if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000) nv50_fifo_runlist_update_locked()
46 nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12); nv50_fifo_runlist_update_locked()
47 nvkm_wr32(device, 0x0032ec, p); nv50_fifo_runlist_update_locked()
48 nvkm_wr32(device, 0x002500, 0x00000101); nv50_fifo_runlist_update_locked()
63 struct nvkm_device *device = fifo->base.engine.subdev.device; nv50_fifo_oneinit() local
66 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000, nv50_fifo_oneinit()
71 return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000, nv50_fifo_oneinit()
79 struct nvkm_device *device = fifo->base.engine.subdev.device; nv50_fifo_init() local
82 nvkm_mask(device, 0x000200, 0x00000100, 0x00000000); nv50_fifo_init()
83 nvkm_mask(device, 0x000200, 0x00000100, 0x00000100); nv50_fifo_init()
84 nvkm_wr32(device, 0x00250c, 0x6f3cfc34); nv50_fifo_init()
85 nvkm_wr32(device, 0x002044, 0x01003fff); nv50_fifo_init()
87 nvkm_wr32(device, 0x002100, 0xffffffff); nv50_fifo_init()
88 nvkm_wr32(device, 0x002140, 0xbfffffff); nv50_fifo_init()
91 nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000); nv50_fifo_init()
94 nvkm_wr32(device, 0x003200, 0x00000001); nv50_fifo_init()
95 nvkm_wr32(device, 0x003250, 0x00000001); nv50_fifo_init()
96 nvkm_wr32(device, 0x002500, 0x00000001); nv50_fifo_init()
109 nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, nv50_fifo_new_() argument
119 ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base); nv50_fifo_new_()
144 nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) nv50_fifo_new() argument
146 return nv50_fifo_new_(&nv50_fifo, device, index, pfifo); nv50_fifo_new()
/linux-4.4.14/drivers/firewire/
H A Dcore-device.c24 #include <linux/device.h>
171 static const struct ieee1394_device_id *unit_match(struct device *dev, unit_match()
187 static bool is_fw_unit(struct device *dev);
189 static int fw_unit_match(struct device *dev, struct device_driver *drv) fw_unit_match()
195 static int fw_unit_probe(struct device *dev) fw_unit_probe()
203 static int fw_unit_remove(struct device *dev) fw_unit_remove()
222 static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) fw_unit_uevent()
243 int fw_device_enable_phys_dma(struct fw_device *device) fw_device_enable_phys_dma() argument
245 int generation = device->generation; fw_device_enable_phys_dma()
247 /* device->node_id, accessed below, must not be older than generation */ fw_device_enable_phys_dma()
250 return device->card->driver->enable_phys_dma(device->card, fw_device_enable_phys_dma()
251 device->node_id, fw_device_enable_phys_dma()
261 static ssize_t show_immediate(struct device *dev, show_immediate()
293 static ssize_t show_text_leaf(struct device *dev, show_text_leaf()
346 static void init_fw_attribute_group(struct device *dev, init_fw_attribute_group()
370 static ssize_t modalias_show(struct device *dev, modalias_show()
382 static ssize_t rom_index_show(struct device *dev, rom_index_show()
385 struct fw_device *device = fw_device(dev->parent); rom_index_show() local
389 (int)(unit->directory - device->config_rom)); rom_index_show()
398 static ssize_t config_rom_show(struct device *dev, config_rom_show()
401 struct fw_device *device = fw_device(dev); config_rom_show() local
405 length = device->config_rom_length * 4; config_rom_show()
406 memcpy(buf, device->config_rom, length); config_rom_show()
412 static ssize_t guid_show(struct device *dev, guid_show()
415 struct fw_device *device = fw_device(dev); guid_show() local
420 device->config_rom[3], device->config_rom[4]); guid_show()
426 static ssize_t is_local_show(struct device *dev, is_local_show()
429 struct fw_device *device = fw_device(dev); is_local_show() local
431 return sprintf(buf, "%u\n", device->is_local); is_local_show()
456 static ssize_t units_show(struct device *dev, units_show()
459 struct fw_device *device = fw_device(dev); units_show() local
464 fw_csr_iterator_init(&ci, &device->config_rom[5]); units_show()
488 static int read_rom(struct fw_device *device, read_rom() argument
494 /* device->node_id, accessed below, must not be older than generation */ read_rom()
498 rcode = fw_run_transaction(device->card, read_rom()
499 TCODE_READ_QUADLET_REQUEST, device->node_id, read_rom()
500 generation, device->max_speed, offset, data, 4); read_rom()
520 static int read_config_rom(struct fw_device *device, int generation) read_config_rom() argument
522 struct fw_card *card = device->card; read_config_rom()
536 device->max_speed = SCODE_100; read_config_rom()
540 ret = read_rom(device, generation, i, &rom[i]); read_config_rom()
557 device->max_speed = device->node->max_speed; read_config_rom()
568 if ((rom[2] & 0x7) < device->max_speed || read_config_rom()
569 device->max_speed == SCODE_BETA || read_config_rom()
574 if (device->max_speed == SCODE_BETA) read_config_rom()
575 device->max_speed = card->link_speed; read_config_rom()
577 while (device->max_speed > SCODE_100) { read_config_rom()
578 if (read_rom(device, generation, 0, &dummy) == read_config_rom()
581 device->max_speed--; read_config_rom()
610 ret = read_rom(device, generation, i, &rom[i]); read_config_rom()
634 ret = read_rom(device, generation, i, &rom[i]); read_config_rom()
661 old_rom = device->config_rom; read_config_rom()
669 device->config_rom = new_rom; read_config_rom()
670 device->config_rom_length = length; read_config_rom()
675 device->max_rec = rom[2] >> 12 & 0xf; read_config_rom()
676 device->cmc = rom[2] >> 30 & 1; read_config_rom()
677 device->irmc = rom[2] >> 31 & 1; read_config_rom()
684 static void fw_unit_release(struct device *dev) fw_unit_release()
697 static bool is_fw_unit(struct device *dev) is_fw_unit()
702 static void create_units(struct fw_device *device) create_units() argument
709 fw_csr_iterator_init(&ci, &device->config_rom[5]); create_units()
723 unit->device.bus = &fw_bus_type; create_units()
724 unit->device.type = &fw_unit_type; create_units()
725 unit->device.parent = &device->device; create_units()
726 dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++); create_units()
731 init_fw_attribute_group(&unit->device, create_units()
735 if (device_register(&unit->device) < 0) create_units()
738 fw_device_get(device); create_units()
746 static int shutdown_unit(struct device *device, void *data) shutdown_unit() argument
748 device_unregister(device); shutdown_unit()
766 struct fw_device *device; fw_device_get_by_devt() local
769 device = idr_find(&fw_device_idr, MINOR(devt)); fw_device_get_by_devt()
770 if (device) fw_device_get_by_devt()
771 fw_device_get(device); fw_device_get_by_devt()
774 return device; fw_device_get_by_devt()
780 static void fw_schedule_device_work(struct fw_device *device, fw_schedule_device_work() argument
783 queue_delayed_work(fw_workqueue, &device->work, delay); fw_schedule_device_work()
788 * rom. It shouldn't be necessary to tweak these; if the device
804 struct fw_device *device = fw_device_shutdown() local
806 int minor = MINOR(device->device.devt); fw_device_shutdown()
809 device->card->reset_jiffies + SHUTDOWN_DELAY) fw_device_shutdown()
810 && !list_empty(&device->card->link)) { fw_device_shutdown()
811 fw_schedule_device_work(device, SHUTDOWN_DELAY); fw_device_shutdown()
815 if (atomic_cmpxchg(&device->state, fw_device_shutdown()
820 fw_device_cdev_remove(device); fw_device_shutdown()
821 device_for_each_child(&device->device, NULL, shutdown_unit); fw_device_shutdown()
822 device_unregister(&device->device); fw_device_shutdown()
828 fw_device_put(device); fw_device_shutdown()
831 static void fw_device_release(struct device *dev) fw_device_release()
833 struct fw_device *device = fw_device(dev); fw_device_release() local
834 struct fw_card *card = device->card; fw_device_release()
843 device->node->data = NULL; fw_device_release()
846 fw_node_put(device->node); fw_device_release()
847 kfree(device->config_rom); fw_device_release()
848 kfree(device); fw_device_release()
856 static bool is_fw_device(struct device *dev) is_fw_device()
861 static int update_unit(struct device *dev, void *data) update_unit()
877 struct fw_device *device = fw_device_update() local
880 fw_device_cdev_update(device); fw_device_update()
881 device_for_each_child(&device->device, NULL, update_unit); fw_device_update()
885 * If a device was pending for deletion because its node went away but its
887 * device, revive the existing fw_device.
890 static int lookup_existing_device(struct device *dev, void *data) lookup_existing_device()
920 fw_notice(card, "rediscovered device %s\n", dev_name(dev)); lookup_existing_device()
939 static void set_broadcast_channel(struct fw_device *device, int generation) set_broadcast_channel() argument
941 struct fw_card *card = device->card; set_broadcast_channel()
955 if (!device->irmc || device->max_rec < 8) set_broadcast_channel()
962 if (device->bc_implemented == BC_UNKNOWN) { set_broadcast_channel()
964 device->node_id, generation, device->max_speed, set_broadcast_channel()
970 device->bc_implemented = BC_IMPLEMENTED; set_broadcast_channel()
975 device->bc_implemented = BC_UNIMPLEMENTED; set_broadcast_channel()
979 if (device->bc_implemented == BC_IMPLEMENTED) { set_broadcast_channel()
983 device->node_id, generation, device->max_speed, set_broadcast_channel()
989 int fw_device_set_broadcast_channel(struct device *dev, void *gen) fw_device_set_broadcast_channel()
999 struct fw_device *device = fw_device_init() local
1001 struct fw_card *card = device->card; fw_device_init()
1002 struct device *revived_dev; fw_device_init()
1008 * device. fw_device_init()
1011 ret = read_config_rom(device, device->generation); fw_device_init()
1013 if (device->config_rom_retries < MAX_RETRIES && fw_device_init()
1014 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { fw_device_init()
1015 device->config_rom_retries++; fw_device_init()
1016 fw_schedule_device_work(device, RETRY_DELAY); fw_device_init()
1018 if (device->node->link_on) fw_device_init()
1020 device->node_id, fw_device_init()
1022 if (device->node == card->root_node) fw_device_init()
1024 fw_device_release(&device->device); fw_device_init()
1029 revived_dev = device_find_child(card->device, fw_device_init()
1030 device, lookup_existing_device); fw_device_init()
1033 fw_device_release(&device->device); fw_device_init()
1038 device_initialize(&device->device); fw_device_init()
1040 fw_device_get(device); fw_device_init()
1042 minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS, fw_device_init()
1049 device->device.bus = &fw_bus_type; fw_device_init()
1050 device->device.type = &fw_device_type; fw_device_init()
1051 device->device.parent = card->device; fw_device_init()
1052 device->device.devt = MKDEV(fw_cdev_major, minor); fw_device_init()
1053 dev_set_name(&device->device, "fw%d", minor); fw_device_init()
1055 BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) < fw_device_init()
1058 init_fw_attribute_group(&device->device, fw_device_init()
1060 &device->attribute_group); fw_device_init()
1062 if (device_add(&device->device)) { fw_device_init()
1063 fw_err(card, "failed to add device\n"); fw_device_init()
1067 create_units(device); fw_device_init()
1070 * Transition the device to running state. If it got pulled fw_device_init()
1072 * have to shut down the device again here. Normally, though, fw_device_init()
1078 if (atomic_cmpxchg(&device->state, fw_device_init()
1081 device->workfn = fw_device_shutdown; fw_device_init()
1082 fw_schedule_device_work(device, SHUTDOWN_DELAY); fw_device_init()
1084 fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", fw_device_init()
1085 dev_name(&device->device), fw_device_init()
1086 device->config_rom[3], device->config_rom[4], fw_device_init()
1087 1 << device->max_speed); fw_device_init()
1088 device->config_rom_retries = 0; fw_device_init()
1090 set_broadcast_channel(device, device->generation); fw_device_init()
1092 add_device_randomness(&device->config_rom[3], 8); fw_device_init()
1101 if (device->node == card->root_node) fw_device_init()
1111 fw_device_put(device); /* fw_device_idr's reference */ fw_device_init()
1113 put_device(&device->device); /* our reference */ fw_device_init()
1117 static int reread_config_rom(struct fw_device *device, int generation, reread_config_rom() argument
1124 rcode = read_rom(device, generation, i, &q); reread_config_rom()
1132 if (q != device->config_rom[i]) { reread_config_rom()
1144 struct fw_device *device = fw_device_refresh() local
1146 struct fw_card *card = device->card; fw_device_refresh()
1147 int ret, node_id = device->node_id; fw_device_refresh()
1150 ret = reread_config_rom(device, device->generation, &changed); fw_device_refresh()
1155 if (atomic_cmpxchg(&device->state, fw_device_refresh()
1161 device->config_rom_retries = 0; fw_device_refresh()
1169 device_for_each_child(&device->device, NULL, shutdown_unit); fw_device_refresh()
1171 ret = read_config_rom(device, device->generation); fw_device_refresh()
1175 fw_device_cdev_update(device); fw_device_refresh()
1176 create_units(device); fw_device_refresh()
1179 kobject_uevent(&device->device.kobj, KOBJ_CHANGE); fw_device_refresh()
1181 if (atomic_cmpxchg(&device->state, fw_device_refresh()
1186 fw_notice(card, "refreshed device %s\n", dev_name(&device->device)); fw_device_refresh()
1187 device->config_rom_retries = 0; fw_device_refresh()
1191 if (device->config_rom_retries < MAX_RETRIES && fw_device_refresh()
1192 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { fw_device_refresh()
1193 device->config_rom_retries++; fw_device_refresh()
1194 fw_schedule_device_work(device, RETRY_DELAY); fw_device_refresh()
1198 fw_notice(card, "giving up on refresh of device %s: %s\n", fw_device_refresh()
1199 dev_name(&device->device), fw_rcode_string(ret)); fw_device_refresh()
1201 atomic_set(&device->state, FW_DEVICE_GONE); fw_device_refresh()
1202 device->workfn = fw_device_shutdown; fw_device_refresh()
1203 fw_schedule_device_work(device, SHUTDOWN_DELAY); fw_device_refresh()
1211 struct fw_device *device = container_of(to_delayed_work(work), fw_device_workfn() local
1213 device->workfn(work); fw_device_workfn()
1218 struct fw_device *device; fw_node_event() local
1229 device = kzalloc(sizeof(*device), GFP_ATOMIC); fw_node_event()
1230 if (device == NULL) fw_node_event()
1234 * Do minimal initialization of the device here, the fw_node_event()
1239 * You can basically just check device->state and fw_node_event()
1243 atomic_set(&device->state, FW_DEVICE_INITIALIZING); fw_node_event()
1244 device->card = fw_card_get(card); fw_node_event()
1245 device->node = fw_node_get(node); fw_node_event()
1246 device->node_id = node->node_id; fw_node_event()
1247 device->generation = card->generation; fw_node_event()
1248 device->is_local = node == card->local_node; fw_node_event()
1249 mutex_init(&device->client_list_mutex); fw_node_event()
1250 INIT_LIST_HEAD(&device->client_list); fw_node_event()
1253 * Set the node data to point back to this device so fw_node_event()
1255 * and generation for the device. fw_node_event()
1257 node->data = device; fw_node_event()
1265 device->workfn = fw_device_init; fw_node_event()
1266 INIT_DELAYED_WORK(&device->work, fw_device_workfn); fw_node_event()
1267 fw_schedule_device_work(device, INITIAL_DELAY); fw_node_event()
1272 device = node->data; fw_node_event()
1273 if (device == NULL) fw_node_event()
1276 device->node_id = node->node_id; fw_node_event()
1278 device->generation = card->generation; fw_node_event()
1279 if (atomic_cmpxchg(&device->state, fw_node_event()
1282 device->workfn = fw_device_refresh; fw_node_event()
1283 fw_schedule_device_work(device, fw_node_event()
1284 device->is_local ? 0 : INITIAL_DELAY); fw_node_event()
1289 device = node->data; fw_node_event()
1290 if (device == NULL) fw_node_event()
1293 device->node_id = node->node_id; fw_node_event()
1295 device->generation = card->generation; fw_node_event()
1296 if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { fw_node_event()
1297 device->workfn = fw_device_update; fw_node_event()
1298 fw_schedule_device_work(device, 0); fw_node_event()
1308 * Destroy the device associated with the node. There fw_node_event()
1309 * are two cases here: either the device is fully fw_node_event()
1313 * initialized we can reuse device->work to schedule a fw_node_event()
1316 * the device in shutdown state to have that code fail fw_node_event()
1317 * to create the device. fw_node_event()
1319 device = node->data; fw_node_event()
1320 if (atomic_xchg(&device->state, fw_node_event()
1322 device->workfn = fw_device_shutdown; fw_node_event()
1323 fw_schedule_device_work(device, fw_node_event()
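
The fw_device fragments above all pivot on one technique: the device state lives in an atomic_t, and every lifecycle transition (INITIALIZING to RUNNING in fw_device_init(), RUNNING to GONE when the node disappears) is claimed with a single compare-and-swap, so the bus-reset path and the shutdown work can race without both acting on the same device. A minimal userspace sketch of that claim-by-cmpxchg idea in C11 atomics follows; dev_state and try_transition() are hypothetical stand-ins, not the kernel API.

    #include <stdatomic.h>
    #include <stdio.h>

    enum dev_state { DEV_INITIALIZING, DEV_RUNNING, DEV_GONE, DEV_SHUTDOWN };

    struct dev {
        _Atomic enum dev_state state;
    };

    /* Atomically move from `from` to `to`; fails (leaving the state
     * untouched) if another path performed a transition first. */
    static int try_transition(struct dev *d, enum dev_state from,
                              enum dev_state to)
    {
        enum dev_state expected = from;
        return atomic_compare_exchange_strong(&d->state, &expected, to) ? 0 : -1;
    }

    int main(void)
    {
        struct dev d = { .state = DEV_INITIALIZING };

        /* init path: publish only if nobody shut the device down meanwhile */
        if (try_transition(&d, DEV_INITIALIZING, DEV_RUNNING) == 0)
            puts("device published");
        else
            puts("lost the race: schedule shutdown instead");

        /* node-gone path: claim the shutdown exactly once */
        if (try_transition(&d, DEV_RUNNING, DEV_GONE) == 0)
            puts("shutdown work scheduled");
        return 0;
    }

This is the shape of the cmpxchg near the end of fw_device_init(): if the state is no longer INITIALIZING once initialization finishes, the function schedules fw_device_shutdown() instead of announcing the device.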
/linux-4.4.14/drivers/gpu/host1x/
H A Dbus.c41 * host1x_subdev_add() - add a new subdevice with an associated device node
43 static int host1x_subdev_add(struct host1x_device *device, host1x_subdev_add() argument
55 mutex_lock(&device->subdevs_lock); host1x_subdev_add()
56 list_add_tail(&subdev->list, &device->subdevs); host1x_subdev_add()
57 mutex_unlock(&device->subdevs_lock); host1x_subdev_add()
73 * host1x_device_parse_dt() - scan device tree and add matching subdevices
75 static int host1x_device_parse_dt(struct host1x_device *device, host1x_device_parse_dt() argument
81 for_each_child_of_node(device->dev.parent->of_node, np) { host1x_device_parse_dt()
84 err = host1x_subdev_add(device, np); host1x_device_parse_dt()
93 static void host1x_subdev_register(struct host1x_device *device, host1x_subdev_register() argument
102 * client with its parent device. host1x_subdev_register()
104 mutex_lock(&device->subdevs_lock); host1x_subdev_register()
105 mutex_lock(&device->clients_lock); host1x_subdev_register()
106 list_move_tail(&client->list, &device->clients); host1x_subdev_register()
107 list_move_tail(&subdev->list, &device->active); host1x_subdev_register()
108 client->parent = &device->dev; host1x_subdev_register()
110 mutex_unlock(&device->clients_lock); host1x_subdev_register()
111 mutex_unlock(&device->subdevs_lock); host1x_subdev_register()
113 if (list_empty(&device->subdevs)) { host1x_subdev_register()
114 err = device_add(&device->dev); host1x_subdev_register()
116 dev_err(&device->dev, "failed to add: %d\n", err); host1x_subdev_register()
118 device->registered = true; host1x_subdev_register()
122 static void __host1x_subdev_unregister(struct host1x_device *device, __host1x_subdev_unregister() argument
131 if (list_empty(&device->subdevs)) { __host1x_subdev_unregister()
132 if (device->registered) { __host1x_subdev_unregister()
133 device->registered = false; __host1x_subdev_unregister()
134 device_del(&device->dev); __host1x_subdev_unregister()
142 mutex_lock(&device->clients_lock); __host1x_subdev_unregister()
145 list_move_tail(&subdev->list, &device->subdevs); __host1x_subdev_unregister()
148 * when the device is about to be deleted. __host1x_subdev_unregister()
152 * also when the composite device is about to be removed. __host1x_subdev_unregister()
155 mutex_unlock(&device->clients_lock); __host1x_subdev_unregister()
158 static void host1x_subdev_unregister(struct host1x_device *device, host1x_subdev_unregister() argument
161 mutex_lock(&device->subdevs_lock); host1x_subdev_unregister()
162 __host1x_subdev_unregister(device, subdev); host1x_subdev_unregister()
163 mutex_unlock(&device->subdevs_lock); host1x_subdev_unregister()
166 int host1x_device_init(struct host1x_device *device) host1x_device_init() argument
171 mutex_lock(&device->clients_lock); host1x_device_init()
173 list_for_each_entry(client, &device->clients, list) { host1x_device_init()
177 dev_err(&device->dev, host1x_device_init()
180 mutex_unlock(&device->clients_lock); host1x_device_init()
186 mutex_unlock(&device->clients_lock); host1x_device_init()
192 int host1x_device_exit(struct host1x_device *device) host1x_device_exit() argument
197 mutex_lock(&device->clients_lock); host1x_device_exit()
199 list_for_each_entry_reverse(client, &device->clients, list) { host1x_device_exit()
203 dev_err(&device->dev, host1x_device_exit()
206 mutex_unlock(&device->clients_lock); host1x_device_exit()
212 mutex_unlock(&device->clients_lock); host1x_device_exit()
221 struct host1x_device *device; host1x_add_client() local
226 list_for_each_entry(device, &host1x->devices, list) { host1x_add_client()
227 list_for_each_entry(subdev, &device->subdevs, list) { host1x_add_client()
229 host1x_subdev_register(device, subdev, client); host1x_add_client()
243 struct host1x_device *device, *dt; host1x_del_client() local
248 list_for_each_entry_safe(device, dt, &host1x->devices, list) { host1x_del_client()
249 list_for_each_entry(subdev, &device->active, list) { host1x_del_client()
251 host1x_subdev_unregister(device, subdev); host1x_del_client()
262 static int host1x_device_match(struct device *dev, struct device_driver *drv) host1x_device_match()
267 static int host1x_device_probe(struct device *dev) host1x_device_probe()
270 struct host1x_device *device = to_host1x_device(dev); host1x_device_probe() local
273 return driver->probe(device); host1x_device_probe()
278 static int host1x_device_remove(struct device *dev) host1x_device_remove()
281 struct host1x_device *device = to_host1x_device(dev); host1x_device_remove() local
284 return driver->remove(device); host1x_device_remove()
289 static void host1x_device_shutdown(struct device *dev) host1x_device_shutdown()
292 struct host1x_device *device = to_host1x_device(dev); host1x_device_shutdown() local
295 driver->shutdown(device); host1x_device_shutdown()
316 static void __host1x_device_del(struct host1x_device *device) __host1x_device_del() argument
321 mutex_lock(&device->subdevs_lock); __host1x_device_del()
324 list_for_each_entry_safe(subdev, sd, &device->active, list) { __host1x_device_del()
336 __host1x_subdev_unregister(device, subdev); __host1x_device_del()
345 list_for_each_entry_safe(subdev, sd, &device->subdevs, list) __host1x_device_del()
348 mutex_unlock(&device->subdevs_lock); __host1x_device_del()
352 mutex_lock(&device->clients_lock); __host1x_device_del()
354 list_for_each_entry_safe(client, cl, &device->clients, list) __host1x_device_del()
357 mutex_unlock(&device->clients_lock); __host1x_device_del()
360 /* finally remove the device */ __host1x_device_del()
361 list_del_init(&device->list); __host1x_device_del()
364 static void host1x_device_release(struct device *dev) host1x_device_release()
366 struct host1x_device *device = to_host1x_device(dev); host1x_device_release() local
368 __host1x_device_del(device); host1x_device_release()
369 kfree(device); host1x_device_release()
377 struct host1x_device *device; host1x_device_add() local
380 device = kzalloc(sizeof(*device), GFP_KERNEL); host1x_device_add()
381 if (!device) host1x_device_add()
384 device_initialize(&device->dev); host1x_device_add()
386 mutex_init(&device->subdevs_lock); host1x_device_add()
387 INIT_LIST_HEAD(&device->subdevs); host1x_device_add()
388 INIT_LIST_HEAD(&device->active); host1x_device_add()
389 mutex_init(&device->clients_lock); host1x_device_add()
390 INIT_LIST_HEAD(&device->clients); host1x_device_add()
391 INIT_LIST_HEAD(&device->list); host1x_device_add()
392 device->driver = driver; host1x_device_add()
394 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; host1x_device_add()
395 device->dev.dma_mask = &device->dev.coherent_dma_mask; host1x_device_add()
396 dev_set_name(&device->dev, "%s", driver->driver.name); host1x_device_add()
397 device->dev.release = host1x_device_release; host1x_device_add()
398 device->dev.bus = &host1x_bus_type; host1x_device_add()
399 device->dev.parent = host1x->dev; host1x_device_add()
401 err = host1x_device_parse_dt(device, driver); host1x_device_add()
403 kfree(device); host1x_device_add()
407 list_add_tail(&device->list, &host1x->devices); host1x_device_add()
412 list_for_each_entry(subdev, &device->subdevs, list) { host1x_device_add()
414 host1x_subdev_register(device, subdev, client); host1x_device_add()
426 * Removes a device by first unregistering any subdevices and then removing
432 struct host1x_device *device) host1x_device_del()
434 if (device->registered) { host1x_device_del()
435 device->registered = false; host1x_device_del()
436 device_del(&device->dev); host1x_device_del()
439 put_device(&device->dev); host1x_device_del()
445 struct host1x_device *device; host1x_attach_driver() local
450 list_for_each_entry(device, &host1x->devices, list) { host1x_attach_driver()
451 if (device->driver == driver) { host1x_attach_driver()
459 dev_err(host1x->dev, "failed to allocate device: %d\n", err); host1x_attach_driver()
467 struct host1x_device *device, *tmp; host1x_detach_driver() local
471 list_for_each_entry_safe(device, tmp, &host1x->devices, list) host1x_detach_driver()
472 if (device->driver == driver) host1x_detach_driver()
473 host1x_device_del(host1x, device); host1x_detach_driver()
431 host1x_device_del(struct host1x *host1x, struct host1x_device *device) host1x_device_del() argument
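
The host1x bus fragments implement a small aggregation protocol: host1x_device_parse_dt() records which subdevices a composite device expects, each client registration moves one entry from the subdevs list to the active list, and only when subdevs drains empty does host1x_subdev_register() call device_add() on the composite. A reduced model of that drain-to-register idea, with a plain counter standing in for the two lists and hypothetical names throughout:

    #include <stdbool.h>
    #include <stdio.h>

    struct composite {
        int missing;       /* subdevices still expected (the "subdevs" list) */
        bool registered;   /* whether the composite has been published */
    };

    /* Hypothetical stand-in for device_add() on the composite device. */
    static void publish(struct composite *c)
    {
        c->registered = true;
        puts("composite device registered");
    }

    /* Called once per matching client, like host1x_subdev_register(). */
    static void subdev_register(struct composite *c)
    {
        if (--c->missing == 0)      /* list_empty(&device->subdevs) analogue */
            publish(c);
    }

    int main(void)
    {
        struct composite c = { .missing = 3, .registered = false };
        subdev_register(&c);        /* first client probes */
        subdev_register(&c);        /* second client probes */
        subdev_register(&c);        /* last client probes: triggers publish */
        return 0;
    }

__host1x_subdev_unregister() runs the same bookkeeping backwards: moving a subdev back onto subdevs makes the composite incomplete again, so device_del() is called if it had been registered.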
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/timer/
H A Dnv04.c31 struct nvkm_device *device = subdev->device; nv04_timer_time() local
38 nvkm_wr32(device, NV04_PTIMER_TIME_1, hi); nv04_timer_time()
39 nvkm_wr32(device, NV04_PTIMER_TIME_0, lo); nv04_timer_time()
45 struct nvkm_device *device = tmr->subdev.device; nv04_timer_read() local
49 hi = nvkm_rd32(device, NV04_PTIMER_TIME_1); nv04_timer_read()
50 lo = nvkm_rd32(device, NV04_PTIMER_TIME_0); nv04_timer_read()
51 } while (hi != nvkm_rd32(device, NV04_PTIMER_TIME_1)); nv04_timer_read()
59 struct nvkm_device *device = tmr->subdev.device; nv04_timer_alarm_fini() local
60 nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000000); nv04_timer_alarm_fini()
66 struct nvkm_device *device = tmr->subdev.device; nv04_timer_alarm_init() local
67 nvkm_wr32(device, NV04_PTIMER_ALARM_0, time); nv04_timer_alarm_init()
68 nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000001); nv04_timer_alarm_init()
75 struct nvkm_device *device = subdev->device; nv04_timer_intr() local
76 u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0); nv04_timer_intr()
80 nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001); nv04_timer_intr()
86 nvkm_wr32(device, NV04_PTIMER_INTR_0, stat); nv04_timer_intr()
94 struct nvkm_device *device = subdev->device; nv04_timer_init() local
103 n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR); nv04_timer_init()
104 d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR); nv04_timer_init()
133 nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n); nv04_timer_init()
134 nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d); nv04_timer_init()
148 nv04_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr) nv04_timer_new() argument
150 return nvkm_timer_new_(&nv04_timer, device, index, ptmr); nv04_timer_new()
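
nv04_timer_read() shows the standard recipe for sampling a 64-bit counter that hardware exposes as two 32-bit registers: read the high word, read the low word, then re-read the high word and retry until both high reads agree, which guarantees the low word did not wrap in between. A self-contained sketch with a simulated free-running counter in place of NV04_PTIMER_TIME_0/TIME_1:

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated hardware counter that advances on every register access
     * and can only be observed 32 bits at a time. */
    static uint64_t hw_counter;

    static uint32_t read_time_lo(void) { hw_counter += 3; return (uint32_t)hw_counter; }
    static uint32_t read_time_hi(void) { hw_counter += 3; return (uint32_t)(hw_counter >> 32); }

    static uint64_t read_counter64(void)
    {
        uint32_t hi, lo;

        do {
            hi = read_time_hi();
            lo = read_time_lo();
            /* If hi changed, lo may straddle a low-word wrap: retry. */
        } while (hi != read_time_hi());

        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        hw_counter = 0xfffffff0ull;     /* just below a low-word wrap */
        printf("%#llx\n", (unsigned long long)read_counter64());
        return 0;
    }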
/linux-4.4.14/drivers/iio/imu/inv_mpu6050/
H A DMakefile2 # Makefile for Invensense MPU6050 device.
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/disp/
H A Dvga.c27 nvkm_rdport(struct nvkm_device *device, int head, u16 port) nvkm_rdport() argument
29 if (device->card_type >= NV_50) nvkm_rdport()
30 return nvkm_rd08(device, 0x601000 + port); nvkm_rdport()
35 return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port); nvkm_rdport()
40 if (device->card_type < NV_40) nvkm_rdport()
42 return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port); nvkm_rdport()
49 nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data) nvkm_wrport() argument
51 if (device->card_type >= NV_50) nvkm_wrport()
52 nvkm_wr08(device, 0x601000 + port, data); nvkm_wrport()
57 nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data); nvkm_wrport()
62 if (device->card_type < NV_40) nvkm_wrport()
64 nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data); nvkm_wrport()
69 nvkm_rdvgas(struct nvkm_device *device, int head, u8 index) nvkm_rdvgas() argument
71 nvkm_wrport(device, head, 0x03c4, index); nvkm_rdvgas()
72 return nvkm_rdport(device, head, 0x03c5); nvkm_rdvgas()
76 nvkm_wrvgas(struct nvkm_device *device, int head, u8 index, u8 value) nvkm_wrvgas() argument
78 nvkm_wrport(device, head, 0x03c4, index); nvkm_wrvgas()
79 nvkm_wrport(device, head, 0x03c5, value); nvkm_wrvgas()
83 nvkm_rdvgag(struct nvkm_device *device, int head, u8 index) nvkm_rdvgag() argument
85 nvkm_wrport(device, head, 0x03ce, index); nvkm_rdvgag()
86 return nvkm_rdport(device, head, 0x03cf); nvkm_rdvgag()
90 nvkm_wrvgag(struct nvkm_device *device, int head, u8 index, u8 value) nvkm_wrvgag() argument
92 nvkm_wrport(device, head, 0x03ce, index); nvkm_wrvgag()
93 nvkm_wrport(device, head, 0x03cf, value); nvkm_wrvgag()
97 nvkm_rdvgac(struct nvkm_device *device, int head, u8 index) nvkm_rdvgac() argument
99 nvkm_wrport(device, head, 0x03d4, index); nvkm_rdvgac()
100 return nvkm_rdport(device, head, 0x03d5); nvkm_rdvgac()
104 nvkm_wrvgac(struct nvkm_device *device, int head, u8 index, u8 value) nvkm_wrvgac() argument
106 nvkm_wrport(device, head, 0x03d4, index); nvkm_wrvgac()
107 nvkm_wrport(device, head, 0x03d5, value); nvkm_wrvgac()
111 nvkm_rdvgai(struct nvkm_device *device, int head, u16 port, u8 index) nvkm_rdvgai() argument
113 if (port == 0x03c4) return nvkm_rdvgas(device, head, index); nvkm_rdvgai()
114 if (port == 0x03ce) return nvkm_rdvgag(device, head, index); nvkm_rdvgai()
115 if (port == 0x03d4) return nvkm_rdvgac(device, head, index); nvkm_rdvgai()
120 nvkm_wrvgai(struct nvkm_device *device, int head, u16 port, u8 index, u8 value) nvkm_wrvgai() argument
122 if (port == 0x03c4) nvkm_wrvgas(device, head, index, value); nvkm_wrvgai()
123 else if (port == 0x03ce) nvkm_wrvgag(device, head, index, value); nvkm_wrvgai()
124 else if (port == 0x03d4) nvkm_wrvgac(device, head, index, value); nvkm_wrvgai()
128 nvkm_lockvgac(struct nvkm_device *device, bool lock) nvkm_lockvgac() argument
130 bool locked = !nvkm_rdvgac(device, 0, 0x1f); nvkm_lockvgac()
132 if (device->card_type < NV_50) nvkm_lockvgac()
133 nvkm_wrvgac(device, 0, 0x1f, data); nvkm_lockvgac()
135 nvkm_wrvgac(device, 0, 0x3f, data); nvkm_lockvgac()
136 if (device->chipset == 0x11) { nvkm_lockvgac()
137 if (!(nvkm_rd32(device, 0x001084) & 0x10000000)) nvkm_lockvgac()
138 nvkm_wrvgac(device, 1, 0x1f, data); nvkm_lockvgac()
162 nvkm_rdvgaowner(struct nvkm_device *device) nvkm_rdvgaowner() argument
164 if (device->card_type < NV_50) { nvkm_rdvgaowner()
165 if (device->chipset == 0x11) { nvkm_rdvgaowner()
166 u32 tied = nvkm_rd32(device, 0x001084) & 0x10000000; nvkm_rdvgaowner()
168 u8 slA = nvkm_rdvgac(device, 0, 0x28) & 0x80; nvkm_rdvgaowner()
169 u8 tvA = nvkm_rdvgac(device, 0, 0x33) & 0x01; nvkm_rdvgaowner()
170 u8 slB = nvkm_rdvgac(device, 1, 0x28) & 0x80; nvkm_rdvgaowner()
171 u8 tvB = nvkm_rdvgac(device, 1, 0x33) & 0x01; nvkm_rdvgaowner()
181 return nvkm_rdvgac(device, 0, 0x44); nvkm_rdvgaowner()
188 nvkm_wrvgaowner(struct nvkm_device *device, u8 select) nvkm_wrvgaowner() argument
190 if (device->card_type < NV_50) { nvkm_wrvgaowner()
192 if (device->chipset == 0x11) { nvkm_wrvgaowner()
194 nvkm_rdvgac(device, 0, 0x1f); nvkm_wrvgaowner()
195 nvkm_rdvgac(device, 1, 0x1f); nvkm_wrvgaowner()
198 nvkm_wrvgac(device, 0, 0x44, owner); nvkm_wrvgaowner()
200 if (device->chipset == 0x11) { nvkm_wrvgaowner()
201 nvkm_wrvgac(device, 0, 0x2e, owner); nvkm_wrvgaowner()
202 nvkm_wrvgac(device, 0, 0x2e, owner); nvkm_wrvgaowner()
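
Every helper in vga.c is the same two-step indexed-I/O idiom: write a register index to the port (0x03c4 sequencer, 0x03ce graphics, 0x03d4 CRTC), then read or write the data one port higher. A tiny model of the idiom over a simulated register bank; the port constant mirrors the excerpt, everything else is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define PORT_SEQ 0x03c4          /* index port; data lives at 0x03c5 */

    /* Simulated indexed bank: 256 registers behind one port pair. */
    static uint8_t regs[256];
    static uint8_t cur_index;

    static void outb(uint16_t port, uint8_t val)
    {
        if (port == PORT_SEQ)
            cur_index = val;         /* index write selects a register */
        else if (port == PORT_SEQ + 1)
            regs[cur_index] = val;   /* data write hits the selected one */
    }

    static uint8_t inb(uint16_t port)
    {
        return (port == PORT_SEQ + 1) ? regs[cur_index] : 0;
    }

    /* The nvkm_wrvgas()/nvkm_rdvgas() pattern: index first, then data. */
    static void wrvga(uint8_t index, uint8_t value)
    {
        outb(PORT_SEQ, index);
        outb(PORT_SEQ + 1, value);
    }

    static uint8_t rdvga(uint8_t index)
    {
        outb(PORT_SEQ, index);
        return inb(PORT_SEQ + 1);
    }

    int main(void)
    {
        wrvga(0x1f, 0x57);
        printf("reg 0x1f = %#x\n", rdvga(0x1f));
        return 0;
    }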
H A Dnv04.c35 struct nvkm_device *device = disp->engine.subdev.device; nv04_disp_vblank_init() local
36 nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000001); nv04_disp_vblank_init()
42 struct nvkm_device *device = disp->engine.subdev.device; nv04_disp_vblank_fini() local
43 nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000000); nv04_disp_vblank_fini()
50 struct nvkm_device *device = subdev->device; nv04_disp_intr() local
51 u32 crtc0 = nvkm_rd32(device, 0x600100); nv04_disp_intr()
52 u32 crtc1 = nvkm_rd32(device, 0x602100); nv04_disp_intr()
57 nvkm_wr32(device, 0x600100, 0x00000001); nv04_disp_intr()
62 nvkm_wr32(device, 0x602100, 0x00000001); nv04_disp_intr()
65 if (device->chipset >= 0x10 && device->chipset <= 0x40) { nv04_disp_intr()
66 pvideo = nvkm_rd32(device, 0x8100); nv04_disp_intr()
69 nvkm_wr32(device, 0x8100, pvideo); nv04_disp_intr()
82 nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) nv04_disp_new() argument
84 return nvkm_disp_new_(&nv04_disp, device, index, 2, pdisp); nv04_disp_new()
H A Dhdmig84.c34 struct nvkm_device *device = disp->base.engine.subdev.device; g84_hdmi_ctrl() local
58 nvkm_mask(device, 0x6165a4 + hoff, 0x40000000, 0x00000000); g84_hdmi_ctrl()
59 nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000); g84_hdmi_ctrl()
60 nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000); g84_hdmi_ctrl()
65 nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000); g84_hdmi_ctrl()
66 nvkm_wr32(device, 0x616528 + hoff, 0x000d0282); g84_hdmi_ctrl()
67 nvkm_wr32(device, 0x61652c + hoff, 0x0000006f); g84_hdmi_ctrl()
68 nvkm_wr32(device, 0x616530 + hoff, 0x00000000); g84_hdmi_ctrl()
69 nvkm_wr32(device, 0x616534 + hoff, 0x00000000); g84_hdmi_ctrl()
70 nvkm_wr32(device, 0x616538 + hoff, 0x00000000); g84_hdmi_ctrl()
71 nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000001); g84_hdmi_ctrl()
74 nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000); g84_hdmi_ctrl()
75 nvkm_wr32(device, 0x616508 + hoff, 0x000a0184); g84_hdmi_ctrl()
76 nvkm_wr32(device, 0x61650c + hoff, 0x00000071); g84_hdmi_ctrl()
77 nvkm_wr32(device, 0x616510 + hoff, 0x00000000); g84_hdmi_ctrl()
78 nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000001); g84_hdmi_ctrl()
80 nvkm_mask(device, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ g84_hdmi_ctrl()
81 nvkm_mask(device, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ g84_hdmi_ctrl()
82 nvkm_mask(device, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ g84_hdmi_ctrl()
85 nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ g84_hdmi_ctrl()
86 nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ g84_hdmi_ctrl()
87 nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ g84_hdmi_ctrl()
90 nvkm_mask(device, 0x6165a4 + hoff, 0x5f1f007f, ctrl); g84_hdmi_ctrl()
H A Dhdmigt215.c35 struct nvkm_device *device = disp->base.engine.subdev.device; gt215_hdmi_ctrl() local
59 nvkm_mask(device, 0x61c5a4 + soff, 0x40000000, 0x00000000); gt215_hdmi_ctrl()
60 nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000); gt215_hdmi_ctrl()
61 nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000); gt215_hdmi_ctrl()
66 nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000); gt215_hdmi_ctrl()
67 nvkm_wr32(device, 0x61c528 + soff, 0x000d0282); gt215_hdmi_ctrl()
68 nvkm_wr32(device, 0x61c52c + soff, 0x0000006f); gt215_hdmi_ctrl()
69 nvkm_wr32(device, 0x61c530 + soff, 0x00000000); gt215_hdmi_ctrl()
70 nvkm_wr32(device, 0x61c534 + soff, 0x00000000); gt215_hdmi_ctrl()
71 nvkm_wr32(device, 0x61c538 + soff, 0x00000000); gt215_hdmi_ctrl()
72 nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000001); gt215_hdmi_ctrl()
75 nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000); gt215_hdmi_ctrl()
76 nvkm_wr32(device, 0x61c508 + soff, 0x000a0184); gt215_hdmi_ctrl()
77 nvkm_wr32(device, 0x61c50c + soff, 0x00000071); gt215_hdmi_ctrl()
78 nvkm_wr32(device, 0x61c510 + soff, 0x00000000); gt215_hdmi_ctrl()
79 nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000001); gt215_hdmi_ctrl()
81 nvkm_mask(device, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ gt215_hdmi_ctrl()
82 nvkm_mask(device, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ gt215_hdmi_ctrl()
83 nvkm_mask(device, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ gt215_hdmi_ctrl()
86 nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ gt215_hdmi_ctrl()
87 nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ gt215_hdmi_ctrl()
88 nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ gt215_hdmi_ctrl()
91 nvkm_mask(device, 0x61c5a4 + soff, 0x5f1f007f, ctrl); gt215_hdmi_ctrl()
H A Drootgf119.c37 struct nvkm_device *device = disp->base.engine.subdev.device; gf119_disp_root_scanoutpos() local
38 const u32 total = nvkm_rd32(device, 0x640414 + (head * 0x300)); gf119_disp_root_scanoutpos()
39 const u32 blanke = nvkm_rd32(device, 0x64041c + (head * 0x300)); gf119_disp_root_scanoutpos()
40 const u32 blanks = nvkm_rd32(device, 0x640420 + (head * 0x300)); gf119_disp_root_scanoutpos()
58 nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff; gf119_disp_root_scanoutpos()
61 nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff; gf119_disp_root_scanoutpos()
71 struct nvkm_device *device = root->disp->base.engine.subdev.device; gf119_disp_root_fini() local
73 nvkm_wr32(device, 0x6100b0, 0x00000000); gf119_disp_root_fini()
80 struct nvkm_device *device = disp->base.engine.subdev.device; gf119_disp_root_init() local
91 tmp = nvkm_rd32(device, 0x616104 + (i * 0x800)); gf119_disp_root_init()
92 nvkm_wr32(device, 0x6101b4 + (i * 0x800), tmp); gf119_disp_root_init()
93 tmp = nvkm_rd32(device, 0x616108 + (i * 0x800)); gf119_disp_root_init()
94 nvkm_wr32(device, 0x6101b8 + (i * 0x800), tmp); gf119_disp_root_init()
95 tmp = nvkm_rd32(device, 0x61610c + (i * 0x800)); gf119_disp_root_init()
96 nvkm_wr32(device, 0x6101bc + (i * 0x800), tmp); gf119_disp_root_init()
101 tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800)); gf119_disp_root_init()
102 nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp); gf119_disp_root_init()
107 tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800)); gf119_disp_root_init()
108 nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp); gf119_disp_root_init()
112 if (nvkm_rd32(device, 0x6100ac) & 0x00000100) { gf119_disp_root_init()
113 nvkm_wr32(device, 0x6100ac, 0x00000100); gf119_disp_root_init()
114 nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000); gf119_disp_root_init()
115 if (nvkm_msec(device, 2000, gf119_disp_root_init()
116 if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002)) gf119_disp_root_init()
123 nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9); gf119_disp_root_init()
126 nvkm_wr32(device, 0x610090, 0x00000000); gf119_disp_root_init()
127 nvkm_wr32(device, 0x6100a0, 0x00000000); gf119_disp_root_init()
128 nvkm_wr32(device, 0x6100b0, 0x00000307); gf119_disp_root_init()
137 nvkm_mask(device, 0x616308 + (i * 0x800), 0x00000111, 0x00000010); gf119_disp_root_init()
H A Dpiocnv50.c34 struct nvkm_device *device = subdev->device; nv50_disp_pioc_fini() local
37 nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000); nv50_disp_pioc_fini()
38 if (nvkm_msec(device, 2000, nv50_disp_pioc_fini()
39 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) nv50_disp_pioc_fini()
43 nvkm_rd32(device, 0x610200 + (chid * 0x10))); nv50_disp_pioc_fini()
52 struct nvkm_device *device = subdev->device; nv50_disp_pioc_init() local
55 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000); nv50_disp_pioc_init()
56 if (nvkm_msec(device, 2000, nv50_disp_pioc_init()
57 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) nv50_disp_pioc_init()
61 nvkm_rd32(device, 0x610200 + (chid * 0x10))); nv50_disp_pioc_init()
65 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001); nv50_disp_pioc_init()
66 if (nvkm_msec(device, 2000, nv50_disp_pioc_init()
67 u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10)); nv50_disp_pioc_init()
72 nvkm_rd32(device, 0x610200 + (chid * 0x10))); nv50_disp_pioc_init()
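
The nvkm_msec(device, 2000, ...) construct running through these channel init/fini paths is a poll-with-timeout macro: the body is re-evaluated until it breaks out or roughly 2000 ms elapse, and the callers treat a timeout as an error. A minimal userspace sketch of such a helper; poll_timeout_ms() and its clock source are assumptions, not the nvkm implementation:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* Re-check cond() until it holds or timeout_ms elapses; returns 0 on
     * success, -1 on timeout, mirroring how the callers above log a
     * timeout message and bail out. */
    static int poll_timeout_ms(bool (*cond)(void), long timeout_ms)
    {
        long deadline = now_ms() + timeout_ms;

        do {
            if (cond())
                return 0;
        } while (now_ms() < deadline);

        return -1;
    }

    static int fake_status;
    static bool channel_idle(void) { return ++fake_status > 1000; }

    int main(void)
    {
        if (poll_timeout_ms(channel_idle, 2000) < 0)
            puts("init: timeout");
        else
            puts("channel idle");
        return 0;
    }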
H A Ddmacgf119.c44 struct nvkm_device *device = subdev->device; gf119_disp_dmac_fini() local
48 nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000); gf119_disp_dmac_fini()
49 nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000); gf119_disp_dmac_fini()
50 if (nvkm_msec(device, 2000, gf119_disp_dmac_fini()
51 if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000)) gf119_disp_dmac_fini()
55 nvkm_rd32(device, 0x610490 + (chid * 0x10))); gf119_disp_dmac_fini()
59 nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000); gf119_disp_dmac_fini()
60 nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000); gf119_disp_dmac_fini()
68 struct nvkm_device *device = subdev->device; gf119_disp_dmac_init() local
72 nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); gf119_disp_dmac_init()
75 nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push); gf119_disp_dmac_init()
76 nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000); gf119_disp_dmac_init()
77 nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001); gf119_disp_dmac_init()
78 nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); gf119_disp_dmac_init()
79 nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); gf119_disp_dmac_init()
80 nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); gf119_disp_dmac_init()
83 if (nvkm_msec(device, 2000, gf119_disp_dmac_init()
84 if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000)) gf119_disp_dmac_init()
88 nvkm_rd32(device, 0x610490 + (chid * 0x10))); gf119_disp_dmac_init()
H A Dpiocgf119.c34 struct nvkm_device *device = subdev->device; gf119_disp_pioc_fini() local
37 nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000); gf119_disp_pioc_fini()
38 if (nvkm_msec(device, 2000, gf119_disp_pioc_fini()
39 if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000)) gf119_disp_pioc_fini()
43 nvkm_rd32(device, 0x610490 + (chid * 0x10))); gf119_disp_pioc_fini()
47 nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000); gf119_disp_pioc_fini()
48 nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000); gf119_disp_pioc_fini()
56 struct nvkm_device *device = subdev->device; gf119_disp_pioc_init() local
60 nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); gf119_disp_pioc_init()
63 nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001); gf119_disp_pioc_init()
64 if (nvkm_msec(device, 2000, gf119_disp_pioc_init()
65 u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10)); gf119_disp_pioc_init()
70 nvkm_rd32(device, 0x610490 + (chid * 0x10))); gf119_disp_pioc_init()
H A Dsorgm204.c44 struct nvkm_device *device = outp->disp->engine.subdev.device; gm204_sor_magic() local
48 nvkm_mask(device, 0x612308 + soff, 0x0000001f, 0x00000000 | data); gm204_sor_magic()
50 nvkm_mask(device, 0x612388 + soff, 0x0000001f, 0x00000010 | data); gm204_sor_magic()
54 gm204_sor_dp_lane_map(struct nvkm_device *device, u8 lane) gm204_sor_dp_lane_map() argument
62 struct nvkm_device *device = outp->base.disp->engine.subdev.device; gm204_sor_dp_pattern() local
66 nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); gm204_sor_dp_pattern()
68 nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); gm204_sor_dp_pattern()
75 struct nvkm_device *device = outp->base.disp->engine.subdev.device; gm204_sor_dp_lnk_pwr() local
81 mask |= 1 << (gm204_sor_dp_lane_map(device, i) >> 3); gm204_sor_dp_lnk_pwr()
83 nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask); gm204_sor_dp_lnk_pwr()
84 nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000); gm204_sor_dp_lnk_pwr()
85 nvkm_msec(device, 2000, gm204_sor_dp_lnk_pwr()
86 if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000)) gm204_sor_dp_lnk_pwr()
96 struct nvkm_device *device = outp->base.disp->engine.subdev.device; gm204_sor_dp_drv_ctl() local
97 struct nvkm_bios *bios = device->bios; gm204_sor_dp_drv_ctl()
98 const u32 shift = gm204_sor_dp_lane_map(device, ln); gm204_sor_dp_drv_ctl()
117 data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift); gm204_sor_dp_drv_ctl()
118 data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift); gm204_sor_dp_drv_ctl()
119 data[2] = nvkm_rd32(device, 0x61c130 + loff); gm204_sor_dp_drv_ctl()
122 nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift)); gm204_sor_dp_drv_ctl()
123 nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift)); gm204_sor_dp_drv_ctl()
124 nvkm_wr32(device, 0x61c130 + loff, data[2]); gm204_sor_dp_drv_ctl()
125 data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift); gm204_sor_dp_drv_ctl()
126 nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift)); gm204_sor_dp_drv_ctl()
H A Dsorg94.c60 g94_sor_dp_lane_map(struct nvkm_device *device, u8 lane) g94_sor_dp_lane_map() argument
65 if (device->chipset >= 0x110) g94_sor_dp_lane_map()
67 if (device->chipset == 0xaf) g94_sor_dp_lane_map()
75 struct nvkm_device *device = outp->base.disp->engine.subdev.device; g94_sor_dp_pattern() local
77 nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24); g94_sor_dp_pattern()
84 struct nvkm_device *device = outp->base.disp->engine.subdev.device; g94_sor_dp_lnk_pwr() local
90 mask |= 1 << (g94_sor_dp_lane_map(device, i) >> 3); g94_sor_dp_lnk_pwr()
92 nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask); g94_sor_dp_lnk_pwr()
93 nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000); g94_sor_dp_lnk_pwr()
94 nvkm_msec(device, 2000, g94_sor_dp_lnk_pwr()
95 if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000)) g94_sor_dp_lnk_pwr()
104 struct nvkm_device *device = outp->base.disp->engine.subdev.device; g94_sor_dp_lnk_ctl() local
116 nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor); g94_sor_dp_lnk_ctl()
117 nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl); g94_sor_dp_lnk_ctl()
124 struct nvkm_device *device = outp->base.disp->engine.subdev.device; g94_sor_dp_drv_ctl() local
125 struct nvkm_bios *bios = device->bios; g94_sor_dp_drv_ctl()
126 const u32 shift = g94_sor_dp_lane_map(device, ln); g94_sor_dp_drv_ctl()
144 data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift); g94_sor_dp_drv_ctl()
145 data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift); g94_sor_dp_drv_ctl()
146 data[2] = nvkm_rd32(device, 0x61c130 + loff); g94_sor_dp_drv_ctl()
149 nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift)); g94_sor_dp_drv_ctl()
150 nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift)); g94_sor_dp_drv_ctl()
151 nvkm_wr32(device, 0x61c130 + loff, data[2]); g94_sor_dp_drv_ctl()
H A Dgf119.c36 struct nvkm_device *device = disp->base.engine.subdev.device; gf119_disp_vblank_init() local
37 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); gf119_disp_vblank_init()
43 struct nvkm_device *device = disp->base.engine.subdev.device; gf119_disp_vblank_fini() local
44 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); gf119_disp_vblank_fini()
53 struct nvkm_bios *bios = subdev->device->bios; exec_lookup()
98 struct nvkm_device *device = subdev->device; exec_script() local
99 struct nvkm_bios *bios = device->bios; exec_script()
107 ctrl = nvkm_rd32(device, 0x640180 + (or * 0x20)); exec_script()
136 struct nvkm_device *device = subdev->device; exec_clkcmp() local
137 struct nvkm_bios *bios = device->bios; exec_clkcmp()
146 ctrl = nvkm_rd32(device, 0x660180 + (or * 0x20)); exec_clkcmp()
213 .bios = subdev->device->bios, gf119_disp_intr_unk2_0()
228 struct nvkm_device *device = disp->base.engine.subdev.device; gf119_disp_intr_unk2_1() local
229 struct nvkm_devinit *devinit = device->devinit; gf119_disp_intr_unk2_1()
230 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000; gf119_disp_intr_unk2_1()
233 nvkm_wr32(device, 0x612200 + (head * 0x800), 0x00000000); gf119_disp_intr_unk2_1()
240 struct nvkm_device *device = disp->base.engine.subdev.device; gf119_disp_intr_unk2_2_tu() local
242 const u32 ctrl = nvkm_rd32(device, 0x660200 + (or * 0x020)); gf119_disp_intr_unk2_2_tu()
243 const u32 conf = nvkm_rd32(device, 0x660404 + (head * 0x300)); gf119_disp_intr_unk2_2_tu()
244 const s32 vactive = nvkm_rd32(device, 0x660414 + (head * 0x300)) & 0xffff; gf119_disp_intr_unk2_2_tu()
245 const s32 vblanke = nvkm_rd32(device, 0x66041c + (head * 0x300)) & 0xffff; gf119_disp_intr_unk2_2_tu()
246 const s32 vblanks = nvkm_rd32(device, 0x660420 + (head * 0x300)) & 0xffff; gf119_disp_intr_unk2_2_tu()
247 const u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000; gf119_disp_intr_unk2_2_tu()
254 u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff); gf119_disp_intr_unk2_2_tu()
255 u32 clksor = nvkm_rd32(device, 0x612300 + soff); gf119_disp_intr_unk2_2_tu()
268 nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, value); gf119_disp_intr_unk2_2_tu()
275 nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, value); gf119_disp_intr_unk2_2_tu()
295 nvkm_wr32(device, 0x616610 + hoff, value); gf119_disp_intr_unk2_2_tu()
301 struct nvkm_device *device = disp->base.engine.subdev.device; gf119_disp_intr_unk2_2() local
303 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000; gf119_disp_intr_unk2_2()
312 u32 sync = nvkm_rd32(device, 0x660404 + (head * 0x300)); gf119_disp_intr_unk2_2()
339 nvkm_mask(device, addr, 0x007c0000, 0x00280000); gf119_disp_intr_unk2_2()
349 nvkm_mask(device, addr, 0x00000707, data); gf119_disp_intr_unk2_2()
355 struct nvkm_device *device = disp->base.engine.subdev.device; gf119_disp_intr_unk4_0() local
356 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000; gf119_disp_intr_unk4_0()
368 struct nvkm_device *device = subdev->device; gf119_disp_intr_supervisor() local
374 mask[head] = nvkm_rd32(device, 0x6101d4 + (head * 0x800)); gf119_disp_intr_supervisor()
417 nvkm_wr32(device, 0x6101d4 + (head * 0x800), 0x00000000); gf119_disp_intr_supervisor()
418 nvkm_wr32(device, 0x6101d0, 0x80000000); gf119_disp_intr_supervisor()
425 struct nvkm_device *device = subdev->device; gf119_disp_intr_error() local
426 u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12)); gf119_disp_intr_error()
427 u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12)); gf119_disp_intr_error()
428 u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12)); gf119_disp_intr_error()
443 nvkm_wr32(device, 0x61009c, (1 << chid)); gf119_disp_intr_error()
444 nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000); gf119_disp_intr_error()
451 struct nvkm_device *device = subdev->device; gf119_disp_intr() local
452 u32 intr = nvkm_rd32(device, 0x610088); gf119_disp_intr()
456 u32 stat = nvkm_rd32(device, 0x61008c); gf119_disp_intr()
460 nvkm_wr32(device, 0x61008c, 1 << chid); gf119_disp_intr()
466 u32 stat = nvkm_rd32(device, 0x61009c); gf119_disp_intr()
474 u32 stat = nvkm_rd32(device, 0x6100ac); gf119_disp_intr()
478 nvkm_wr32(device, 0x6100ac, disp->super); gf119_disp_intr()
484 nvkm_wr32(device, 0x6100ac, stat); gf119_disp_intr()
493 u32 stat = nvkm_rd32(device, 0x6100bc + (i * 0x800)); gf119_disp_intr()
496 nvkm_mask(device, 0x6100bc + (i * 0x800), 0, 0); gf119_disp_intr()
497 nvkm_rd32(device, 0x6100c0 + (i * 0x800)); gf119_disp_intr()
503 gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device, gf119_disp_new_() argument
506 u32 heads = nvkm_rd32(device, 0x022448); gf119_disp_new_()
507 return nv50_disp_new_(func, device, index, heads, pdisp); gf119_disp_new_()
533 gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) gf119_disp_new() argument
535 return gf119_disp_new_(&gf119_disp, device, index, pdisp); gf119_disp_new()
H A Ddacnv50.c36 struct nvkm_device *device = disp->base.engine.subdev.device; nv50_dac_power() local
57 nvkm_msec(device, 2000, nv50_dac_power()
58 if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000)) nv50_dac_power()
61 nvkm_mask(device, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat); nv50_dac_power()
62 nvkm_msec(device, 2000, nv50_dac_power()
63 if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000)) nv50_dac_power()
73 struct nvkm_device *device = subdev->device; nv50_dac_sense() local
91 nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80150000); nv50_dac_sense()
92 nvkm_msec(device, 2000, nv50_dac_sense()
93 if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000)) nv50_dac_sense()
97 nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval); nv50_dac_sense()
100 loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000); nv50_dac_sense()
102 nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80550000); nv50_dac_sense()
103 nvkm_msec(device, 2000, nv50_dac_sense()
104 if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000)) nv50_dac_sense()
H A Dhdmigk104.c34 struct nvkm_device *device = disp->base.engine.subdev.device; gk104_hdmi_ctrl() local
58 nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000); gk104_hdmi_ctrl()
59 nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000); gk104_hdmi_ctrl()
60 nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000); gk104_hdmi_ctrl()
65 nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000); gk104_hdmi_ctrl()
66 nvkm_wr32(device, 0x690008 + hdmi, 0x000d0282); gk104_hdmi_ctrl()
67 nvkm_wr32(device, 0x69000c + hdmi, 0x0000006f); gk104_hdmi_ctrl()
68 nvkm_wr32(device, 0x690010 + hdmi, 0x00000000); gk104_hdmi_ctrl()
69 nvkm_wr32(device, 0x690014 + hdmi, 0x00000000); gk104_hdmi_ctrl()
70 nvkm_wr32(device, 0x690018 + hdmi, 0x00000000); gk104_hdmi_ctrl()
71 nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000001); gk104_hdmi_ctrl()
74 nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000); gk104_hdmi_ctrl()
75 nvkm_wr32(device, 0x6900cc + hdmi, 0x00000010); gk104_hdmi_ctrl()
76 nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000001); gk104_hdmi_ctrl()
79 nvkm_wr32(device, 0x690080 + hdmi, 0x82000000); gk104_hdmi_ctrl()
82 nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl); gk104_hdmi_ctrl()
H A Dcorenv50.c172 struct nvkm_device *device = subdev->device; nv50_disp_core_fini() local
175 nvkm_mask(device, 0x610200, 0x00000010, 0x00000000); nv50_disp_core_fini()
176 nvkm_mask(device, 0x610200, 0x00000003, 0x00000000); nv50_disp_core_fini()
177 if (nvkm_msec(device, 2000, nv50_disp_core_fini()
178 if (!(nvkm_rd32(device, 0x610200) & 0x001e0000)) nv50_disp_core_fini()
182 nvkm_rd32(device, 0x610200)); nv50_disp_core_fini()
186 nvkm_mask(device, 0x610028, 0x00010001, 0x00000000); nv50_disp_core_fini()
194 struct nvkm_device *device = subdev->device; nv50_disp_core_init() local
197 nvkm_mask(device, 0x610028, 0x00010000, 0x00010000); nv50_disp_core_init()
200 if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000) nv50_disp_core_init()
201 nvkm_mask(device, 0x610200, 0x00800000, 0x00800000); nv50_disp_core_init()
202 if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000) nv50_disp_core_init()
203 nvkm_mask(device, 0x610200, 0x00600000, 0x00600000); nv50_disp_core_init()
206 nvkm_wr32(device, 0x610204, chan->push); nv50_disp_core_init()
207 nvkm_wr32(device, 0x610208, 0x00010000); nv50_disp_core_init()
208 nvkm_wr32(device, 0x61020c, 0x00000000); nv50_disp_core_init()
209 nvkm_mask(device, 0x610200, 0x00000010, 0x00000010); nv50_disp_core_init()
210 nvkm_wr32(device, 0x640000, 0x00000000); nv50_disp_core_init()
211 nvkm_wr32(device, 0x610200, 0x01000013); nv50_disp_core_init()
214 if (nvkm_msec(device, 2000, nv50_disp_core_init()
215 if (!(nvkm_rd32(device, 0x610200) & 0x80000000)) nv50_disp_core_init()
219 nvkm_rd32(device, 0x610200)); nv50_disp_core_init()
/linux-4.4.14/drivers/net/ethernet/pasemi/
H A DMakefile2 # Makefile for the PA Semi network device drivers.
/linux-4.4.14/drivers/net/ethernet/qualcomm/
H A DMakefile2 # Makefile for the Qualcomm network device drivers.
/linux-4.4.14/drivers/net/ethernet/renesas/
H A DMakefile2 # Makefile for the Renesas device drivers.
/linux-4.4.14/drivers/crypto/caam/
H A Djr.h11 struct device *caam_jr_alloc(void);
12 void caam_jr_free(struct device *rdev);
13 int caam_jr_enqueue(struct device *dev, u32 *desc,
14 void (*cbk)(struct device *dev, u32 *desc, u32 status,
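
jr.h sketches CAAM's job-ring contract: a driver grabs a ring with caam_jr_alloc(), submits a descriptor together with a completion callback via caam_jr_enqueue(), and the callback later fires with the job's status. A toy userspace model of that enqueue-with-callback calling convention; the ring and descriptor contents are placeholders, only the callback shape follows the header:

    #include <stdint.h>
    #include <stdio.h>

    typedef void (*job_cbk)(uint32_t *desc, uint32_t status, void *areq);

    struct job {
        uint32_t *desc;
        job_cbk   cbk;
        void     *areq;
    };

    static struct job ring[8];
    static int head, tail;

    static int jr_enqueue(uint32_t *desc, job_cbk cbk, void *areq)
    {
        if ((head + 1) % 8 == tail)
            return -1;                      /* ring full: caller retries */
        ring[head] = (struct job){ desc, cbk, areq };
        head = (head + 1) % 8;
        return 0;
    }

    static void jr_complete_one(uint32_t status)    /* normally IRQ-driven */
    {
        struct job *j = &ring[tail];
        tail = (tail + 1) % 8;
        j->cbk(j->desc, status, j->areq);
    }

    static void done(uint32_t *desc, uint32_t status, void *areq)
    {
        (void)desc; (void)areq;
        printf("job finished, status=%u\n", status);
    }

    int main(void)
    {
        uint32_t desc[4] = { 0 };           /* placeholder descriptor */
        if (jr_enqueue(desc, done, NULL) == 0)
            jr_complete_one(0);             /* 0: success */
        return 0;
    }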
/linux-4.4.14/include/media/
H A Dir-rx51.h7 int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
H A Dmedia-devnode.h2 * Media device node
25 * device nodes.
33 #include <linux/device.h>
55 * struct media_devnode - Media device node
56 * @fops: pointer to struct media_file_operations with media device ops
57 * @dev: struct device pointer for the media controller device
58 * @cdev: struct cdev pointer character device
59 * @parent: parent device
60 * @minor: device node minor number
64 * This structure represents a media-related device node.
66 * The @parent is a physical device. It must be set by core or device drivers
70 /* device ops */
74 struct device dev; /* media device */
75 struct cdev cdev; /* character device */
76 struct device *parent; /* device parent */
78 /* device info */
H A Dv4l2-flash-led-class.h2 * V4L2 flash LED sub-device registration helpers.
40 /* convert intensity to brightness in a device specific manner */
43 /* convert brightness to intensity in a device specific manner */
49 * struct v4l2_flash_config - V4L2 Flash sub-device initialization data
55 * device can report; corresponding LED_FAULT* bit
69 * struct v4l2_flash - Flash sub-device context
70 * @fled_cdev: LED flash class device controlled by this sub-device
71 * @iled_cdev: LED class device representing indicator LED associated
72 * with the LED flash class device
74 * @sd: V4L2 sub-device
77 * the sub-device state
102 * v4l2_flash_init - initialize V4L2 flash led sub-device
103 * @dev: flash device, e.g. an I2C device
104 * @of_node: of_node of the LED, may be NULL if the same as device's
105 * @fled_cdev: LED flash class device to wrap
106 * @iled_cdev: LED flash class device representing indicator LED associated
108 * @ops: V4L2 Flash device ops
109 * @config: initialization data for V4L2 Flash sub-device
111 * Create V4L2 Flash sub-device wrapping given LED subsystem device.
118 struct device *dev, struct device_node *of_node,
125 * v4l2_flash_release - release V4L2 Flash sub-device
126 * @v4l2_flash: the V4L2 Flash sub-device to release
128 * Release V4L2 Flash sub-device.
134 struct device *dev, struct device_node *of_node, v4l2_flash_init()
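
The v4l2-flash-led-class.h documentation pairs v4l2_flash_init() with v4l2_flash_release(): a LED flash class device is wrapped as a V4L2 sub-device at probe time and unwrapped at removal. A hedged usage sketch under those docs; struct my_flash, my_ops, and my_cfg are hypothetical, and only the two calls and their argument roles come from the header:

    #include <linux/err.h>
    #include <media/v4l2-flash-led-class.h>

    struct my_flash {
        struct device *dev;
        struct led_classdev_flash fled_cdev;
        struct v4l2_flash *v4l2_flash;
    };

    static const struct v4l2_flash_ops my_ops;   /* intensity conversions */
    static struct v4l2_flash_config my_cfg;      /* limits and fault mask */

    static int my_register_v4l2(struct my_flash *flash)
    {
        /* of_node NULL: use the device's own node; iled_cdev NULL: no
         * indicator LED is associated with this flash. */
        flash->v4l2_flash = v4l2_flash_init(flash->dev, NULL,
                                            &flash->fled_cdev, NULL,
                                            &my_ops, &my_cfg);
        if (IS_ERR(flash->v4l2_flash))
            return PTR_ERR(flash->v4l2_flash);
        return 0;
    }

    static void my_unregister_v4l2(struct my_flash *flash)
    {
        v4l2_flash_release(flash->v4l2_flash);
    }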
/linux-4.4.14/arch/sparc/crypto/
H A Dcrop_devid.c4 /* This is a dummy device table linked into all of the crypto
6 * mechanisms in userspace which scan the OF device tree and
7 * load any modules which have device table entries that
8 * match OF device nodes.
/linux-4.4.14/sound/usb/usx2y/
H A DusX2Yhwdep.h4 int usX2Y_hwdep_new(struct snd_card *card, struct usb_device* device);
/linux-4.4.14/drivers/hwmon/
H A Dadt7x10.h17 struct device;
20 int (*read_byte)(struct device *, u8 reg);
21 int (*write_byte)(struct device *, u8 reg, u8 data);
22 int (*read_word)(struct device *, u8 reg);
23 int (*write_word)(struct device *, u8 reg, u16 data);
26 int adt7x10_probe(struct device *dev, const char *name, int irq,
28 int adt7x10_remove(struct device *dev, int irq);
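
adt7x10.h is a textbook bus-abstraction ops table: the core sensor logic talks only through read/write function pointers, so the same driver body can sit behind an I2C or an SPI front-end. A self-contained model of the pattern; the names are hypothetical and only the ops-struct shape follows the header:

    #include <stdint.h>
    #include <stdio.h>

    /* Bus-agnostic core <-> bus-specific backend, adt7x10-style. */
    struct sensor_ops {
        int (*read_word)(void *bus, uint8_t reg);
        int (*write_word)(void *bus, uint8_t reg, uint16_t data);
    };

    /* One possible backend: a fake bus backed by an array. */
    static uint16_t fake_regs[256];

    static int fake_read_word(void *bus, uint8_t reg)
    {
        (void)bus;
        return fake_regs[reg];
    }

    static int fake_write_word(void *bus, uint8_t reg, uint16_t data)
    {
        (void)bus;
        fake_regs[reg] = data;
        return 0;
    }

    static const struct sensor_ops fake_ops = {
        .read_word  = fake_read_word,
        .write_word = fake_write_word,
    };

    /* Core code never learns which bus it is running on. */
    static int core_read_temp(const struct sensor_ops *ops, void *bus)
    {
        return ops->read_word(bus, 0x00);   /* 0x00: hypothetical temp reg */
    }

    int main(void)
    {
        fake_regs[0x00] = 0x1900;
        printf("raw temp = %#x\n", core_read_temp(&fake_ops, NULL));
        return 0;
    }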
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/
H A Dnv50.c39 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 128 * 4, nv50_mpeg_cclass_bind()
63 struct nvkm_device *device = subdev->device; nv50_mpeg_intr() local
64 u32 stat = nvkm_rd32(device, 0x00b100); nv50_mpeg_intr()
65 u32 type = nvkm_rd32(device, 0x00b230); nv50_mpeg_intr()
66 u32 mthd = nvkm_rd32(device, 0x00b234); nv50_mpeg_intr()
67 u32 data = nvkm_rd32(device, 0x00b238); nv50_mpeg_intr()
73 nvkm_wr32(device, 0x00b308, 0x00000100); nv50_mpeg_intr()
83 nvkm_wr32(device, 0x00b100, stat); nv50_mpeg_intr()
84 nvkm_wr32(device, 0x00b230, 0x00000001); nv50_mpeg_intr()
91 struct nvkm_device *device = subdev->device; nv50_mpeg_init() local
93 nvkm_wr32(device, 0x00b32c, 0x00000000); nv50_mpeg_init()
94 nvkm_wr32(device, 0x00b314, 0x00000100); nv50_mpeg_init()
95 nvkm_wr32(device, 0x00b0e0, 0x0000001a); nv50_mpeg_init()
97 nvkm_wr32(device, 0x00b220, 0x00000044); nv50_mpeg_init()
98 nvkm_wr32(device, 0x00b300, 0x00801ec1); nv50_mpeg_init()
99 nvkm_wr32(device, 0x00b390, 0x00000000); nv50_mpeg_init()
100 nvkm_wr32(device, 0x00b394, 0x00000000); nv50_mpeg_init()
101 nvkm_wr32(device, 0x00b398, 0x00000000); nv50_mpeg_init()
102 nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001); nv50_mpeg_init()
104 nvkm_wr32(device, 0x00b100, 0xffffffff); nv50_mpeg_init()
105 nvkm_wr32(device, 0x00b140, 0xffffffff); nv50_mpeg_init()
107 if (nvkm_msec(device, 2000, nv50_mpeg_init()
108 if (!(nvkm_rd32(device, 0x00b200) & 0x00000001)) nv50_mpeg_init()
112 nvkm_rd32(device, 0x00b200)); nv50_mpeg_init()
131 nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg) nv50_mpeg_new() argument
133 return nvkm_engine_new_(&nv50_mpeg, device, index, 0x00400002, nv50_mpeg_new()
H A Dnv31.c42 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align, nv31_mpeg_object_bind()
117 struct nvkm_device *device = mpeg->engine.subdev.device; nv31_mpeg_tile() local
119 nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch); nv31_mpeg_tile()
120 nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit); nv31_mpeg_tile()
121 nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr); nv31_mpeg_tile()
125 nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data) nv31_mpeg_mthd_dma() argument
128 u32 dma0 = nvkm_rd32(device, 0x700000 + inst); nv31_mpeg_mthd_dma()
129 u32 dma1 = nvkm_rd32(device, 0x700004 + inst); nv31_mpeg_mthd_dma()
130 u32 dma2 = nvkm_rd32(device, 0x700008 + inst); nv31_mpeg_mthd_dma()
140 nvkm_mask(device, 0x00b300, 0x00010000, nv31_mpeg_mthd_dma()
142 nvkm_wr32(device, 0x00b334, base); nv31_mpeg_mthd_dma()
143 nvkm_wr32(device, 0x00b324, size); nv31_mpeg_mthd_dma()
147 nvkm_mask(device, 0x00b300, 0x00020000, nv31_mpeg_mthd_dma()
149 nvkm_wr32(device, 0x00b360, base); nv31_mpeg_mthd_dma()
150 nvkm_wr32(device, 0x00b364, size); nv31_mpeg_mthd_dma()
156 nvkm_wr32(device, 0x00b370, base); nv31_mpeg_mthd_dma()
157 nvkm_wr32(device, 0x00b374, size); nv31_mpeg_mthd_dma()
166 struct nvkm_device *device = mpeg->engine.subdev.device; nv31_mpeg_mthd() local
171 return mpeg->func->mthd_dma(device, mthd, data); nv31_mpeg_mthd()
183 struct nvkm_device *device = subdev->device; nv31_mpeg_intr() local
184 u32 stat = nvkm_rd32(device, 0x00b100); nv31_mpeg_intr()
185 u32 type = nvkm_rd32(device, 0x00b230); nv31_mpeg_intr()
186 u32 mthd = nvkm_rd32(device, 0x00b234); nv31_mpeg_intr()
187 u32 data = nvkm_rd32(device, 0x00b238); nv31_mpeg_intr()
196 nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000); nv31_mpeg_intr()
206 nvkm_wr32(device, 0x00b100, stat); nv31_mpeg_intr()
207 nvkm_wr32(device, 0x00b230, 0x00000001); nv31_mpeg_intr()
223 struct nvkm_device *device = subdev->device; nv31_mpeg_init() local
226 nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ nv31_mpeg_init()
227 nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ nv31_mpeg_init()
230 nvkm_wr32(device, 0x00b32c, 0x00000000); nv31_mpeg_init()
231 nvkm_wr32(device, 0x00b314, 0x00000100); nv31_mpeg_init()
232 nvkm_wr32(device, 0x00b220, 0x00000031); nv31_mpeg_init()
233 nvkm_wr32(device, 0x00b300, 0x02001ec1); nv31_mpeg_init()
234 nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001); nv31_mpeg_init()
236 nvkm_wr32(device, 0x00b100, 0xffffffff); nv31_mpeg_init()
237 nvkm_wr32(device, 0x00b140, 0xffffffff); nv31_mpeg_init()
239 if (nvkm_msec(device, 2000, nv31_mpeg_init()
240 if (!(nvkm_rd32(device, 0x00b200) & 0x00000001)) nv31_mpeg_init()
244 nvkm_rd32(device, 0x00b200)); nv31_mpeg_init()
271 nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device, nv31_mpeg_new_() argument
281 return nvkm_engine_ctor(&nv31_mpeg_, device, index, 0x00000002, nv31_mpeg_new_()
291 nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg) nv31_mpeg_new() argument
293 return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg); nv31_mpeg_new()
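nv31_mpeg_mthd_dma() flips individual bits with nvkm_mask(device, addr, mask, data), a read-modify-write MMIO helper. A sketch of its semantics in terms of the plain accessors (simplified; the return value is the pre-modification contents):

	static u32 mask_sketch(struct nvkm_device *device, u32 addr,
			       u32 mask, u32 data)
	{
		u32 old = nvkm_rd32(device, addr);             /* read */
		nvkm_wr32(device, addr, (old & ~mask) | data); /* modify-write */
		return old;
	}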
/linux-4.4.14/include/asm-generic/
H A Ddma-coherent.h7 * Don't use them in device drivers.
9 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
11 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
13 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
19 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
22 void dma_release_declared_memory(struct device *dev);
24 void *dma_mark_declared_memory_occupied(struct device *dev,
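A driver with device-local memory declares it once at probe time so later coherent allocations are satisfied from that pool via dma_alloc_from_coherent(). A hedged sketch (the addresses are illustrative; on kernels of this vintage dma_declare_coherent_memory() returns the granted DMA_MEMORY_* flags and 0 on failure, which the check below relies on):

	static int example_declare_pool(struct device *dev)
	{
		/* hypothetical 1 MiB window at bus address 0xfe000000 */
		int rc = dma_declare_coherent_memory(dev, 0xfe000000,
						     0xfe000000, SZ_1M,
						     DMA_MEMORY_MAP);
		if (!rc)
			return -ENOMEM;	/* no pool granted */
		return 0;
	}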
H A Ddma-mapping-broken.h12 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
16 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
19 static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_alloc_attrs()
27 static inline void dma_free_attrs(struct device *dev, size_t size, dma_free_attrs()
39 dma_map_single(struct device *dev, void *ptr, size_t size,
43 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
47 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
51 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
55 dma_map_page(struct device *dev, struct page *page, unsigned long offset,
59 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
63 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
67 dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
72 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
80 dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
83 dma_supported(struct device *dev, u64 mask);
86 dma_set_mask(struct device *dev, u64 mask);
92 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
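These stubs mirror the streaming-DMA API proper; on architectures that do implement it, the canonical usage pairs every map with an error check and an unmap. A minimal sketch:

	static int example_map(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle = dma_map_single(dev, buf, len,
						   DMA_TO_DEVICE);

		if (dma_mapping_error(dev, handle))
			return -ENOMEM;	/* mapping failed, nothing to undo */
		/* ... start the transfer, wait for completion ... */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}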
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/
H A Dgm107.c32 struct nvkm_device *device = ltc->subdev.device; gm107_ltc_cbc_clear() local
33 nvkm_wr32(device, 0x17e270, start); gm107_ltc_cbc_clear()
34 nvkm_wr32(device, 0x17e274, limit); gm107_ltc_cbc_clear()
35 nvkm_wr32(device, 0x17e26c, 0x00000004); gm107_ltc_cbc_clear()
41 struct nvkm_device *device = ltc->subdev.device; gm107_ltc_cbc_wait() local
46 nvkm_msec(device, 2000, gm107_ltc_cbc_wait()
47 if (!nvkm_rd32(device, addr)) gm107_ltc_cbc_wait()
57 struct nvkm_device *device = ltc->subdev.device; gm107_ltc_zbc_clear_color() local
58 nvkm_mask(device, 0x17e338, 0x0000000f, i); gm107_ltc_zbc_clear_color()
59 nvkm_wr32(device, 0x17e33c, color[0]); gm107_ltc_zbc_clear_color()
60 nvkm_wr32(device, 0x17e340, color[1]); gm107_ltc_zbc_clear_color()
61 nvkm_wr32(device, 0x17e344, color[2]); gm107_ltc_zbc_clear_color()
62 nvkm_wr32(device, 0x17e348, color[3]); gm107_ltc_zbc_clear_color()
68 struct nvkm_device *device = ltc->subdev.device; gm107_ltc_zbc_clear_depth() local
69 nvkm_mask(device, 0x17e338, 0x0000000f, i); gm107_ltc_zbc_clear_depth()
70 nvkm_wr32(device, 0x17e34c, depth); gm107_ltc_zbc_clear_depth()
77 struct nvkm_device *device = subdev->device; gm107_ltc_lts_isr() local
79 u32 stat = nvkm_rd32(device, base + 0x00c); gm107_ltc_lts_isr()
83 nvkm_wr32(device, base + 0x00c, stat); gm107_ltc_lts_isr()
90 struct nvkm_device *device = ltc->subdev.device; gm107_ltc_intr() local
93 mask = nvkm_rd32(device, 0x00017c); gm107_ltc_intr()
105 struct nvkm_device *device = ltc->subdev.device; gm107_ltc_oneinit() local
106 const u32 parts = nvkm_rd32(device, 0x022438); gm107_ltc_oneinit()
107 const u32 mask = nvkm_rd32(device, 0x021c14); gm107_ltc_oneinit()
108 const u32 slice = nvkm_rd32(device, 0x17e280) >> 28; gm107_ltc_oneinit()
123 struct nvkm_device *device = ltc->subdev.device; gm107_ltc_init() local
124 u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001); gm107_ltc_init()
126 nvkm_wr32(device, 0x17e27c, ltc->ltc_nr); gm107_ltc_init()
127 nvkm_wr32(device, 0x17e278, ltc->tag_base); gm107_ltc_init()
128 nvkm_mask(device, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000); gm107_ltc_init()
146 gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) gm107_ltc_new() argument
148 return nvkm_ltc_new_(&gm107_ltc, device, index, pltc); gm107_ltc_new()
H A Dgf100.c33 struct nvkm_device *device = ltc->subdev.device; gf100_ltc_cbc_clear() local
34 nvkm_wr32(device, 0x17e8cc, start); gf100_ltc_cbc_clear()
35 nvkm_wr32(device, 0x17e8d0, limit); gf100_ltc_cbc_clear()
36 nvkm_wr32(device, 0x17e8c8, 0x00000004); gf100_ltc_cbc_clear()
42 struct nvkm_device *device = ltc->subdev.device; gf100_ltc_cbc_wait() local
47 nvkm_msec(device, 2000, gf100_ltc_cbc_wait()
48 if (!nvkm_rd32(device, addr)) gf100_ltc_cbc_wait()
58 struct nvkm_device *device = ltc->subdev.device; gf100_ltc_zbc_clear_color() local
59 nvkm_mask(device, 0x17ea44, 0x0000000f, i); gf100_ltc_zbc_clear_color()
60 nvkm_wr32(device, 0x17ea48, color[0]); gf100_ltc_zbc_clear_color()
61 nvkm_wr32(device, 0x17ea4c, color[1]); gf100_ltc_zbc_clear_color()
62 nvkm_wr32(device, 0x17ea50, color[2]); gf100_ltc_zbc_clear_color()
63 nvkm_wr32(device, 0x17ea54, color[3]); gf100_ltc_zbc_clear_color()
69 struct nvkm_device *device = ltc->subdev.device; gf100_ltc_zbc_clear_depth() local
70 nvkm_mask(device, 0x17ea44, 0x0000000f, i); gf100_ltc_zbc_clear_depth()
71 nvkm_wr32(device, 0x17ea58, depth); gf100_ltc_zbc_clear_depth()
96 struct nvkm_device *device = subdev->device; gf100_ltc_lts_intr() local
98 u32 intr = nvkm_rd32(device, base + 0x020); gf100_ltc_lts_intr()
107 nvkm_wr32(device, base + 0x020, intr); gf100_ltc_lts_intr()
113 struct nvkm_device *device = ltc->subdev.device; gf100_ltc_intr() local
116 mask = nvkm_rd32(device, 0x00017c); gf100_ltc_intr()
128 struct nvkm_device *device = ltc->subdev.device; gf100_ltc_invalidate() local
131 nvkm_wr32(device, 0x70004, 0x00000001); gf100_ltc_invalidate()
132 taken = nvkm_wait_msec(device, 2, 0x70004, 0x00000003, 0x00000000); gf100_ltc_invalidate()
143 struct nvkm_device *device = ltc->subdev.device; gf100_ltc_flush() local
146 nvkm_wr32(device, 0x70010, 0x00000001); gf100_ltc_flush()
147 taken = nvkm_wait_msec(device, 2, 0x70010, 0x00000003, 0x00000000); gf100_ltc_flush()
160 struct nvkm_ram *ram = ltc->subdev.device->fb->ram; gf100_ltc_oneinit_tag_ram()
211 struct nvkm_device *device = ltc->subdev.device; gf100_ltc_oneinit() local
212 const u32 parts = nvkm_rd32(device, 0x022438); gf100_ltc_oneinit()
213 const u32 mask = nvkm_rd32(device, 0x022554); gf100_ltc_oneinit()
214 const u32 slice = nvkm_rd32(device, 0x17e8dc) >> 28; gf100_ltc_oneinit()
229 struct nvkm_device *device = ltc->subdev.device; gf100_ltc_init() local
230 u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001); gf100_ltc_init()
232 nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ gf100_ltc_init()
233 nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr); gf100_ltc_init()
234 nvkm_wr32(device, 0x17e8d4, ltc->tag_base); gf100_ltc_init()
235 nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000); gf100_ltc_init()
253 gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) gf100_ltc_new() argument
255 return nvkm_ltc_new_(&gf100_ltc, device, index, pltc); gf100_ltc_new()
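gf100_ltc_invalidate() and gf100_ltc_flush() follow a kick-then-poll handshake: write 1 to a doorbell register, then wait for its status field to return to zero. nvkm_wait_msec(device, msec, addr, mask, data) polls until (read & mask) == data and returns the time taken, negative on timeout. A sketch of the pairing with a hypothetical wrapper:

	static int ltc_kick_and_wait(struct nvkm_device *device, u32 doorbell)
	{
		s64 taken;

		nvkm_wr32(device, doorbell, 0x00000001);	/* kick */
		taken = nvkm_wait_msec(device, 2, doorbell,	/* poll */
				       0x00000003, 0x00000000);
		return taken < 0 ? -ETIMEDOUT : 0;
	}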
/linux-4.4.14/drivers/input/touchscreen/
H A Dad7879.h15 struct device;
19 int (*read)(struct device *dev, u8 reg);
20 int (*multi_read)(struct device *dev, u8 first_reg, u8 count, u16 *buf);
21 int (*write)(struct device *dev, u8 reg, u16 val);
26 struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned irq,
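The function-pointer triple lets a single ad7879 core driver serve both I2C and SPI front-ends; each bus glue fills the ops with its own register accessors. A sketch of what the I2C side plausibly supplies (hypothetical function name, modeled on the interface above):

	static int example_ad7879_i2c_read(struct device *dev, u8 reg)
	{
		/* the chip transfers 16-bit big-endian registers over SMBus */
		return i2c_smbus_read_word_swapped(to_i2c_client(dev), reg);
	}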
/linux-4.4.14/include/drm/
H A Ddrm_of.h5 struct device;
12 extern int drm_of_component_probe(struct device *dev,
13 int (*compare_of)(struct device *, void *),
23 drm_of_component_probe(struct device *dev, drm_of_component_probe()
24 int (*compare_of)(struct device *, void *), drm_of_component_probe()
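drm_of_component_probe() walks the ports/endpoints of dev->of_node and builds a component match list; the caller supplies only the comparison callback. The conventional callback matches on the OF node pointer, for example:

	static int example_compare_of(struct device *dev, void *data)
	{
		return dev->of_node == data;	/* data: struct device_node * */
	}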
H A Ddrm_sysfs.h9 extern int drm_class_device_register(struct device *dev);
10 extern void drm_class_device_unregister(struct device *dev);
/linux-4.4.14/include/trace/events/
H A Diommu.h16 struct device;
20 TP_PROTO(int group_id, struct device *dev),
26 __string(device, dev_name(dev))
31 __assign_str(device, dev_name(dev));
34 TP_printk("IOMMU: groupID=%d device=%s",
35 __entry->gid, __get_str(device)
41 TP_PROTO(int group_id, struct device *dev),
49 TP_PROTO(int group_id, struct device *dev),
56 TP_PROTO(struct device *dev),
61 __string(device, dev_name(dev))
65 __assign_str(device, dev_name(dev));
68 TP_printk("IOMMU: device=%s", __get_str(device)
74 TP_PROTO(struct device *dev),
81 TP_PROTO(struct device *dev),
134 TP_PROTO(struct device *dev, unsigned long iova, int flags),
139 __string(device, dev_name(dev))
146 __assign_str(device, dev_name(dev));
153 __get_str(driver), __get_str(device),
160 TP_PROTO(struct device *dev, unsigned long iova, int flags),
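Each TP_PROTO above becomes a trace_<name>() call compiled into the IOMMU core; the __string()/__assign_str() pair copies dev_name(dev) into the ring-buffer record so the printed line can carry "device=%s". The call sites look like the following (as in drivers/iommu/iommu.c of this kernel; shown here as a sketch):

	/* in iommu_group_add_device(), after the device joined the group */
	trace_add_device_to_group(group->id, dev);

	/* in __iommu_attach_device(), once the attach succeeded */
	trace_attach_device_to_domain(dev);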
/linux-4.4.14/drivers/s390/block/
H A Ddasd_devmap.c37 * between device number and device index. To find a dasd_devmap_t
38 * that corresponds to a device number of a device index each
40 * the device number and one to search by the device index. As
41 * soon as big minor numbers are available the device index list
42 * can be removed since the device number will then be identical
43 * to the device index.
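A sketch of the shape that comment describes (field names here are illustrative, not the driver's; the real driver keys hashed lists by the bus id string):

	struct devmap_sketch {
		struct list_head devno_list;  /* on the by-devno hash list */
		struct list_head index_list;  /* on the by-index hash list */
		unsigned int devindex;        /* small, stable device index */
		char bus_id[20];              /* ccw bus id, e.g. "0.0.abcd" */
		struct dasd_device *device;   /* NULL until device created */
	};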
50 struct dasd_device *device; member in struct:dasd_devmap
128 * Read a device busid/devno from a string.
139 pr_err("The IPL device is not a CCW device\n"); dasd_busid()
217 pr_warning("%*s is not a supported device option\n", dasd_feature_list()
303 * as a device number or a range of devices. If the interpretation is
332 pr_err("%s is not a valid device range\n", parsestring); dasd_parse_range()
338 /* each device in dasd= parameter should be set initially online */ dasd_parse_range()
369 * keywords and device ranges. When the dasd driver is build into the kernel,
402 * Add a devmap for the device specified by busid. It is possible that
429 new->device = NULL; dasd_add_busid()
440 * Find devmap for device with given bus_id.
471 * Forget all about the device numbers added so far.
483 BUG_ON(devmap->device != NULL); dasd_forget_ranges()
492 * Find the device struct by its device index.
498 struct dasd_device *device; dasd_device_from_devindex() local
506 /* Found the devmap for the device. */ dasd_device_from_devindex()
510 if (devmap && devmap->device) { dasd_device_from_devindex()
511 device = devmap->device; dasd_device_from_devindex()
512 dasd_get_device(device); dasd_device_from_devindex()
514 device = ERR_PTR(-ENODEV); dasd_device_from_devindex()
516 return device; dasd_device_from_devindex()
536 * Create a dasd device structure for cdev.
542 struct dasd_device *device; dasd_create_device() local
550 device = dasd_alloc_device(); dasd_create_device()
551 if (IS_ERR(device)) dasd_create_device()
552 return device; dasd_create_device()
553 atomic_set(&device->ref_count, 3); dasd_create_device()
556 if (!devmap->device) { dasd_create_device()
557 devmap->device = device; dasd_create_device()
558 device->devindex = devmap->devindex; dasd_create_device()
559 device->features = devmap->features; dasd_create_device()
561 device->cdev = cdev; dasd_create_device()
569 dasd_free_device(device); dasd_create_device()
574 dev_set_drvdata(&cdev->dev, device); dasd_create_device()
577 return device; dasd_create_device()
586 * Remove a dasd device structure. The passed referenced
590 dasd_delete_device(struct dasd_device *device) dasd_delete_device() argument
596 /* First remove device pointer from devmap. */ dasd_delete_device()
597 devmap = dasd_find_busid(dev_name(&device->cdev->dev)); dasd_delete_device()
600 if (devmap->device != device) { dasd_delete_device()
602 dasd_put_device(device); dasd_delete_device()
605 devmap->device = NULL; dasd_delete_device()
609 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); dasd_delete_device()
610 dev_set_drvdata(&device->cdev->dev, NULL); dasd_delete_device()
611 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); dasd_delete_device()
617 atomic_sub(3, &device->ref_count); dasd_delete_device()
620 wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0); dasd_delete_device()
623 cdev = device->cdev; dasd_delete_device()
624 device->cdev = NULL; dasd_delete_device()
629 /* Now the device structure can be freed. */ dasd_delete_device()
630 dasd_free_device(device); dasd_delete_device()
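The constant 3 in dasd_create_device() and dasd_delete_device() brackets the device's baseline references (our hedged reading: roughly one for the devmap link, one for the cdev drvdata pointer, one for the caller). Deletion drops all three at once and then blocks until every transient dasd_get_device() holder has called dasd_put_device():

	/* teardown, as in dasd_delete_device() above */
	atomic_sub(3, &device->ref_count);
	/* dasd_put_device_wake() wakes this once the count hits zero */
	wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);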
638 dasd_put_device_wake(struct dasd_device *device) dasd_put_device_wake() argument
646 * This function needs to be called with the ccw device
652 struct dasd_device *device = dev_get_drvdata(&cdev->dev); dasd_device_from_cdev_locked() local
654 if (!device) dasd_device_from_cdev_locked()
656 dasd_get_device(device); dasd_device_from_cdev_locked()
657 return device; dasd_device_from_cdev_locked()
666 struct dasd_device *device; dasd_device_from_cdev() local
670 device = dasd_device_from_cdev_locked(cdev); dasd_device_from_cdev()
672 return device; dasd_device_from_cdev()
675 void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device) dasd_add_link_to_gendisk() argument
679 devmap = dasd_find_busid(dev_name(&device->cdev->dev)); dasd_add_link_to_gendisk()
689 struct dasd_device *device; dasd_device_from_gendisk() local
694 device = NULL; dasd_device_from_gendisk()
697 if (devmap && devmap->device) { dasd_device_from_gendisk()
698 device = devmap->device; dasd_device_from_gendisk()
699 dasd_get_device(device); dasd_device_from_gendisk()
702 return device; dasd_device_from_gendisk()
712 static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr, dasd_ff_show()
726 static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr, dasd_ff_store()
746 if (devmap->device) dasd_ff_store()
747 devmap->device->features = devmap->features; dasd_ff_store()
758 dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf) dasd_ro_show()
772 dasd_ro_store(struct device *dev, struct device_attribute *attr, dasd_ro_store()
776 struct dasd_device *device; dasd_ro_store() local
793 device = devmap->device; dasd_ro_store()
794 if (device) { dasd_ro_store()
795 device->features = devmap->features; dasd_ro_store()
796 val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags); dasd_ro_store()
799 if (device && device->block && device->block->gdp) dasd_ro_store()
800 set_disk_ro(device->block->gdp, val); dasd_ro_store()
810 dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf) dasd_erplog_show()
824 dasd_erplog_store(struct device *dev, struct device_attribute *attr, dasd_erplog_store()
844 if (devmap->device) dasd_erplog_store()
845 devmap->device->features = devmap->features; dasd_erplog_store()
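All of these show/store pairs are instances of the standard sysfs device-attribute pattern: stores parse the user buffer, take the devmap lock, flip a feature bit, and propagate it to an active device if one exists. The bare skeleton, with hypothetical names:

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "%d\n", 1);  /* report a flag */
	}

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		/* parse buf, update state under a lock, ... */
		return count;		/* consumed the whole write */
	}

	static DEVICE_ATTR(example, 0644, example_show, example_store);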
854 * to talk to the device
857 dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf) dasd_use_diag_show()
871 dasd_use_diag_store(struct device *dev, struct device_attribute *attr, dasd_use_diag_store()
890 if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) { dasd_use_diag_store()
908 dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf) dasd_use_raw_show()
922 dasd_use_raw_store(struct device *dev, struct device_attribute *attr, dasd_use_raw_store()
939 if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) { dasd_use_raw_store()
954 dasd_safe_offline_store(struct device *dev, struct device_attribute *attr, dasd_safe_offline_store()
958 struct dasd_device *device; dasd_safe_offline_store() local
961 device = dasd_device_from_cdev(cdev); dasd_safe_offline_store()
962 if (IS_ERR(device)) { dasd_safe_offline_store()
963 rc = PTR_ERR(device); dasd_safe_offline_store()
967 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || dasd_safe_offline_store()
968 test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { dasd_safe_offline_store()
970 dasd_put_device(device); dasd_safe_offline_store()
975 set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); dasd_safe_offline_store()
976 dasd_put_device(device); dasd_safe_offline_store()
987 dasd_discipline_show(struct device *dev, struct device_attribute *attr, dasd_discipline_show()
990 struct dasd_device *device; dasd_discipline_show() local
993 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_discipline_show()
994 if (IS_ERR(device)) dasd_discipline_show()
996 else if (!device->discipline) { dasd_discipline_show()
997 dasd_put_device(device); dasd_discipline_show()
1001 device->discipline->name); dasd_discipline_show()
1002 dasd_put_device(device); dasd_discipline_show()
1013 dasd_device_status_show(struct device *dev, struct device_attribute *attr, dasd_device_status_show()
1016 struct dasd_device *device; dasd_device_status_show() local
1019 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_device_status_show()
1020 if (!IS_ERR(device)) { dasd_device_status_show()
1021 switch (device->state) { dasd_device_status_show()
1044 dasd_put_device(device); dasd_device_status_show()
1052 static ssize_t dasd_alias_show(struct device *dev, dasd_alias_show()
1055 struct dasd_device *device; dasd_alias_show() local
1058 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_alias_show()
1059 if (IS_ERR(device)) dasd_alias_show()
1062 if (device->discipline && device->discipline->get_uid && dasd_alias_show()
1063 !device->discipline->get_uid(device, &uid)) { dasd_alias_show()
1066 dasd_put_device(device); dasd_alias_show()
1070 dasd_put_device(device); dasd_alias_show()
1077 static ssize_t dasd_vendor_show(struct device *dev, dasd_vendor_show()
1080 struct dasd_device *device; dasd_vendor_show() local
1084 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_vendor_show()
1086 if (IS_ERR(device)) dasd_vendor_show()
1089 if (device->discipline && device->discipline->get_uid && dasd_vendor_show()
1090 !device->discipline->get_uid(device, &uid)) dasd_vendor_show()
1093 dasd_put_device(device); dasd_vendor_show()
1105 dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) dasd_uid_show()
1107 struct dasd_device *device; dasd_uid_show() local
1112 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_uid_show()
1114 if (IS_ERR(device)) dasd_uid_show()
1117 if (device->discipline && device->discipline->get_uid && dasd_uid_show()
1118 !device->discipline->get_uid(device, &uid)) { dasd_uid_show()
1132 /* should not happen, treat like base device */ dasd_uid_show()
1148 dasd_put_device(device); dasd_uid_show()
1158 dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf) dasd_eer_show()
1164 if (!IS_ERR(devmap) && devmap->device) dasd_eer_show()
1165 eer_flag = dasd_eer_enabled(devmap->device); dasd_eer_show()
1172 dasd_eer_store(struct device *dev, struct device_attribute *attr, dasd_eer_store()
1182 if (!devmap->device) dasd_eer_store()
1190 rc = dasd_eer_enable(devmap->device); dasd_eer_store()
1194 dasd_eer_disable(devmap->device); dasd_eer_store()
1204 dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf) dasd_expires_show()
1206 struct dasd_device *device; dasd_expires_show() local
1209 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_expires_show()
1210 if (IS_ERR(device)) dasd_expires_show()
1212 len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires); dasd_expires_show()
1213 dasd_put_device(device); dasd_expires_show()
1218 dasd_expires_store(struct device *dev, struct device_attribute *attr, dasd_expires_store()
1221 struct dasd_device *device; dasd_expires_store() local
1224 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_expires_store()
1225 if (IS_ERR(device)) dasd_expires_store()
1230 dasd_put_device(device); dasd_expires_store()
1235 device->default_expires = val; dasd_expires_store()
1237 dasd_put_device(device); dasd_expires_store()
1244 dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf) dasd_retries_show()
1246 struct dasd_device *device; dasd_retries_show() local
1249 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_retries_show()
1250 if (IS_ERR(device)) dasd_retries_show()
1252 len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_retries); dasd_retries_show()
1253 dasd_put_device(device); dasd_retries_show()
1258 dasd_retries_store(struct device *dev, struct device_attribute *attr, dasd_retries_store()
1261 struct dasd_device *device; dasd_retries_store() local
1264 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_retries_store()
1265 if (IS_ERR(device)) dasd_retries_store()
1270 dasd_put_device(device); dasd_retries_store()
1275 device->default_retries = val; dasd_retries_store()
1277 dasd_put_device(device); dasd_retries_store()
1284 dasd_timeout_show(struct device *dev, struct device_attribute *attr, dasd_timeout_show()
1287 struct dasd_device *device; dasd_timeout_show() local
1290 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_timeout_show()
1291 if (IS_ERR(device)) dasd_timeout_show()
1293 len = snprintf(buf, PAGE_SIZE, "%lu\n", device->blk_timeout); dasd_timeout_show()
1294 dasd_put_device(device); dasd_timeout_show()
1299 dasd_timeout_store(struct device *dev, struct device_attribute *attr, dasd_timeout_store()
1302 struct dasd_device *device; dasd_timeout_store() local
1306 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_timeout_store()
1307 if (IS_ERR(device) || !device->block) dasd_timeout_store()
1312 dasd_put_device(device); dasd_timeout_store()
1315 q = device->block->request_queue; dasd_timeout_store()
1317 dasd_put_device(device); dasd_timeout_store()
1320 spin_lock_irqsave(&device->block->request_queue_lock, flags); dasd_timeout_store()
1326 device->blk_timeout = val; dasd_timeout_store()
1328 blk_queue_rq_timeout(q, device->blk_timeout * HZ); dasd_timeout_store()
1329 spin_unlock_irqrestore(&device->block->request_queue_lock, flags); dasd_timeout_store()
1331 dasd_put_device(device); dasd_timeout_store()
1338 static ssize_t dasd_reservation_policy_show(struct device *dev, dasd_reservation_policy_show()
1359 static ssize_t dasd_reservation_policy_store(struct device *dev, dasd_reservation_policy_store()
1377 if (devmap->device) dasd_reservation_policy_store()
1378 devmap->device->features = devmap->features; dasd_reservation_policy_store()
1389 static ssize_t dasd_reservation_state_show(struct device *dev, dasd_reservation_state_show()
1393 struct dasd_device *device; dasd_reservation_state_show() local
1396 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_reservation_state_show()
1397 if (IS_ERR(device)) dasd_reservation_state_show()
1400 if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) dasd_reservation_state_show()
1402 else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) dasd_reservation_state_show()
1406 dasd_put_device(device); dasd_reservation_state_show()
1410 static ssize_t dasd_reservation_state_store(struct device *dev, dasd_reservation_state_store()
1414 struct dasd_device *device; dasd_reservation_state_store() local
1417 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_reservation_state_store()
1418 if (IS_ERR(device)) dasd_reservation_state_store()
1421 clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags); dasd_reservation_state_store()
1424 dasd_put_device(device); dasd_reservation_state_store()
1435 static ssize_t dasd_pm_show(struct device *dev, dasd_pm_show()
1438 struct dasd_device *device; dasd_pm_show() local
1441 device = dasd_device_from_cdev(to_ccwdev(dev)); dasd_pm_show()
1442 if (IS_ERR(device)) dasd_pm_show()
1445 opm = device->path_data.opm; dasd_pm_show()
1446 nppm = device->path_data.npm; dasd_pm_show()
1447 cablepm = device->path_data.cablepm; dasd_pm_show()
1448 cuirpm = device->path_data.cuirpm; dasd_pm_show()
1449 hpfpm = device->path_data.hpfpm; dasd_pm_show()
1450 dasd_put_device(device); dasd_pm_show()
1517 if (devmap->device) dasd_set_feature()
1518 devmap->device->features = devmap->features; dasd_set_feature()
H A Ddasd.c54 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
80 * SECTION: Operations on the device structure.
88 * Allocate memory for a new device structure.
92 struct dasd_device *device; dasd_alloc_device() local
94 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); dasd_alloc_device()
95 if (!device) dasd_alloc_device()
98 /* Get two pages for normal block device operations. */ dasd_alloc_device()
99 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); dasd_alloc_device()
100 if (!device->ccw_mem) { dasd_alloc_device()
101 kfree(device); dasd_alloc_device()
105 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); dasd_alloc_device()
106 if (!device->erp_mem) { dasd_alloc_device()
107 free_pages((unsigned long) device->ccw_mem, 1); dasd_alloc_device()
108 kfree(device); dasd_alloc_device()
112 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); dasd_alloc_device()
113 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); dasd_alloc_device()
114 spin_lock_init(&device->mem_lock); dasd_alloc_device()
115 atomic_set(&device->tasklet_scheduled, 0); dasd_alloc_device()
116 tasklet_init(&device->tasklet, dasd_alloc_device()
118 (unsigned long) device); dasd_alloc_device()
119 INIT_LIST_HEAD(&device->ccw_queue); dasd_alloc_device()
120 init_timer(&device->timer); dasd_alloc_device()
121 device->timer.function = dasd_device_timeout; dasd_alloc_device()
122 device->timer.data = (unsigned long) device; dasd_alloc_device()
123 INIT_WORK(&device->kick_work, do_kick_device); dasd_alloc_device()
124 INIT_WORK(&device->restore_device, do_restore_device); dasd_alloc_device()
125 INIT_WORK(&device->reload_device, do_reload_device); dasd_alloc_device()
126 device->state = DASD_STATE_NEW; dasd_alloc_device()
127 device->target = DASD_STATE_NEW; dasd_alloc_device()
128 mutex_init(&device->state_mutex); dasd_alloc_device()
129 spin_lock_init(&device->profile.lock); dasd_alloc_device()
130 return device; dasd_alloc_device()
134 * Free memory of a device structure.
136 void dasd_free_device(struct dasd_device *device) dasd_free_device() argument
138 kfree(device->private); dasd_free_device()
139 free_page((unsigned long) device->erp_mem); dasd_free_device()
140 free_pages((unsigned long) device->ccw_mem, 1); dasd_free_device()
141 kfree(device); dasd_free_device()
145 * Allocate memory for a new device structure.
154 /* open_count = 0 means device online but not in use */ dasd_alloc_block()
174 * Free memory of a device structure.
183 * Make a new device known to the system.
185 static int dasd_state_new_to_known(struct dasd_device *device) dasd_state_new_to_known() argument
190 * As long as the device is not in state DASD_STATE_NEW we want to dasd_state_new_to_known()
193 dasd_get_device(device); dasd_state_new_to_known()
195 if (device->block) { dasd_state_new_to_known()
196 rc = dasd_alloc_queue(device->block); dasd_state_new_to_known()
198 dasd_put_device(device); dasd_state_new_to_known()
202 device->state = DASD_STATE_KNOWN; dasd_state_new_to_known()
207 * Let the system forget about a device.
209 static int dasd_state_known_to_new(struct dasd_device *device) dasd_state_known_to_new() argument
211 /* Disable extended error reporting for this device. */ dasd_state_known_to_new()
212 dasd_eer_disable(device); dasd_state_known_to_new()
214 if (device->discipline) { dasd_state_known_to_new()
215 if (device->discipline->uncheck_device) dasd_state_known_to_new()
216 device->discipline->uncheck_device(device); dasd_state_known_to_new()
217 module_put(device->discipline->owner); dasd_state_known_to_new()
219 device->discipline = NULL; dasd_state_known_to_new()
220 if (device->base_discipline) dasd_state_known_to_new()
221 module_put(device->base_discipline->owner); dasd_state_known_to_new()
222 device->base_discipline = NULL; dasd_state_known_to_new()
223 device->state = DASD_STATE_NEW; dasd_state_known_to_new()
225 if (device->block) dasd_state_known_to_new()
226 dasd_free_queue(device->block); dasd_state_known_to_new()
229 dasd_put_device(device); dasd_state_known_to_new()
247 * Request the irq line for the device.
249 static int dasd_state_known_to_basic(struct dasd_device *device) dasd_state_known_to_basic() argument
251 struct dasd_block *block = device->block; dasd_state_known_to_basic()
264 dasd_profile_on(&device->block->profile); dasd_state_known_to_basic()
266 device->debugfs_dentry = dasd_state_known_to_basic()
267 dasd_debugfs_setup(dev_name(&device->cdev->dev), dasd_state_known_to_basic()
269 dasd_profile_init(&device->profile, device->debugfs_dentry); dasd_state_known_to_basic()
271 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ dasd_state_known_to_basic()
272 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, dasd_state_known_to_basic()
274 debug_register_view(device->debug_area, &debug_sprintf_view); dasd_state_known_to_basic()
275 debug_set_level(device->debug_area, DBF_WARNING); dasd_state_known_to_basic()
276 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); dasd_state_known_to_basic()
278 device->state = DASD_STATE_BASIC; dasd_state_known_to_basic()
284 * Release the irq line for the device. Terminate any running i/o.
286 static int dasd_state_basic_to_known(struct dasd_device *device) dasd_state_basic_to_known() argument
290 if (device->discipline->basic_to_known) { dasd_state_basic_to_known()
291 rc = device->discipline->basic_to_known(device); dasd_state_basic_to_known()
296 if (device->block) { dasd_state_basic_to_known()
297 dasd_profile_exit(&device->block->profile); dasd_state_basic_to_known()
298 debugfs_remove(device->block->debugfs_dentry); dasd_state_basic_to_known()
299 dasd_gendisk_free(device->block); dasd_state_basic_to_known()
300 dasd_block_clear_timer(device->block); dasd_state_basic_to_known()
302 rc = dasd_flush_device_queue(device); dasd_state_basic_to_known()
305 dasd_device_clear_timer(device); dasd_state_basic_to_known()
306 dasd_profile_exit(&device->profile); dasd_state_basic_to_known()
307 debugfs_remove(device->debugfs_dentry); dasd_state_basic_to_known()
308 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); dasd_state_basic_to_known()
309 if (device->debug_area != NULL) { dasd_state_basic_to_known()
310 debug_unregister(device->debug_area); dasd_state_basic_to_known()
311 device->debug_area = NULL; dasd_state_basic_to_known()
313 device->state = DASD_STATE_KNOWN; dasd_state_basic_to_known()
319 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
327 * device is setup.
328 * In case the analysis returns an error, the device setup is stopped
331 static int dasd_state_basic_to_ready(struct dasd_device *device) dasd_state_basic_to_ready() argument
337 block = device->block; dasd_state_basic_to_ready()
344 device->state = DASD_STATE_UNFMT; dasd_state_basic_to_ready()
352 device->state = DASD_STATE_READY; dasd_state_basic_to_ready()
355 device->state = DASD_STATE_BASIC; dasd_state_basic_to_ready()
359 device->state = DASD_STATE_READY; dasd_state_basic_to_ready()
362 if (device->discipline->basic_to_ready) dasd_state_basic_to_ready()
363 rc = device->discipline->basic_to_ready(device); dasd_state_basic_to_ready()
368 int _wait_for_empty_queues(struct dasd_device *device) _wait_for_empty_queues() argument
370 if (device->block) _wait_for_empty_queues()
371 return list_empty(&device->ccw_queue) && _wait_for_empty_queues()
372 list_empty(&device->block->ccw_queue); _wait_for_empty_queues()
374 return list_empty(&device->ccw_queue); _wait_for_empty_queues()
378 * Remove device from block device layer. Destroy dirty buffers.
382 static int dasd_state_ready_to_basic(struct dasd_device *device) dasd_state_ready_to_basic() argument
386 device->state = DASD_STATE_BASIC; dasd_state_ready_to_basic()
387 if (device->block) { dasd_state_ready_to_basic()
388 struct dasd_block *block = device->block; dasd_state_ready_to_basic()
391 device->state = DASD_STATE_READY; dasd_state_ready_to_basic()
406 static int dasd_state_unfmt_to_basic(struct dasd_device *device) dasd_state_unfmt_to_basic() argument
408 device->state = DASD_STATE_BASIC; dasd_state_unfmt_to_basic()
413 * Make the device online and schedule the bottom half to start
418 dasd_state_ready_to_online(struct dasd_device * device) dasd_state_ready_to_online() argument
424 device->state = DASD_STATE_ONLINE; dasd_state_ready_to_online()
425 if (device->block) { dasd_state_ready_to_online()
426 dasd_schedule_block_bh(device->block); dasd_state_ready_to_online()
427 if ((device->features & DASD_FEATURE_USERAW)) { dasd_state_ready_to_online()
428 disk = device->block->gdp; dasd_state_ready_to_online()
432 disk = device->block->bdev->bd_disk; dasd_state_ready_to_online()
444 static int dasd_state_online_to_ready(struct dasd_device *device) dasd_state_online_to_ready() argument
451 if (device->discipline->online_to_ready) { dasd_state_online_to_ready()
452 rc = device->discipline->online_to_ready(device); dasd_state_online_to_ready()
457 device->state = DASD_STATE_READY; dasd_state_online_to_ready()
458 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { dasd_state_online_to_ready()
459 disk = device->block->bdev->bd_disk; dasd_state_online_to_ready()
471 static int dasd_increase_state(struct dasd_device *device) dasd_increase_state() argument
476 if (device->state == DASD_STATE_NEW && dasd_increase_state()
477 device->target >= DASD_STATE_KNOWN) dasd_increase_state()
478 rc = dasd_state_new_to_known(device); dasd_increase_state()
481 device->state == DASD_STATE_KNOWN && dasd_increase_state()
482 device->target >= DASD_STATE_BASIC) dasd_increase_state()
483 rc = dasd_state_known_to_basic(device); dasd_increase_state()
486 device->state == DASD_STATE_BASIC && dasd_increase_state()
487 device->target >= DASD_STATE_READY) dasd_increase_state()
488 rc = dasd_state_basic_to_ready(device); dasd_increase_state()
491 device->state == DASD_STATE_UNFMT && dasd_increase_state()
492 device->target > DASD_STATE_UNFMT) dasd_increase_state()
496 device->state == DASD_STATE_READY && dasd_increase_state()
497 device->target >= DASD_STATE_ONLINE) dasd_increase_state()
498 rc = dasd_state_ready_to_online(device); dasd_increase_state()
506 static int dasd_decrease_state(struct dasd_device *device) dasd_decrease_state() argument
511 if (device->state == DASD_STATE_ONLINE && dasd_decrease_state()
512 device->target <= DASD_STATE_READY) dasd_decrease_state()
513 rc = dasd_state_online_to_ready(device); dasd_decrease_state()
516 device->state == DASD_STATE_READY && dasd_decrease_state()
517 device->target <= DASD_STATE_BASIC) dasd_decrease_state()
518 rc = dasd_state_ready_to_basic(device); dasd_decrease_state()
521 device->state == DASD_STATE_UNFMT && dasd_decrease_state()
522 device->target <= DASD_STATE_BASIC) dasd_decrease_state()
523 rc = dasd_state_unfmt_to_basic(device); dasd_decrease_state()
526 device->state == DASD_STATE_BASIC && dasd_decrease_state()
527 device->target <= DASD_STATE_KNOWN) dasd_decrease_state()
528 rc = dasd_state_basic_to_known(device); dasd_decrease_state()
531 device->state == DASD_STATE_KNOWN && dasd_decrease_state()
532 device->target <= DASD_STATE_NEW) dasd_decrease_state()
533 rc = dasd_state_known_to_new(device); dasd_decrease_state()
541 static void dasd_change_state(struct dasd_device *device) dasd_change_state() argument
545 if (device->state == device->target) dasd_change_state()
548 if (device->state < device->target) dasd_change_state()
549 rc = dasd_increase_state(device); dasd_change_state()
551 rc = dasd_decrease_state(device); dasd_change_state()
555 device->target = device->state; dasd_change_state()
557 /* let user-space know that the device status changed */ dasd_change_state()
558 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); dasd_change_state()
560 if (device->state == device->target) dasd_change_state()
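dasd_change_state() only ever moves one layer at a time; the increase/decrease helpers above chain the individual transitions, and on a hard failure the target is clamped back to the current state. Schematically (a simplified sketch of the control flow, not the driver's exact code):

	/* NEW <-> KNOWN <-> BASIC <-> READY (or UNFMT) <-> ONLINE */
	if (device->state < device->target)
		rc = dasd_increase_state(device);   /* step upward */
	else
		rc = dasd_decrease_state(device);   /* step downward */
	if (rc == -EAGAIN)
		return;				    /* retry later */
	if (rc)
		device->target = device->state;	    /* give up, clamp */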
572 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); do_kick_device() local
573 mutex_lock(&device->state_mutex); do_kick_device()
574 dasd_change_state(device); do_kick_device()
575 mutex_unlock(&device->state_mutex); do_kick_device()
576 dasd_schedule_device_bh(device); do_kick_device()
577 dasd_put_device(device); do_kick_device()
580 void dasd_kick_device(struct dasd_device *device) dasd_kick_device() argument
582 dasd_get_device(device); dasd_kick_device()
584 if (!schedule_work(&device->kick_work)) dasd_kick_device()
585 dasd_put_device(device); dasd_kick_device()
595 struct dasd_device *device = container_of(work, struct dasd_device, do_reload_device() local
597 device->discipline->reload(device); do_reload_device()
598 dasd_put_device(device); do_reload_device()
601 void dasd_reload_device(struct dasd_device *device) dasd_reload_device() argument
603 dasd_get_device(device); dasd_reload_device()
605 if (!schedule_work(&device->reload_device)) dasd_reload_device()
606 dasd_put_device(device); dasd_reload_device()
616 struct dasd_device *device = container_of(work, struct dasd_device, do_restore_device() local
618 device->cdev->drv->restore(device->cdev); do_restore_device()
619 dasd_put_device(device); do_restore_device()
622 void dasd_restore_device(struct dasd_device *device) dasd_restore_device() argument
624 dasd_get_device(device); dasd_restore_device()
626 if (!schedule_work(&device->restore_device)) dasd_restore_device()
627 dasd_put_device(device); dasd_restore_device()
631 * Set the target state for a device and starts the state change.
633 void dasd_set_target_state(struct dasd_device *device, int target) dasd_set_target_state() argument
635 dasd_get_device(device); dasd_set_target_state()
636 mutex_lock(&device->state_mutex); dasd_set_target_state()
640 if (device->target != target) { dasd_set_target_state()
641 if (device->state == target) dasd_set_target_state()
643 device->target = target; dasd_set_target_state()
645 if (device->state != device->target) dasd_set_target_state()
646 dasd_change_state(device); dasd_set_target_state()
647 mutex_unlock(&device->state_mutex); dasd_set_target_state()
648 dasd_put_device(device); dasd_set_target_state()
653 * Enable devices with device numbers in [from..to].
655 static inline int _wait_for_device(struct dasd_device *device) _wait_for_device() argument
657 return (device->state == device->target); _wait_for_device()
660 void dasd_enable_device(struct dasd_device *device) dasd_enable_device() argument
662 dasd_set_target_state(device, DASD_STATE_ONLINE); dasd_enable_device()
663 if (device->state <= DASD_STATE_KNOWN) dasd_enable_device()
664 /* No discipline for device found. */ dasd_enable_device()
665 dasd_set_target_state(device, DASD_STATE_NEW); dasd_enable_device()
667 wait_event(dasd_init_waitq, _wait_for_device(device)); dasd_enable_device()
669 dasd_reload_device(device); dasd_enable_device()
670 if (device->discipline->kick_validate) dasd_enable_device()
671 device->discipline->kick_validate(device); dasd_enable_device()
676 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
696 struct dasd_device *device; dasd_profile_start() local
722 * We count the request for the start device, even though it may run on dasd_profile_start()
723 * some other device due to error recovery. This way we make sure that dasd_profile_start()
726 device = cqr->startdev; dasd_profile_start()
727 if (device->profile.data) { dasd_profile_start()
728 counter = 1; /* request is not yet queued on the start device */ dasd_profile_start()
729 list_for_each(l, &device->ccw_queue) dasd_profile_start()
733 spin_lock(&device->profile.lock); dasd_profile_start()
734 if (device->profile.data) { dasd_profile_start()
735 device->profile.data->dasd_io_nr_req[counter]++; dasd_profile_start()
737 device->profile.data->dasd_read_nr_req[counter]++; dasd_profile_start()
739 spin_unlock(&device->profile.lock); dasd_profile_start()
806 struct dasd_device *device; dasd_profile_end() local
810 device = cqr->startdev; dasd_profile_end()
813 device->profile.data)) dasd_profile_end()
861 spin_lock(&device->profile.lock); dasd_profile_end()
862 if (device->profile.data) dasd_profile_end()
863 dasd_profile_end_add_data(device->profile.data, dasd_profile_end()
871 spin_unlock(&device->profile.lock); dasd_profile_end()
1158 * that gets allocated for each device.
1162 struct dasd_device *device) dasd_kmalloc_request()
1193 dasd_get_device(device); dasd_kmalloc_request()
1200 struct dasd_device *device) dasd_smalloc_request()
1212 spin_lock_irqsave(&device->mem_lock, flags); dasd_smalloc_request()
1214 dasd_alloc_chunk(&device->ccw_chunks, size); dasd_smalloc_request()
1215 spin_unlock_irqrestore(&device->mem_lock, flags); dasd_smalloc_request()
1233 dasd_get_device(device); dasd_smalloc_request()
1243 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) dasd_kfree_request() argument
1255 dasd_put_device(device); dasd_kfree_request()
1259 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) dasd_sfree_request() argument
1263 spin_lock_irqsave(&device->mem_lock, flags); dasd_sfree_request()
1264 dasd_free_chunk(&device->ccw_chunks, cqr); dasd_sfree_request()
1265 spin_unlock_irqrestore(&device->mem_lock, flags); dasd_sfree_request()
1266 dasd_put_device(device); dasd_sfree_request()
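Callers pick the allocator to match the context: dasd_kmalloc_request() uses the normal slab path, while dasd_smalloc_request() carves the request from the per-device preallocated chunk pool under mem_lock, which still works when the system is out of memory. Each pairs with its own free; a sketch:

	struct dasd_ccw_req *cqr;

	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);	/* pool exhausted */
	/* ... build the channel program, submit, wait ... */
	dasd_sfree_request(cqr, device);	/* back to the pool */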
1275 struct dasd_device *device; dasd_check_cqr() local
1279 device = cqr->startdev; dasd_check_cqr()
1280 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { dasd_check_cqr()
1281 DBF_DEV_EVENT(DBF_WARNING, device, dasd_check_cqr()
1285 *(unsigned int *) device->discipline->name); dasd_check_cqr()
1293 * Timer keeps device running.
1299 struct dasd_device *device; dasd_term_IO() local
1308 device = (struct dasd_device *) cqr->startdev; dasd_term_IO()
1310 rc = ccw_device_clear(device->cdev, (long) cqr); dasd_term_IO()
1316 DBF_DEV_EVENT(DBF_DEBUG, device, dasd_term_IO()
1321 DBF_DEV_EVENT(DBF_ERR, device, "%s", dasd_term_IO()
1322 "device gone, retry"); dasd_term_IO()
1325 DBF_DEV_EVENT(DBF_ERR, device, "%s", dasd_term_IO()
1330 * device not valid so no I/O could be running dasd_term_IO()
1338 DBF_DEV_EVENT(DBF_ERR, device, "%s", dasd_term_IO()
1344 DBF_DEV_EVENT(DBF_ERR, device, "%s", dasd_term_IO()
1345 "device busy, retry later"); dasd_term_IO()
1350 dev_err(&device->cdev->dev, "An error occurred in the " dasd_term_IO()
1351 "DASD device driver, reason=%s\n", errorstring); dasd_term_IO()
1357 dasd_schedule_device_bh(device); dasd_term_IO()
1368 struct dasd_device *device; dasd_start_IO() local
1378 device = (struct dasd_device *) cqr->startdev; dasd_start_IO()
1381 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && dasd_start_IO()
1383 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " dasd_start_IO()
1392 dev_err(&device->cdev->dev, "An error occurred in the DASD " dasd_start_IO()
1393 "device driver, reason=%s\n", errorstring); dasd_start_IO()
1401 cqr->lpm &= device->path_data.opm; dasd_start_IO()
1403 cqr->lpm = device->path_data.opm; dasd_start_IO()
1406 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, dasd_start_IO()
1409 rc = ccw_device_start(device->cdev, cqr->cpaddr, dasd_start_IO()
1417 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_start_IO()
1418 "start_IO: device busy, retry later"); dasd_start_IO()
1421 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_start_IO()
1433 DBF_DEV_EVENT(DBF_WARNING, device, dasd_start_IO()
1436 } else if (cqr->lpm != device->path_data.opm) { dasd_start_IO()
1437 cqr->lpm = device->path_data.opm; dasd_start_IO()
1438 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", dasd_start_IO()
1442 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_start_IO()
1445 dasd_generic_last_path_gone(device); dasd_start_IO()
1446 device->path_data.opm = 0; dasd_start_IO()
1447 device->path_data.ppm = 0; dasd_start_IO()
1448 device->path_data.npm = 0; dasd_start_IO()
1449 device->path_data.tbvpm = dasd_start_IO()
1450 ccw_device_get_path_mask(device->cdev); dasd_start_IO()
1454 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_start_IO()
1455 "start_IO: -ENODEV device gone, retry"); dasd_start_IO()
1458 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_start_IO()
1459 "start_IO: -EIO device gone, retry"); dasd_start_IO()
1463 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_start_IO()
1464 "start_IO: -EINVAL device currently " dasd_start_IO()
1470 dev_err(&device->cdev->dev, dasd_start_IO()
1471 "An error occurred in the DASD device driver, " dasd_start_IO()
1492 struct dasd_device *device; dasd_device_timeout() local
1494 device = (struct dasd_device *) ptr; dasd_device_timeout()
1495 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); dasd_device_timeout()
1497 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); dasd_device_timeout()
1498 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); dasd_device_timeout()
1499 dasd_schedule_device_bh(device); dasd_device_timeout()
1503 * Setup timeout for a device in jiffies.
1505 void dasd_device_set_timer(struct dasd_device *device, int expires) dasd_device_set_timer() argument
1508 del_timer(&device->timer); dasd_device_set_timer()
1510 mod_timer(&device->timer, jiffies + expires); dasd_device_set_timer()
1515 * Clear timeout for a device.
1517 void dasd_device_clear_timer(struct dasd_device *device) dasd_device_clear_timer() argument
1519 del_timer(&device->timer); dasd_device_clear_timer()
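mod_timer() arms and re-arms in a single call, so the setter above only needs del_timer() for the cancel case (expires == 0). Usage, as the rest of the file does it:

	dasd_device_set_timer(device, 5 * HZ);	/* (re)arm: fire in 5 s */
	dasd_device_set_timer(device, 0);	/* cancel a pending timer */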
1527 struct dasd_device *device; dasd_handle_killed_request() local
1539 device = dasd_device_from_cdev_locked(cdev); dasd_handle_killed_request()
1540 if (IS_ERR(device)) { dasd_handle_killed_request()
1542 "unable to get device from cdev"); dasd_handle_killed_request()
1547 device != cqr->startdev || dasd_handle_killed_request()
1551 "invalid device in request"); dasd_handle_killed_request()
1552 dasd_put_device(device); dasd_handle_killed_request()
1559 dasd_device_clear_timer(device); dasd_handle_killed_request()
1560 dasd_schedule_device_bh(device); dasd_handle_killed_request()
1561 dasd_put_device(device); dasd_handle_killed_request()
1564 void dasd_generic_handle_state_change(struct dasd_device *device) dasd_generic_handle_state_change() argument
1567 dasd_eer_snss(device); dasd_generic_handle_state_change()
1569 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); dasd_generic_handle_state_change()
1570 dasd_schedule_device_bh(device); dasd_generic_handle_state_change()
1571 if (device->block) dasd_generic_handle_state_change()
1572 dasd_schedule_block_bh(device->block); dasd_generic_handle_state_change()
1583 struct dasd_device *device; dasd_int_handler() local
1612 device = dasd_device_from_cdev_locked(cdev); dasd_int_handler()
1613 if (IS_ERR(device)) dasd_int_handler()
1616 if (device->discipline == dasd_diag_discipline_pointer) { dasd_int_handler()
1617 dasd_put_device(device); dasd_int_handler()
1620 device->discipline->dump_sense_dbf(device, irb, "int"); dasd_int_handler()
1621 if (device->features & DASD_FEATURE_ERPLOG) dasd_int_handler()
1622 device->discipline->dump_sense(device, cqr, irb); dasd_int_handler()
1623 device->discipline->check_for_device_change(device, cqr, irb); dasd_int_handler()
1624 dasd_put_device(device); dasd_int_handler()
1629 device = dasd_device_from_cdev_locked(cdev); dasd_int_handler()
1630 device->discipline->check_attention(device, irb->esw.esw1.lpum); dasd_int_handler()
1631 dasd_put_device(device); dasd_int_handler()
1637 device = (struct dasd_device *) cqr->startdev; dasd_int_handler()
1638 if (!device || dasd_int_handler()
1639 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { dasd_int_handler()
1641 "invalid device in request"); dasd_int_handler()
1649 dasd_device_clear_timer(device); dasd_int_handler()
1651 dasd_schedule_device_bh(device); dasd_int_handler()
1657 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " dasd_int_handler()
1670 if (cqr->devlist.next != &device->ccw_queue) { dasd_int_handler()
1681 if (cqr->lpm == device->path_data.opm) dasd_int_handler()
1682 DBF_DEV_EVENT(DBF_DEBUG, device, dasd_int_handler()
1687 cqr->lpm = device->path_data.opm; dasd_int_handler()
1694 (!device->stopped)) { dasd_int_handler()
1695 if (device->discipline->start_IO(next) == 0) dasd_int_handler()
1699 dasd_device_set_timer(device, expires); dasd_int_handler()
1701 dasd_device_clear_timer(device); dasd_int_handler()
1702 dasd_schedule_device_bh(device); dasd_int_handler()
1708 struct dasd_device *device; dasd_generic_uc_handler() local
1710 device = dasd_device_from_cdev_locked(cdev); dasd_generic_uc_handler()
1712 if (IS_ERR(device)) dasd_generic_uc_handler()
1714 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || dasd_generic_uc_handler()
1715 device->state != device->target || dasd_generic_uc_handler()
1716 !device->discipline->check_for_device_change){ dasd_generic_uc_handler()
1717 dasd_put_device(device); dasd_generic_uc_handler()
1720 if (device->discipline->dump_sense_dbf) dasd_generic_uc_handler()
1721 device->discipline->dump_sense_dbf(device, irb, "uc"); dasd_generic_uc_handler()
1722 device->discipline->check_for_device_change(device, NULL, irb); dasd_generic_uc_handler()
1723 dasd_put_device(device); dasd_generic_uc_handler()
1733 static void __dasd_device_recovery(struct dasd_device *device, __dasd_device_recovery() argument
1745 list_for_each_safe(l, n, &device->ccw_queue) { __dasd_device_recovery()
1758 static void __dasd_device_process_ccw_queue(struct dasd_device *device, __dasd_device_process_ccw_queue() argument
1765 list_for_each_safe(l, n, &device->ccw_queue) { __dasd_device_process_ccw_queue()
1774 __dasd_device_recovery(device, cqr); __dasd_device_process_ccw_queue()
1785 static void __dasd_device_process_final_queue(struct dasd_device *device, __dasd_device_process_final_queue() argument
1816 dev_err(&device->cdev->dev, list_for_each_safe()
1817 "An error occurred in the DASD device driver, " list_for_each_safe()
1832 static void __dasd_device_check_expire(struct dasd_device *device) __dasd_device_check_expire() argument
1836 if (list_empty(&device->ccw_queue)) __dasd_device_check_expire()
1838 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); __dasd_device_check_expire()
1841 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { __dasd_device_check_expire()
1848 if (device->discipline->term_IO(cqr) != 0) { __dasd_device_check_expire()
1850 dev_err(&device->cdev->dev, __dasd_device_check_expire()
1855 dasd_device_set_timer(device, 5*HZ); __dasd_device_check_expire()
1857 dev_err(&device->cdev->dev, __dasd_device_check_expire()
1866 * return 1 when device is not eligible for IO
1868 static int __dasd_device_is_unusable(struct dasd_device *device, __dasd_device_is_unusable() argument
1873 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { __dasd_device_is_unusable()
1877 if (device->stopped) { __dasd_device_is_unusable()
1878 if (device->stopped & mask) { __dasd_device_is_unusable()
1883 /* CQR is not able to change device to __dasd_device_is_unusable()
1887 /* CQR required to get device operational. */ __dasd_device_is_unusable()
1896 static void __dasd_device_start_head(struct dasd_device *device) __dasd_device_start_head() argument
1901 if (list_empty(&device->ccw_queue)) __dasd_device_start_head()
1903 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); __dasd_device_start_head()
1906 /* if device is not usable return request to upper layer */ __dasd_device_start_head()
1907 if (__dasd_device_is_unusable(device, cqr)) { __dasd_device_start_head()
1910 dasd_schedule_device_bh(device); __dasd_device_start_head()
1914 rc = device->discipline->start_IO(cqr); __dasd_device_start_head()
1916 dasd_device_set_timer(device, cqr->expires); __dasd_device_start_head()
1918 dasd_schedule_device_bh(device); __dasd_device_start_head()
1921 dasd_device_set_timer(device, 50); __dasd_device_start_head()
1924 static void __dasd_device_check_path_events(struct dasd_device *device) __dasd_device_check_path_events() argument
1928 if (device->path_data.tbvpm) { __dasd_device_check_path_events()
1929 if (device->stopped & ~(DASD_STOPPED_DC_WAIT | __dasd_device_check_path_events()
1932 rc = device->discipline->verify_path( __dasd_device_check_path_events()
1933 device, device->path_data.tbvpm); __dasd_device_check_path_events()
1935 dasd_device_set_timer(device, 50); __dasd_device_check_path_events()
1937 device->path_data.tbvpm = 0; __dasd_device_check_path_events()
1947 * this function is called! In other words, when 'device' is a base
1948 * device then all block layer requests must have been removed before
1951 int dasd_flush_device_queue(struct dasd_device *device) dasd_flush_device_queue() argument
1958 spin_lock_irq(get_ccwdev_lock(device->cdev)); dasd_flush_device_queue()
1960 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { dasd_flush_device_queue()
1964 rc = device->discipline->term_IO(cqr); dasd_flush_device_queue()
1967 dev_err(&device->cdev->dev, dasd_flush_device_queue()
1984 spin_unlock_irq(get_ccwdev_lock(device->cdev)); dasd_flush_device_queue()
1997 __dasd_device_process_final_queue(device, &flush_queue); dasd_flush_device_queue()
2003 * Acquire the device lock and process queues for the device.
2005 static void dasd_device_tasklet(struct dasd_device *device) dasd_device_tasklet() argument
2009 atomic_set (&device->tasklet_scheduled, 0); dasd_device_tasklet()
2011 spin_lock_irq(get_ccwdev_lock(device->cdev)); dasd_device_tasklet()
2013 __dasd_device_check_expire(device); dasd_device_tasklet()
2015 __dasd_device_process_ccw_queue(device, &final_queue); dasd_device_tasklet()
2016 __dasd_device_check_path_events(device); dasd_device_tasklet()
2017 spin_unlock_irq(get_ccwdev_lock(device->cdev)); dasd_device_tasklet()
2019 __dasd_device_process_final_queue(device, &final_queue); dasd_device_tasklet()
2020 spin_lock_irq(get_ccwdev_lock(device->cdev)); dasd_device_tasklet()
2022 __dasd_device_start_head(device); dasd_device_tasklet()
2023 spin_unlock_irq(get_ccwdev_lock(device->cdev)); dasd_device_tasklet()
2026 dasd_put_device(device); dasd_device_tasklet()
2030 * Schedules a call to dasd_tasklet over the device tasklet.
2032 void dasd_schedule_device_bh(struct dasd_device *device) dasd_schedule_device_bh() argument
2035 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) dasd_schedule_device_bh()
2037 dasd_get_device(device); dasd_schedule_device_bh()
2038 tasklet_hi_schedule(&device->tasklet); dasd_schedule_device_bh()
2042 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) dasd_device_set_stop_bits() argument
2044 device->stopped |= bits; dasd_device_set_stop_bits()
2048 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) dasd_device_remove_stop_bits() argument
2050 device->stopped &= ~bits; dasd_device_remove_stop_bits()
2051 if (!device->stopped) dasd_device_remove_stop_bits()
2057 * Queue a request to the head of the device ccw_queue.
2062 struct dasd_device *device; dasd_add_request_head() local
2065 device = cqr->startdev; dasd_add_request_head()
2066 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); dasd_add_request_head()
2068 list_add(&cqr->devlist, &device->ccw_queue); dasd_add_request_head()
2070 dasd_schedule_device_bh(device); dasd_add_request_head()
2071 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); dasd_add_request_head()
2076 * Queue a request to the tail of the device ccw_queue.
2081 struct dasd_device *device; dasd_add_request_tail() local
2084 device = cqr->startdev; dasd_add_request_tail()
2085 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); dasd_add_request_tail()
2087 list_add_tail(&cqr->devlist, &device->ccw_queue); dasd_add_request_tail()
2089 dasd_schedule_device_bh(device); dasd_add_request_tail()
2090 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); dasd_add_request_tail()
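Both enqueue helpers share one shape: take the ccw device lock, mark the request queued, link it onto device->ccw_queue, and kick the bottom half. dasd_schedule_device_bh() uses atomic_cmpxchg() so at most one tasklet is ever outstanding, and the reference it takes is dropped when dasd_device_tasklet() finishes. The tail variant, condensed:

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;			  /* mark queued */
	list_add_tail(&cqr->devlist, &device->ccw_queue); /* append */
	dasd_schedule_device_bh(device);		  /* run tasklet */
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);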
2108 struct dasd_device *device; _wait_for_wakeup() local
2111 device = cqr->startdev; _wait_for_wakeup()
2112 spin_lock_irq(get_ccwdev_lock(device->cdev)); _wait_for_wakeup()
2114 spin_unlock_irq(get_ccwdev_lock(device->cdev)); _wait_for_wakeup()
2123 struct dasd_device *device; __dasd_sleep_on_erp() local
2128 device = cqr->startdev; __dasd_sleep_on_erp()
2131 device->discipline->handle_terminated_request(cqr); __dasd_sleep_on_erp()
2135 erp_fn = device->discipline->erp_action(cqr); __dasd_sleep_on_erp()
2142 __dasd_process_erp(device, cqr); __dasd_sleep_on_erp()
2162 struct dasd_device *device; _dasd_sleep_on() local
2169 device = maincqr->startdev; _dasd_sleep_on()
2179 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && _dasd_sleep_on()
2186 if (device->stopped & ~DASD_STOPPED_PENDING && _dasd_sleep_on()
2188 (!dasd_eer_enabled(device))) { _dasd_sleep_on()
2194 * Don't try to start requests if device is stopped _dasd_sleep_on()
2200 generic_waitq, !(device->stopped)); _dasd_sleep_on()
2207 wait_event(generic_waitq, !(device->stopped)); _dasd_sleep_on()
2257 struct dasd_device *device; _dasd_sleep_on_queue() local
2263 device = cqr->startdev; list_for_each_entry_safe()
2267 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && list_for_each_entry_safe()
2274 if (device->stopped & ~DASD_STOPPED_PENDING && list_for_each_entry_safe()
2276 !dasd_eer_enabled(device)) { list_for_each_entry_safe()
2282 /* Don't try to start requests if device is stopped */ list_for_each_entry_safe()
2285 generic_waitq, !device->stopped); list_for_each_entry_safe()
2292 wait_event(generic_waitq, !(device->stopped)); list_for_each_entry_safe()
2324 * Queue a request to the tail of the device ccw_queue and wait for
2343 * Queue a request to the tail of the device ccw_queue and wait
2358 static inline int _dasd_term_running_cqr(struct dasd_device *device) _dasd_term_running_cqr() argument
2363 if (list_empty(&device->ccw_queue)) _dasd_term_running_cqr()
2365 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); _dasd_term_running_cqr()
2366 rc = device->discipline->term_IO(cqr); _dasd_term_running_cqr()
2379 struct dasd_device *device; dasd_sleep_on_immediatly() local
2382 device = cqr->startdev; dasd_sleep_on_immediatly()
2383 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && dasd_sleep_on_immediatly()
2389 spin_lock_irq(get_ccwdev_lock(device->cdev)); dasd_sleep_on_immediatly()
2390 rc = _dasd_term_running_cqr(device); dasd_sleep_on_immediatly()
2392 spin_unlock_irq(get_ccwdev_lock(device->cdev)); dasd_sleep_on_immediatly()
2402 list_add(&cqr->devlist, device->ccw_queue.next); dasd_sleep_on_immediatly()
2405 dasd_schedule_device_bh(device); dasd_sleep_on_immediatly()
2407 spin_unlock_irq(get_ccwdev_lock(device->cdev)); dasd_sleep_on_immediatly()
2419 dasd_schedule_device_bh(device); dasd_sleep_on_immediatly()
2420 if (device->block) dasd_sleep_on_immediatly()
2421 dasd_schedule_block_bh(device->block); dasd_sleep_on_immediatly()
2438 struct dasd_device *device = cqr->startdev; dasd_cancel_req() local
2443 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); dasd_cancel_req()
2453 rc = device->discipline->term_IO(cqr); dasd_cancel_req()
2455 dev_err(&device->cdev->dev, dasd_cancel_req()
2465 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); dasd_cancel_req()
2466 dasd_schedule_device_bh(device); dasd_cancel_req()
2517 static void __dasd_process_erp(struct dasd_device *device, __dasd_process_erp() argument
2523 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); __dasd_process_erp()
2525 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); __dasd_process_erp()
2526 erp_fn = device->discipline->erp_postaction(cqr); __dasd_process_erp()
2531 * Fetch requests from the block device queue.
2547 * We requeue request from the block device queue to the ccw __dasd_process_request_queue()
2550 * for that. State DASD_STATE_ONLINE is normal block device __dasd_process_request_queue()
2559 /* if device is stopped do not fetch new requests */ __dasd_process_request_queue()
2594 * is the head-of-queue we stop the device __dasd_process_request_queue()
2754 /* Don't try to start requests if device is stopped */ __dasd_block_start_head()
2794 /* Get new request from the block device request queue */ dasd_block_tasklet()
2888 * Schedules a call to dasd_block_tasklet over the block tasklet.
2895 /* life cycle of block is bound to its base device */ dasd_schedule_block_bh()
2903 * SECTION: external block device operations
2916 /* Get new request from the block device request queue */ do_dasd_request()
2937 struct dasd_device *device; dasd_times_out() local
2943 device = cqr->startdev ? cqr->startdev : block->base; dasd_times_out()
2944 if (!device->blk_timeout) dasd_times_out()
2946 DBF_DEV_EVENT(DBF_WARNING, device, dasd_times_out()
2951 spin_lock(get_ccwdev_lock(device->cdev)); dasd_times_out()
2955 spin_unlock(get_ccwdev_lock(device->cdev)); dasd_times_out()
2960 spin_unlock(get_ccwdev_lock(device->cdev)); dasd_times_out()
2975 spin_unlock(get_ccwdev_lock(device->cdev)); dasd_times_out()
2977 spin_lock(get_ccwdev_lock(device->cdev)); dasd_times_out()
2991 spin_unlock(get_ccwdev_lock(device->cdev)); dasd_times_out()
3103 " Cannot open unrecognized device"); dasd_open()
3169 * end of block device operations
3197 * Is the device read-only?
3199 * readonly device attribute, but how it is configured in z/VM.
3201 int dasd_device_is_ro(struct dasd_device *device) dasd_device_is_ro() argument
3209 ccw_device_get_id(device->cdev, &dev_id); dasd_device_is_ro()
3271 struct dasd_device *device; dasd_generic_remove() local
3276 device = dasd_device_from_cdev(cdev); dasd_generic_remove()
3277 if (IS_ERR(device)) { dasd_generic_remove()
3281 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && dasd_generic_remove()
3282 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { dasd_generic_remove()
3284 dasd_put_device(device); dasd_generic_remove()
3289 * This device is removed unconditionally. Set offline dasd_generic_remove()
3293 dasd_set_target_state(device, DASD_STATE_NEW); dasd_generic_remove()
3294 /* dasd_delete_device destroys the device reference. */ dasd_generic_remove()
3295 block = device->block; dasd_generic_remove()
3296 dasd_delete_device(device); dasd_generic_remove()
3298 * life cycle of block is bound to device, so delete it after dasd_generic_remove()
3299 * device was safely removed dasd_generic_remove()
3309 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
3310 * the device is detected for the first time and is supposed to be used
3317 struct dasd_device *device; dasd_generic_set_online() local
3322 device = dasd_create_device(cdev); dasd_generic_set_online()
3323 if (IS_ERR(device)) dasd_generic_set_online()
3324 return PTR_ERR(device); dasd_generic_set_online()
3327 if (device->features & DASD_FEATURE_USEDIAG) { dasd_generic_set_online()
3337 dasd_delete_device(device); dasd_generic_set_online()
3346 dasd_delete_device(device); dasd_generic_set_online()
3352 dasd_delete_device(device); dasd_generic_set_online()
3357 dasd_delete_device(device); dasd_generic_set_online()
3360 device->base_discipline = base_discipline; dasd_generic_set_online()
3361 device->discipline = discipline; dasd_generic_set_online()
3363 /* check_device will allocate block device if necessary */ dasd_generic_set_online()
3364 rc = discipline->check_device(device); dasd_generic_set_online()
3370 dasd_delete_device(device); dasd_generic_set_online()
3374 dasd_set_target_state(device, DASD_STATE_ONLINE); dasd_generic_set_online()
3375 if (device->state <= DASD_STATE_KNOWN) { dasd_generic_set_online()
3379 dasd_set_target_state(device, DASD_STATE_NEW); dasd_generic_set_online()
3380 if (device->block) dasd_generic_set_online()
3381 dasd_free_block(device->block); dasd_generic_set_online()
3382 dasd_delete_device(device); dasd_generic_set_online()
3384 pr_debug("dasd_generic device %s found\n", dasd_generic_set_online()
3387 wait_event(dasd_init_waitq, _wait_for_device(device)); dasd_generic_set_online()
3389 dasd_put_device(device); dasd_generic_set_online()
3396 struct dasd_device *device; dasd_generic_set_offline() local
3401 device = dasd_device_from_cdev(cdev); dasd_generic_set_offline()
3402 if (IS_ERR(device)) dasd_generic_set_offline()
3403 return PTR_ERR(device); dasd_generic_set_offline()
3406 * We must make sure that this device is currently not in use. dasd_generic_set_offline()
3411 if (device->block) { dasd_generic_set_offline()
3412 max_count = device->block->bdev ? 0 : -1; dasd_generic_set_offline()
3413 open_count = atomic_read(&device->block->open_count); dasd_generic_set_offline()
3421 clear_bit(DASD_FLAG_OFFLINE, &device->flags); dasd_generic_set_offline()
3422 dasd_put_device(device); dasd_generic_set_offline()
3427 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { dasd_generic_set_offline()
3433 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { dasd_generic_set_offline()
3435 dasd_put_device(device); dasd_generic_set_offline()
3438 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); dasd_generic_set_offline()
3441 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { dasd_generic_set_offline()
3443 dasd_put_device(device); dasd_generic_set_offline()
3452 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && dasd_generic_set_offline()
3453 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { dasd_generic_set_offline()
3455 * If we want to set the device safe offline all IO operations dasd_generic_set_offline()
3461 rc = fsync_bdev(device->block->bdev); dasd_generic_set_offline()
3465 /* schedule device tasklet and wait for completion */ dasd_generic_set_offline()
3466 dasd_schedule_device_bh(device); dasd_generic_set_offline()
3468 _wait_for_empty_queues(device)); dasd_generic_set_offline()
3473 set_bit(DASD_FLAG_OFFLINE, &device->flags); dasd_generic_set_offline()
3474 dasd_set_target_state(device, DASD_STATE_NEW); dasd_generic_set_offline()
3475 /* dasd_delete_device destroys the device reference. */ dasd_generic_set_offline()
3476 block = device->block; dasd_generic_set_offline()
3477 dasd_delete_device(device); dasd_generic_set_offline()
3479 * life cycle of block is bound to device, so delete it after dasd_generic_set_offline()
3480 * device was safely removed dasd_generic_set_offline()
3488 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); dasd_generic_set_offline()
3489 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); dasd_generic_set_offline()
3490 clear_bit(DASD_FLAG_OFFLINE, &device->flags); dasd_generic_set_offline()
3491 dasd_put_device(device); dasd_generic_set_offline()
3496 int dasd_generic_last_path_gone(struct dasd_device *device) dasd_generic_last_path_gone() argument
3500 dev_warn(&device->cdev->dev, "No operational channel path is left " dasd_generic_last_path_gone()
3501 "for the device\n"); dasd_generic_last_path_gone()
3502 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); dasd_generic_last_path_gone()
3504 dasd_eer_write(device, NULL, DASD_EER_NOPATH); dasd_generic_last_path_gone()
3506 if (device->state < DASD_STATE_BASIC) dasd_generic_last_path_gone()
3509 list_for_each_entry(cqr, &device->ccw_queue, devlist) dasd_generic_last_path_gone()
3515 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); dasd_generic_last_path_gone()
3516 dasd_device_clear_timer(device); dasd_generic_last_path_gone()
3517 dasd_schedule_device_bh(device); dasd_generic_last_path_gone()
3522 int dasd_generic_path_operational(struct dasd_device *device) dasd_generic_path_operational() argument
3524 dev_info(&device->cdev->dev, "A channel path to the device has become " dasd_generic_path_operational()
3526 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); dasd_generic_path_operational()
3527 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); dasd_generic_path_operational()
3528 if (device->stopped & DASD_UNRESUMED_PM) { dasd_generic_path_operational()
3529 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); dasd_generic_path_operational()
3530 dasd_restore_device(device); dasd_generic_path_operational()
3533 dasd_schedule_device_bh(device); dasd_generic_path_operational()
3534 if (device->block) dasd_generic_path_operational()
3535 dasd_schedule_block_bh(device->block); dasd_generic_path_operational()
3537 if (!device->stopped) dasd_generic_path_operational()
3546 struct dasd_device *device; dasd_generic_notify() local
3549 device = dasd_device_from_cdev_locked(cdev); dasd_generic_notify()
3550 if (IS_ERR(device)) dasd_generic_notify()
3557 device->path_data.opm = 0; dasd_generic_notify()
3558 device->path_data.ppm = 0; dasd_generic_notify()
3559 device->path_data.npm = 0; dasd_generic_notify()
3560 ret = dasd_generic_last_path_gone(device); dasd_generic_notify()
3564 if (device->path_data.opm) dasd_generic_notify()
3565 ret = dasd_generic_path_operational(device); dasd_generic_notify()
3568 dasd_put_device(device); dasd_generic_notify()
3577 struct dasd_device *device; dasd_generic_path_event() local
3579 device = dasd_device_from_cdev_locked(cdev); dasd_generic_path_event()
3580 if (IS_ERR(device)) dasd_generic_path_event()
3585 oldopm = device->path_data.opm; dasd_generic_path_event()
3586 device->path_data.opm &= ~eventlpm; dasd_generic_path_event()
3587 device->path_data.ppm &= ~eventlpm; dasd_generic_path_event()
3588 device->path_data.npm &= ~eventlpm; dasd_generic_path_event()
3589 if (oldopm && !device->path_data.opm) { dasd_generic_path_event()
3590 dev_warn(&device->cdev->dev, dasd_generic_path_event()
3592 "for the device\n"); dasd_generic_path_event()
3593 DBF_DEV_EVENT(DBF_WARNING, device, dasd_generic_path_event()
3595 dasd_eer_write(device, NULL, DASD_EER_NOPATH); dasd_generic_path_event()
3596 dasd_device_set_stop_bits(device, dasd_generic_path_event()
3601 device->path_data.opm &= ~eventlpm; dasd_generic_path_event()
3602 device->path_data.ppm &= ~eventlpm; dasd_generic_path_event()
3603 device->path_data.npm &= ~eventlpm; dasd_generic_path_event()
3604 device->path_data.tbvpm |= eventlpm; dasd_generic_path_event()
3605 dasd_schedule_device_bh(device); dasd_generic_path_event()
3608 if (!(device->path_data.opm & eventlpm) && dasd_generic_path_event()
3609 !(device->path_data.tbvpm & eventlpm)) { dasd_generic_path_event()
3615 device->path_data.tbvpm |= eventlpm; dasd_generic_path_event()
3616 dasd_schedule_device_bh(device); dasd_generic_path_event()
3618 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_generic_path_event()
3620 if (device->discipline->kick_validate) dasd_generic_path_event()
3621 device->discipline->kick_validate(device); dasd_generic_path_event()
3624 dasd_put_device(device); dasd_generic_path_event()
3628 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) dasd_generic_verify_path() argument
3630 if (!device->path_data.opm && lpm) { dasd_generic_verify_path()
3631 device->path_data.opm = lpm; dasd_generic_verify_path()
3632 dasd_generic_path_operational(device); dasd_generic_verify_path()
3634 device->path_data.opm |= lpm; dasd_generic_verify_path()
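The path-event hits above all manipulate per-device channel-path masks: opm, ppm, npm (operational, preferred and non-preferred, respectively) and tbvpm (paths still to be verified), each an 8-bit mask with one bit per path. A gone path is cleared from every usable mask; if that empties opm the device is stopped with DASD_STOPPED_DC_WAIT, and dasd_generic_verify_path() simply ORs a verified path back in. A compact sketch of the mask logic:

    #include <stdio.h>

    typedef unsigned char u8;

    struct path_data { u8 opm, ppm, npm, tbvpm; };

    /* A path reported gone: drop it from every usable mask; the caller
     * stops the device if this returns 1 (last operational path lost). */
    static int path_gone(struct path_data *p, u8 lpm)
    {
        u8 oldopm = p->opm;
        p->opm &= ~lpm;
        p->ppm &= ~lpm;
        p->npm &= ~lpm;
        return oldopm && !p->opm;
    }

    /* A path verified as usable again, as in dasd_generic_verify_path(). */
    static void path_back(struct path_data *p, u8 lpm)
    {
        p->opm |= lpm;
    }

    int main(void)
    {
        struct path_data p = { .opm = 0x80 | 0x40 };    /* two paths */
        printf("last gone: %d\n", path_gone(&p, 0x40)); /* 0: one left */
        printf("last gone: %d\n", path_gone(&p, 0x80)); /* 1: none left */
        path_back(&p, 0x80);
        printf("opm=0x%02x\n", p.opm);                  /* 0x80 again */
        return 0;
    }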
3642 struct dasd_device *device = dasd_device_from_cdev(cdev); dasd_generic_pm_freeze() local
3648 if (IS_ERR(device)) dasd_generic_pm_freeze()
3649 return PTR_ERR(device); dasd_generic_pm_freeze()
3651 /* mark device as suspended */ dasd_generic_pm_freeze()
3652 set_bit(DASD_FLAG_SUSPENDED, &device->flags); dasd_generic_pm_freeze()
3654 if (device->discipline->freeze) dasd_generic_pm_freeze()
3655 rc = device->discipline->freeze(device); dasd_generic_pm_freeze()
3658 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); dasd_generic_pm_freeze()
3664 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { dasd_generic_pm_freeze()
3667 rc = device->discipline->term_IO(cqr); dasd_generic_pm_freeze()
3670 dev_err(&device->cdev->dev, dasd_generic_pm_freeze()
3674 dasd_put_device(device); dasd_generic_pm_freeze()
3689 block device requests */ dasd_generic_pm_freeze()
3693 /* remove requests from device and block queue */ dasd_generic_pm_freeze()
3711 * and go back to the device queue dasd_generic_pm_freeze()
3716 list_splice_tail(&freeze_queue, &device->ccw_queue); dasd_generic_pm_freeze()
3719 dasd_put_device(device); dasd_generic_pm_freeze()
3726 struct dasd_device *device = dasd_device_from_cdev(cdev); dasd_generic_restore_device() local
3729 if (IS_ERR(device)) dasd_generic_restore_device()
3730 return PTR_ERR(device); dasd_generic_restore_device()
3733 dasd_device_remove_stop_bits(device, dasd_generic_restore_device()
3736 dasd_schedule_device_bh(device); dasd_generic_restore_device()
3740 * if device is stopped do nothing e.g. for disconnected devices dasd_generic_restore_device()
3742 if (device->discipline->restore && !(device->stopped)) dasd_generic_restore_device()
3743 rc = device->discipline->restore(device); dasd_generic_restore_device()
3744 if (rc || device->stopped) dasd_generic_restore_device()
3749 device->stopped |= DASD_UNRESUMED_PM; dasd_generic_restore_device()
3751 if (device->block) dasd_generic_restore_device()
3752 dasd_schedule_block_bh(device->block); dasd_generic_restore_device()
3754 clear_bit(DASD_FLAG_SUSPENDED, &device->flags); dasd_generic_restore_device()
3755 dasd_put_device(device); dasd_generic_restore_device()
3760 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, dasd_generic_build_rdc() argument
3769 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); dasd_generic_build_rdc()
3773 dev_err(&device->cdev->dev, dasd_generic_build_rdc()
3774 "An error occurred in the DASD device driver, " dasd_generic_build_rdc()
3792 cqr->startdev = device; dasd_generic_build_rdc()
3793 cqr->memdev = device; dasd_generic_build_rdc()
3802 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, dasd_generic_read_dev_chars() argument
3808 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, dasd_generic_read_dev_chars()
3855 struct dasd_device *device; dasd_generic_shutdown() local
3857 device = dasd_device_from_cdev(cdev); dasd_generic_shutdown()
3858 if (IS_ERR(device)) dasd_generic_shutdown()
3861 if (device->block) dasd_generic_shutdown()
3862 dasd_schedule_block_bh(device->block); dasd_generic_shutdown()
3864 dasd_schedule_device_bh(device); dasd_generic_shutdown()
3866 wait_event(shutdown_waitq, _wait_for_empty_queues(device)); dasd_generic_shutdown()
3914 pr_info("The DASD device driver could not be initialized\n"); dasd_init()
1160 dasd_kmalloc_request(int magic, int cplength, int datasize, struct dasd_device *device) dasd_kmalloc_request() argument
1198 dasd_smalloc_request(int magic, int cplength, int datasize, struct dasd_device *device) dasd_smalloc_request() argument
H A Ddasd_erp.c27 struct dasd_device * device) dasd_alloc_erp_request()
43 spin_lock_irqsave(&device->mem_lock, flags); dasd_alloc_erp_request()
45 dasd_alloc_chunk(&device->erp_chunks, size); dasd_alloc_erp_request()
46 spin_unlock_irqrestore(&device->mem_lock, flags); dasd_alloc_erp_request()
67 dasd_get_device(device); dasd_alloc_erp_request()
72 dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device) dasd_free_erp_request() argument
76 spin_lock_irqsave(&device->mem_lock, flags); dasd_free_erp_request()
77 dasd_free_chunk(&device->erp_chunks, cqr); dasd_free_erp_request()
78 spin_unlock_irqrestore(&device->mem_lock, flags); dasd_free_erp_request()
79 atomic_dec(&device->ref_count); dasd_free_erp_request()
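dasd_alloc_erp_request() and dasd_free_erp_request() above draw from device->erp_chunks under mem_lock: a memory reserve carved out at device setup, so error recovery can still build requests when the general allocator might fail. A toy fixed-slot pool showing the same idea (SLOTS, SLOT_SIZE and the function names are made up for this sketch; the real code manages a variable-size chunk list):

    #include <pthread.h>
    #include <stdio.h>

    #define SLOTS     4
    #define SLOT_SIZE 256

    static pthread_mutex_t mem_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char pool[SLOTS][SLOT_SIZE];   /* reserved up front */
    static int used[SLOTS];

    static void *pool_alloc(void)
    {
        void *p = NULL;
        pthread_mutex_lock(&mem_lock);
        for (int i = 0; i < SLOTS; i++)
            if (!used[i]) { used[i] = 1; p = pool[i]; break; }
        pthread_mutex_unlock(&mem_lock);
        return p;               /* NULL only if the reserve is exhausted */
    }

    static void pool_free(void *p)
    {
        pthread_mutex_lock(&mem_lock);
        used[((unsigned char (*)[SLOT_SIZE])p) - pool] = 0;
        pthread_mutex_unlock(&mem_lock);
    }

    int main(void)
    {
        void *a = pool_alloc(), *b = pool_alloc();
        printf("a=%p b=%p\n", a, b);
        pool_free(a);
        printf("again=%p\n", pool_alloc());  /* reuses the slot of 'a' */
        return 0;
    }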
89 struct dasd_device *device; dasd_default_erp_action() local
91 device = cqr->startdev; dasd_default_erp_action()
95 DBF_DEV_EVENT(DBF_DEBUG, device, dasd_default_erp_action()
99 cqr->lpm = device->path_data.opm; dasd_default_erp_action()
103 dev_name(&device->cdev->dev)); dasd_default_erp_action()
167 struct dasd_device *device; dasd_log_sense() local
169 device = cqr->startdev; dasd_log_sense()
171 dev_err(&device->cdev->dev, dasd_log_sense()
176 dev_err(&device->cdev->dev, dasd_log_sense()
181 if (device->discipline && device->discipline->dump_sense) dasd_log_sense()
182 device->discipline->dump_sense(device, cqr, irb); dasd_log_sense()
188 struct dasd_device *device; dasd_log_sense_dbf() local
190 device = cqr->startdev; dasd_log_sense_dbf()
192 if (device->discipline && device->discipline->dump_sense_dbf) dasd_log_sense_dbf()
193 device->discipline->dump_sense_dbf(device, irb, "log"); dasd_log_sense_dbf()
26 dasd_alloc_erp_request(char *magic, int cplength, int datasize, struct dasd_device * device) dasd_alloc_erp_request() argument
H A Ddasd_alias.c25 * - A device is connected to an lcu as long as the device exists.
27 * device is checked by the eckd discipline and
29 * before the device is deleted.
31 * functions mark the point when a device is 'ready for service'.
35 * - dasd_alias_get_start_dev will find an alias device that can be used
36 * instead of the base device and does some (very simple) load balancing.
181 * so this function must be called first for a new device.
186 int dasd_alias_make_device_known_to_lcu(struct dasd_device *device) dasd_alias_make_device_known_to_lcu() argument
194 private = (struct dasd_eckd_private *) device->private; dasd_alias_make_device_known_to_lcu()
196 device->discipline->get_uid(device, &uid); dasd_alias_make_device_known_to_lcu()
232 list_add(&device->alias_list, &lcu->inactive_devices); dasd_alias_make_device_known_to_lcu()
241 * This function removes a device from the scope of alias management.
245 void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) dasd_alias_disconnect_device_from_lcu() argument
254 private = (struct dasd_eckd_private *) device->private; dasd_alias_disconnect_device_from_lcu()
259 device->discipline->get_uid(device, &uid); dasd_alias_disconnect_device_from_lcu()
261 list_del_init(&device->alias_list); dasd_alias_disconnect_device_from_lcu()
262 /* make sure that the workers don't use this device */ dasd_alias_disconnect_device_from_lcu()
263 if (device == lcu->suc_data.device) { dasd_alias_disconnect_device_from_lcu()
267 if (device == lcu->suc_data.device) { dasd_alias_disconnect_device_from_lcu()
268 dasd_put_device(device); dasd_alias_disconnect_device_from_lcu()
269 lcu->suc_data.device = NULL; dasd_alias_disconnect_device_from_lcu()
273 if (device == lcu->ruac_data.device) { dasd_alias_disconnect_device_from_lcu()
278 if (device == lcu->ruac_data.device) { dasd_alias_disconnect_device_from_lcu()
279 dasd_put_device(device); dasd_alias_disconnect_device_from_lcu()
280 lcu->ruac_data.device = NULL; dasd_alias_disconnect_device_from_lcu()
310 * in the lcu is up to date and will update the device uid before
315 struct dasd_device *device, _add_device_to_lcu()
324 private = (struct dasd_eckd_private *) device->private; _add_device_to_lcu()
327 if (device != pos) _add_device_to_lcu()
328 spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags, _add_device_to_lcu()
335 if (device != pos) _add_device_to_lcu()
336 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); _add_device_to_lcu()
340 list_move(&device->alias_list, &lcu->active_devices); _add_device_to_lcu()
363 list_move(&device->alias_list, &group->baselist); _add_device_to_lcu()
365 list_move(&device->alias_list, &group->aliaslist); _add_device_to_lcu()
371 struct dasd_device *device) _remove_device_from_lcu()
376 private = (struct dasd_eckd_private *) device->private; _remove_device_from_lcu()
377 list_move(&device->alias_list, &lcu->inactive_devices); _remove_device_from_lcu()
387 if (group->next == device) _remove_device_from_lcu()
414 static int read_unit_address_configuration(struct dasd_device *device, read_unit_address_configuration() argument
425 device); read_unit_address_configuration()
428 cqr->startdev = device; read_unit_address_configuration()
429 cqr->memdev = device; read_unit_address_configuration()
481 struct dasd_device *device, *tempdev; _lcu_update() local
487 list_for_each_entry_safe(device, tempdev, &pavgroup->baselist, _lcu_update()
489 list_move(&device->alias_list, &lcu->active_devices); _lcu_update()
490 private = (struct dasd_eckd_private *) device->private; _lcu_update()
493 list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist, _lcu_update()
495 list_move(&device->alias_list, &lcu->active_devices); _lcu_update()
496 private = (struct dasd_eckd_private *) device->private; _lcu_update()
526 list_for_each_entry_safe(device, tempdev, &lcu->active_devices, _lcu_update()
528 _add_device_to_lcu(lcu, device, refdev); _lcu_update()
539 struct dasd_device *device; lcu_update_work() local
545 device = ruac_data->device; lcu_update_work()
546 rc = _lcu_update(device, lcu); lcu_update_work()
549 * prepare_update or a new device was added while we were still lcu_update_work()
554 DBF_DEV_EVENT(DBF_WARNING, device, "could not update" lcu_update_work()
557 dasd_put_device(device); lcu_update_work()
559 dasd_put_device(device); lcu_update_work()
560 lcu->ruac_data.device = NULL; lcu_update_work()
567 struct dasd_device *device) _schedule_lcu_update()
573 if (lcu->ruac_data.device) { _schedule_lcu_update()
577 if (device && !list_empty(&device->alias_list)) _schedule_lcu_update()
578 usedev = device; _schedule_lcu_update()
597 * if we haven't found a proper device yet, give up for now, the next _schedule_lcu_update()
598 * device that will be set active will trigger an lcu update _schedule_lcu_update()
603 lcu->ruac_data.device = usedev; _schedule_lcu_update()
609 int dasd_alias_add_device(struct dasd_device *device) dasd_alias_add_device() argument
616 private = (struct dasd_eckd_private *) device->private; dasd_alias_add_device()
621 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); dasd_alias_add_device()
624 rc = _add_device_to_lcu(lcu, device, device); dasd_alias_add_device()
629 list_move(&device->alias_list, &lcu->active_devices); dasd_alias_add_device()
630 _schedule_lcu_update(lcu, device); dasd_alias_add_device()
633 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); dasd_alias_add_device()
637 int dasd_alias_update_add_device(struct dasd_device *device) dasd_alias_update_add_device() argument
640 private = (struct dasd_eckd_private *) device->private; dasd_alias_update_add_device()
642 return dasd_alias_add_device(device); dasd_alias_update_add_device()
645 int dasd_alias_remove_device(struct dasd_device *device) dasd_alias_remove_device() argument
651 private = (struct dasd_eckd_private *) device->private; dasd_alias_remove_device()
657 _remove_device_from_lcu(lcu, device); dasd_alias_remove_device()
683 * use base device to do IO dasd_alias_get_start_dev()
722 struct dasd_device *device, reset_summary_unit_check()
741 cqr->startdev = device; reset_summary_unit_check()
742 cqr->memdev = device; reset_summary_unit_check()
755 struct dasd_device *device; _restart_all_base_devices_on_lcu() local
760 list_for_each_entry(device, &lcu->active_devices, alias_list) { _restart_all_base_devices_on_lcu()
761 private = (struct dasd_eckd_private *) device->private; _restart_all_base_devices_on_lcu()
762 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); _restart_all_base_devices_on_lcu()
764 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), _restart_all_base_devices_on_lcu()
768 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); _restart_all_base_devices_on_lcu()
769 dasd_schedule_block_bh(device->block); _restart_all_base_devices_on_lcu()
770 dasd_schedule_device_bh(device); _restart_all_base_devices_on_lcu()
772 list_for_each_entry(device, &lcu->inactive_devices, alias_list) { _restart_all_base_devices_on_lcu()
773 private = (struct dasd_eckd_private *) device->private; _restart_all_base_devices_on_lcu()
774 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); _restart_all_base_devices_on_lcu()
776 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), _restart_all_base_devices_on_lcu()
780 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); _restart_all_base_devices_on_lcu()
781 dasd_schedule_block_bh(device->block); _restart_all_base_devices_on_lcu()
782 dasd_schedule_device_bh(device); _restart_all_base_devices_on_lcu()
785 list_for_each_entry(device, &pavgroup->baselist, alias_list) { _restart_all_base_devices_on_lcu()
786 dasd_schedule_block_bh(device->block); _restart_all_base_devices_on_lcu()
787 dasd_schedule_device_bh(device); _restart_all_base_devices_on_lcu()
795 struct dasd_device *device, *temp; flush_all_alias_devices_on_lcu() local
813 list_for_each_entry_safe(device, temp, &lcu->active_devices, flush_all_alias_devices_on_lcu()
815 private = (struct dasd_eckd_private *) device->private; flush_all_alias_devices_on_lcu()
818 list_move(&device->alias_list, &active); flush_all_alias_devices_on_lcu()
825 device = list_first_entry(&active, struct dasd_device, flush_all_alias_devices_on_lcu()
828 rc = dasd_flush_device_queue(device); flush_all_alias_devices_on_lcu()
831 * only move device around if it wasn't moved away while we flush_all_alias_devices_on_lcu()
834 if (device == list_first_entry(&active, flush_all_alias_devices_on_lcu()
836 list_move(&device->alias_list, &lcu->active_devices); flush_all_alias_devices_on_lcu()
837 private = (struct dasd_eckd_private *) device->private; flush_all_alias_devices_on_lcu()
844 static void __stop_device_on_lcu(struct dasd_device *device, __stop_device_on_lcu() argument
847 /* If pos == device then device is already locked! */ __stop_device_on_lcu()
848 if (pos == device) { __stop_device_on_lcu()
859 * cdev lock for device is already locked!
862 struct dasd_device *device) _stop_all_devices_on_lcu()
868 __stop_device_on_lcu(device, pos); _stop_all_devices_on_lcu()
870 __stop_device_on_lcu(device, pos); _stop_all_devices_on_lcu()
873 __stop_device_on_lcu(device, pos); _stop_all_devices_on_lcu()
875 __stop_device_on_lcu(device, pos); _stop_all_devices_on_lcu()
882 struct dasd_device *device; _unstop_all_devices_on_lcu() local
885 list_for_each_entry(device, &lcu->active_devices, alias_list) { _unstop_all_devices_on_lcu()
886 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); _unstop_all_devices_on_lcu()
887 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); _unstop_all_devices_on_lcu()
888 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); _unstop_all_devices_on_lcu()
891 list_for_each_entry(device, &lcu->inactive_devices, alias_list) { _unstop_all_devices_on_lcu()
892 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); _unstop_all_devices_on_lcu()
893 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); _unstop_all_devices_on_lcu()
894 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); _unstop_all_devices_on_lcu()
898 list_for_each_entry(device, &pavgroup->baselist, alias_list) { _unstop_all_devices_on_lcu()
899 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); _unstop_all_devices_on_lcu()
900 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); _unstop_all_devices_on_lcu()
901 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), _unstop_all_devices_on_lcu()
904 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { _unstop_all_devices_on_lcu()
905 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); _unstop_all_devices_on_lcu()
906 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); _unstop_all_devices_on_lcu()
907 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), _unstop_all_devices_on_lcu()
918 struct dasd_device *device; summary_unit_check_handling_work() local
923 device = suc_data->device; summary_unit_check_handling_work()
929 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); summary_unit_check_handling_work()
930 dasd_device_remove_stop_bits(device, summary_unit_check_handling_work()
932 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); summary_unit_check_handling_work()
933 reset_summary_unit_check(lcu, device, suc_data->reason); summary_unit_check_handling_work()
939 _schedule_lcu_update(lcu, device); summary_unit_check_handling_work()
940 lcu->suc_data.device = NULL; summary_unit_check_handling_work()
941 dasd_put_device(device); summary_unit_check_handling_work()
948 void dasd_alias_handle_summary_unit_check(struct dasd_device *device, dasd_alias_handle_summary_unit_check() argument
956 private = (struct dasd_eckd_private *) device->private; dasd_alias_handle_summary_unit_check()
961 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", dasd_alias_handle_summary_unit_check()
964 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_alias_handle_summary_unit_check()
972 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_alias_handle_summary_unit_check()
973 "device not ready to handle summary" dasd_alias_handle_summary_unit_check()
978 _stop_all_devices_on_lcu(lcu, device); dasd_alias_handle_summary_unit_check()
981 /* If this device is about to be removed just return and wait for dasd_alias_handle_summary_unit_check()
982 * the next interrupt on a different device dasd_alias_handle_summary_unit_check()
984 if (list_empty(&device->alias_list)) { dasd_alias_handle_summary_unit_check()
985 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_alias_handle_summary_unit_check()
986 "device is in offline processing," dasd_alias_handle_summary_unit_check()
991 if (lcu->suc_data.device) { dasd_alias_handle_summary_unit_check()
993 DBF_DEV_EVENT(DBF_WARNING, device, "%s", dasd_alias_handle_summary_unit_check()
1000 lcu->suc_data.device = device; dasd_alias_handle_summary_unit_check()
1001 dasd_get_device(device); dasd_alias_handle_summary_unit_check()
1004 dasd_put_device(device); dasd_alias_handle_summary_unit_check()
314 _add_device_to_lcu(struct alias_lcu *lcu, struct dasd_device *device, struct dasd_device *pos) _add_device_to_lcu() argument
370 _remove_device_from_lcu(struct alias_lcu *lcu, struct dasd_device *device) _remove_device_from_lcu() argument
566 _schedule_lcu_update(struct alias_lcu *lcu, struct dasd_device *device) _schedule_lcu_update() argument
721 reset_summary_unit_check(struct alias_lcu *lcu, struct dasd_device *device, char reason) reset_summary_unit_check() argument
861 _stop_all_devices_on_lcu(struct alias_lcu *lcu, struct dasd_device *device) _stop_all_devices_on_lcu() argument
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/bus/
H A Dnv31.c34 struct nvkm_device *device = subdev->device; nv31_bus_intr() local
35 u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140); nv31_bus_intr()
36 u32 gpio = nvkm_rd32(device, 0x001104) & nvkm_rd32(device, 0x001144); nv31_bus_intr()
39 struct nvkm_gpio *gpio = device->gpio; nv31_bus_intr()
45 u32 addr = nvkm_rd32(device, 0x009084); nv31_bus_intr()
46 u32 data = nvkm_rd32(device, 0x009088); nv31_bus_intr()
53 nvkm_wr32(device, 0x001100, 0x00000008); nv31_bus_intr()
57 struct nvkm_therm *therm = device->therm; nv31_bus_intr()
61 nvkm_wr32(device, 0x001100, 0x00070000); nv31_bus_intr()
66 nvkm_mask(device, 0x001140, stat, 0x00000000); nv31_bus_intr()
73 struct nvkm_device *device = bus->subdev.device; nv31_bus_init() local
74 nvkm_wr32(device, 0x001100, 0xffffffff); nv31_bus_init()
75 nvkm_wr32(device, 0x001140, 0x00070008); nv31_bus_init()
85 nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus) nv31_bus_new() argument
87 return nvkm_bus_new_(&nv31_bus, device, index, pbus); nv31_bus_new()
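All four bus implementations in this directory follow the same interrupt shape: read pending AND enabled, service the sources you recognize and ack them (the INTR register is write-one-to-clear), then mask any leftover bits out of the enable register so an unknown source cannot wedge the machine in an interrupt storm. A self-contained model of that loop (the two-entry regs[] array and the 0x8 bit are stand-ins, not real hardware offsets):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[2];            /* [0] = INTR (W1C), [1] = INTR_EN */

    static uint32_t rd32(int r)             { return regs[r]; }
    static void     wr32(int r, uint32_t v) /* model W1C on the INTR reg */
    { if (r == 0) regs[0] &= ~v; else regs[r] = v; }
    static void     mask32(int r, uint32_t m, uint32_t v)
    { regs[r] = (regs[r] & ~m) | v; }

    static void bus_intr(void)
    {
        uint32_t stat = rd32(0) & rd32(1);  /* pending AND enabled */

        if (stat & 0x00000008) {            /* known source: handle + ack */
            printf("MMIO fault handled\n");
            wr32(0, 0x00000008);
            stat &= ~0x00000008;
        }
        if (stat) {                         /* unknown bits: report, then
                                             * disable so the line cannot
                                             * be raised forever */
            printf("unknown intr 0x%08x\n", stat);
            mask32(1, stat, 0x00000000);
        }
    }

    int main(void)
    {
        regs[1] = 0x0000000e;               /* enable a few sources */
        regs[0] = 0x0000000c;               /* 0x8 known + 0x4 unknown */
        bus_intr();
        printf("en=0x%08x pend=0x%08x\n", regs[1], regs[0]);
        return 0;
    }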
H A Dnv50.c33 struct nvkm_device *device = bus->subdev.device; nv50_bus_hwsq_exec() local
36 nvkm_mask(device, 0x001098, 0x00000008, 0x00000000); nv50_bus_hwsq_exec()
37 nvkm_wr32(device, 0x001304, 0x00000000); nv50_bus_hwsq_exec()
39 nvkm_wr32(device, 0x001400 + (i * 4), data[i]); nv50_bus_hwsq_exec()
40 nvkm_mask(device, 0x001098, 0x00000018, 0x00000018); nv50_bus_hwsq_exec()
41 nvkm_wr32(device, 0x00130c, 0x00000003); nv50_bus_hwsq_exec()
43 if (nvkm_msec(device, 2000, nv50_bus_hwsq_exec()
44 if (!(nvkm_rd32(device, 0x001308) & 0x00000100)) nv50_bus_hwsq_exec()
56 struct nvkm_device *device = subdev->device; nv50_bus_intr() local
57 u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140); nv50_bus_intr()
60 u32 addr = nvkm_rd32(device, 0x009084); nv50_bus_intr()
61 u32 data = nvkm_rd32(device, 0x009088); nv50_bus_intr()
68 nvkm_wr32(device, 0x001100, 0x00000008); nv50_bus_intr()
72 struct nvkm_therm *therm = device->therm; nv50_bus_intr()
76 nvkm_wr32(device, 0x001100, 0x00010000); nv50_bus_intr()
81 nvkm_mask(device, 0x001140, stat, 0); nv50_bus_intr()
88 struct nvkm_device *device = bus->subdev.device; nv50_bus_init() local
89 nvkm_wr32(device, 0x001100, 0xffffffff); nv50_bus_init()
90 nvkm_wr32(device, 0x001140, 0x00010008); nv50_bus_init()
102 nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus) nv50_bus_new() argument
104 return nvkm_bus_new_(&nv50_bus, device, index, pbus); nv50_bus_new()
H A Dgf100.c31 struct nvkm_device *device = subdev->device; gf100_bus_intr() local
32 u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140); gf100_bus_intr()
35 u32 addr = nvkm_rd32(device, 0x009084); gf100_bus_intr()
36 u32 data = nvkm_rd32(device, 0x009088); gf100_bus_intr()
46 nvkm_wr32(device, 0x009084, 0x00000000); gf100_bus_intr()
47 nvkm_wr32(device, 0x001100, (stat & 0x0000000e)); gf100_bus_intr()
53 nvkm_mask(device, 0x001140, stat, 0x00000000); gf100_bus_intr()
60 struct nvkm_device *device = bus->subdev.device; gf100_bus_init() local
61 nvkm_wr32(device, 0x001100, 0xffffffff); gf100_bus_init()
62 nvkm_wr32(device, 0x001140, 0x0000000e); gf100_bus_init()
72 gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus) gf100_bus_new() argument
74 return nvkm_bus_new_(&gf100_bus, device, index, pbus); gf100_bus_new()
H A Dnv04.c35 struct nvkm_device *device = subdev->device; nv04_bus_intr() local
36 u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140); nv04_bus_intr()
41 nvkm_wr32(device, 0x001100, 0x00000001); nv04_bus_intr()
45 struct nvkm_gpio *gpio = device->gpio; nv04_bus_intr()
49 nvkm_wr32(device, 0x001100, 0x00000110); nv04_bus_intr()
54 nvkm_mask(device, 0x001140, stat, 0x00000000); nv04_bus_intr()
61 struct nvkm_device *device = bus->subdev.device; nv04_bus_init() local
62 nvkm_wr32(device, 0x001100, 0xffffffff); nv04_bus_init()
63 nvkm_wr32(device, 0x001140, 0x00000111); nv04_bus_init()
73 nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus) nv04_bus_new() argument
75 return nvkm_bus_new_(&nv04_bus, device, index, pbus); nv04_bus_new()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/
H A Dgm204.c33 struct nvkm_device *device = init->base.subdev.device; pmu_code() local
34 struct nvkm_bios *bios = device->bios; pmu_code()
37 nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu); pmu_code()
40 nvkm_wr32(device, 0x10a188, (pmu + i) >> 8); pmu_code()
41 nvkm_wr32(device, 0x10a184, nvbios_rd32(bios, img + i)); pmu_code()
45 nvkm_wr32(device, 0x10a184, 0x00000000); pmu_code()
53 struct nvkm_device *device = init->base.subdev.device; pmu_data() local
54 struct nvkm_bios *bios = device->bios; pmu_data()
57 nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu); pmu_data()
59 nvkm_wr32(device, 0x10a1c4, nvbios_rd32(bios, img + i)); pmu_data()
65 struct nvkm_device *device = init->base.subdev.device; pmu_args() local
66 nvkm_wr32(device, 0x10a1c0, argp); pmu_args()
67 nvkm_wr32(device, 0x10a1c0, nvkm_rd32(device, 0x10a1c4) + argi); pmu_args()
68 return nvkm_rd32(device, 0x10a1c4); pmu_args()
74 struct nvkm_device *device = init->base.subdev.device; pmu_exec() local
75 nvkm_wr32(device, 0x10a104, init_addr); pmu_exec()
76 nvkm_wr32(device, 0x10a10c, 0x00000000); pmu_exec()
77 nvkm_wr32(device, 0x10a100, 0x00000002); pmu_exec()
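pmu_code() above streams a firmware image into the PMU falcon through an auto-incrementing data port (0x10a184), re-writing the block register (0x10a188) at every 256-byte boundary; pmu_exec() then points the falcon at the entry address and starts it. The addressing trick in miniature (code_ram and next model the port state; nothing here is a real register):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t code_ram[256];      /* falcon code RAM, word indexed */
    static uint32_t next;               /* auto-incremented by data writes */

    static void set_block(uint32_t blk) { next = (blk << 8) / 4; }
    static void write_data(uint32_t v)  { code_ram[next++] = v; }

    /* Mirror of the pmu_code() loop: bump the block register at each
     * 256-byte boundary, then let the data port auto-increment. */
    static void upload(uint32_t pmu, const uint32_t *img, unsigned len)
    {
        for (unsigned i = 0; i < len; i += 4) {
            if ((i & 0xff) == 0)
                set_block((pmu + i) >> 8);
            write_data(img[i / 4]);
        }
    }

    int main(void)
    {
        uint32_t img[128];
        for (unsigned i = 0; i < 128; i++) img[i] = i;
        upload(0x0000, img, sizeof(img));        /* two 256-byte pages */
        printf("word 65 = %u\n", code_ram[65]);  /* second page intact */
        return 0;
    }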
85 struct nvkm_bios *bios = subdev->device->bios; pmu_load()
114 struct nvkm_device *device = subdev->device; gm204_devinit_post() local
115 struct nvkm_bios *bios = device->bios; gm204_devinit_post()
128 nvkm_mask(device, 0x000200, 0x00002000, 0x00000000); gm204_devinit_post()
129 nvkm_mask(device, 0x000200, 0x00002000, 0x00002000); gm204_devinit_post()
130 nvkm_rd32(device, 0x000200); gm204_devinit_post()
131 while (nvkm_rd32(device, 0x10a10c) & 0x00000006) { gm204_devinit_post()
157 nvkm_wr32(device, 0x10a040, 0x00005000); gm204_devinit_post()
159 while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) { gm204_devinit_post()
177 gm204_devinit_new(struct nvkm_device *device, int index, gm204_devinit_new() argument
180 return nv50_devinit_new_(&gm204_devinit, device, index, pinit); gm204_devinit_new()
H A Dnv04.c39 struct nvkm_device *device = subdev->device; nv04_devinit_meminit() local
45 fb = fbmem_init(device); nv04_devinit_meminit()
52 nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20); nv04_devinit_meminit()
53 nvkm_mask(device, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF); nv04_devinit_meminit()
55 nvkm_mask(device, NV04_PFB_BOOT_0, ~0, nv04_devinit_meminit()
66 nvkm_mask(device, NV04_PFB_BOOT_0, nv04_devinit_meminit()
69 nvkm_mask(device, NV04_PFB_DEBUG_0, nv04_devinit_meminit()
76 nvkm_mask(device, NV04_PFB_BOOT_0, nv04_devinit_meminit()
82 nvkm_mask(device, NV04_PFB_BOOT_0, nv04_devinit_meminit()
89 nvkm_mask(device, NV04_PFB_BOOT_0, nv04_devinit_meminit()
93 nvkm_mask(device, NV04_PFB_BOOT_0, nv04_devinit_meminit()
97 nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE, nv04_devinit_meminit()
101 nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, nv04_devinit_meminit()
107 nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0); nv04_devinit_meminit()
108 nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20); nv04_devinit_meminit()
146 struct nvkm_device *device = init->subdev.device; setPLL_single() local
147 int chip_version = device->bios->version.chip; setPLL_single()
148 uint32_t oldpll = nvkm_rd32(device, reg); setPLL_single()
158 saved_powerctrl_1 = nvkm_rd32(device, 0x001584); setPLL_single()
159 nvkm_wr32(device, 0x001584, setPLL_single()
166 nvkm_wr32(device, reg, pv->log2P << 16 | (oldpll & 0xffff)); setPLL_single()
169 nvkm_wr32(device, reg, (oldpll & 0xffff0000) | pv->NM1); setPLL_single()
175 nvkm_rd32(device, reg); setPLL_single()
178 nvkm_wr32(device, reg, pll); setPLL_single()
181 nvkm_wr32(device, 0x001584, saved_powerctrl_1); setPLL_single()
201 struct nvkm_device *device = init->subdev.device; setPLL_double_highregs() local
202 int chip_version = device->bios->version.chip; setPLL_double_highregs()
205 uint32_t oldpll1 = nvkm_rd32(device, reg1); setPLL_double_highregs()
206 uint32_t oldpll2 = !nv3035 ? nvkm_rd32(device, reg2) : 0; setPLL_double_highregs()
221 oldramdac580 = nvkm_rd32(device, 0x680580); setPLL_double_highregs()
237 saved_powerctrl_1 = nvkm_rd32(device, 0x001584); setPLL_double_highregs()
238 nvkm_wr32(device, 0x001584, setPLL_double_highregs()
257 savedc040 = nvkm_rd32(device, 0xc040); setPLL_double_highregs()
259 nvkm_wr32(device, 0xc040, savedc040 & ~(3 << shift_c040)); setPLL_double_highregs()
263 nvkm_wr32(device, 0x680580, ramdac580); setPLL_double_highregs()
266 nvkm_wr32(device, reg2, pll2); setPLL_double_highregs()
267 nvkm_wr32(device, reg1, pll1); setPLL_double_highregs()
270 nvkm_wr32(device, 0x001584, saved_powerctrl_1); setPLL_double_highregs()
272 nvkm_wr32(device, 0xc040, savedc040); setPLL_double_highregs()
286 struct nvkm_device *device = init->subdev.device; setPLL_double_lowregs() local
289 uint32_t oldPval = nvkm_rd32(device, Preg); setPLL_double_lowregs()
298 if (nvkm_rd32(device, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval) setPLL_double_lowregs()
310 if (nvbios_pll_parse(device->bios, Preg, &info)) setPLL_double_lowregs()
318 saved4600 = nvkm_rd32(device, 0x4600); setPLL_double_lowregs()
319 nvkm_wr32(device, 0x4600, saved4600 | 8 << 28); setPLL_double_lowregs()
324 nvkm_wr32(device, Preg, oldPval | 1 << 28); setPLL_double_lowregs()
325 nvkm_wr32(device, Preg, Pval & ~(4 << 28)); setPLL_double_lowregs()
328 nvkm_wr32(device, 0x4020, Pval & ~(0xc << 28)); setPLL_double_lowregs()
329 nvkm_wr32(device, 0x4038, Pval & ~(0xc << 28)); setPLL_double_lowregs()
332 savedc040 = nvkm_rd32(device, 0xc040); setPLL_double_lowregs()
333 nvkm_wr32(device, 0xc040, savedc040 & maskc040); setPLL_double_lowregs()
335 nvkm_wr32(device, NMNMreg, NMNM); setPLL_double_lowregs()
337 nvkm_wr32(device, 0x403c, NMNM); setPLL_double_lowregs()
339 nvkm_wr32(device, Preg, Pval); setPLL_double_lowregs()
342 nvkm_wr32(device, 0x4020, Pval); setPLL_double_lowregs()
343 nvkm_wr32(device, 0x4038, Pval); setPLL_double_lowregs()
344 nvkm_wr32(device, 0x4600, saved4600); setPLL_double_lowregs()
347 nvkm_wr32(device, 0xc040, savedc040); setPLL_double_lowregs()
350 nvkm_wr32(device, 0x4020, Pval & ~(1 << 28)); setPLL_double_lowregs()
351 nvkm_wr32(device, 0x4038, Pval & ~(1 << 28)); setPLL_double_lowregs()
359 struct nvkm_bios *bios = subdev->device->bios; nv04_devinit_pll_set()
404 struct nvkm_device *device = subdev->device; nv04_devinit_preinit() local
407 nvkm_mask(device, 0x000200, 0x00000001, 0x00000001); nv04_devinit_preinit()
411 init->owner = nvkm_rdvgaowner(device); nv04_devinit_preinit()
412 nvkm_wrvgaowner(device, 0); nv04_devinit_preinit()
415 u32 htotal = nvkm_rdvgac(device, 0, 0x06); nv04_devinit_preinit()
416 htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8; nv04_devinit_preinit()
417 htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4; nv04_devinit_preinit()
418 htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10; nv04_devinit_preinit()
419 htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11; nv04_devinit_preinit()
432 nvkm_wrvgaowner(init->base.subdev.device, init->owner); nv04_devinit_dtor()
438 struct nvkm_device *device, int index, nv04_devinit_new_()
447 nvkm_devinit_ctor(func, device, index, &init->base); nv04_devinit_new_()
462 nv04_devinit_new(struct nvkm_device *device, int index, nv04_devinit_new() argument
465 return nv04_devinit_new_(&nv04_devinit, device, index, pinit); nv04_devinit_new()
437 nv04_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device, int index, struct nvkm_devinit **pinit) nv04_devinit_new_() argument
/linux-4.4.14/arch/m68k/include/asm/
H A Ddma-mapping.h8 static inline int dma_supported(struct device *dev, u64 mask) dma_supported()
13 static inline int dma_set_mask(struct device *dev, u64 mask) dma_set_mask()
18 extern void *dma_alloc_coherent(struct device *, size_t,
20 extern void dma_free_coherent(struct device *, size_t,
23 static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_alloc_attrs()
31 static inline void dma_free_attrs(struct device *dev, size_t size, dma_free_attrs()
39 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_alloc_noncoherent()
44 static inline void dma_free_noncoherent(struct device *dev, size_t size, dma_free_noncoherent()
49 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync()
55 extern dma_addr_t dma_map_single(struct device *, void *, size_t,
57 static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, dma_unmap_single()
62 extern dma_addr_t dma_map_page(struct device *, struct page *,
65 static inline void dma_unmap_page(struct device *dev, dma_addr_t address, dma_unmap_page()
70 extern int dma_map_sg(struct device *, struct scatterlist *, int,
72 static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, dma_unmap_sg()
77 extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
79 extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
82 static inline void dma_sync_single_range_for_device(struct device *dev, dma_sync_single_range_for_device()
90 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, dma_sync_single_for_cpu()
95 static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_cpu()
100 static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_sync_single_range_for_cpu()
108 static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) dma_mapping_error()
114 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
116 extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
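The m68k header above implements the generic streaming-DMA contract: map a buffer, hand the resulting bus address to the device, and unmap when the transfer is over, with the sync_* helpers for intermediate ownership transfers. A hedged usage sketch in kernel style (start_tx and its caller are hypothetical; the dma_* calls are the ones declared above):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* 'dev' and 'buf' are owned by the (hypothetical) enclosing driver. */
    static int start_tx(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
            return -ENOMEM;

        /* ... program 'handle' into the hardware and run the I/O; the
         * CPU must not touch 'buf' while the device owns the mapping ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
    }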
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/therm/
H A Dnv40.c32 switch (therm->subdev.device->chipset) { nv40_sensor_style()
55 struct nvkm_device *device = therm->subdev.device; nv40_sensor_setup() local
60 nvkm_mask(device, 0x15b8, 0x80000000, 0); nv40_sensor_setup()
61 nvkm_wr32(device, 0x15b0, 0x80003fff); nv40_sensor_setup()
63 return nvkm_rd32(device, 0x15b4) & 0x3fff; nv40_sensor_setup()
65 nvkm_wr32(device, 0x15b0, 0xff); nv40_sensor_setup()
67 return nvkm_rd32(device, 0x15b4) & 0xff; nv40_sensor_setup()
75 struct nvkm_device *device = therm->subdev.device; nv40_temp_get() local
81 nvkm_wr32(device, 0x15b0, 0x80003fff); nv40_temp_get()
82 core_temp = nvkm_rd32(device, 0x15b4) & 0x3fff; nv40_temp_get()
84 nvkm_wr32(device, 0x15b0, 0xff); nv40_temp_get()
85 core_temp = nvkm_rd32(device, 0x15b4) & 0xff; nv40_temp_get()
109 struct nvkm_device *device = subdev->device; nv40_fan_pwm_ctrl() local
111 if (line == 2) nvkm_mask(device, 0x0010f0, 0x80000000, mask); nv40_fan_pwm_ctrl()
112 else if (line == 9) nvkm_mask(device, 0x0015f4, 0x80000000, mask); nv40_fan_pwm_ctrl()
124 struct nvkm_device *device = subdev->device; nv40_fan_pwm_get() local
126 u32 reg = nvkm_rd32(device, 0x0010f0); nv40_fan_pwm_get()
134 u32 reg = nvkm_rd32(device, 0x0015f4); nv40_fan_pwm_get()
136 *divs = nvkm_rd32(device, 0x0015f8); nv40_fan_pwm_get()
152 struct nvkm_device *device = subdev->device; nv40_fan_pwm_set() local
154 nvkm_mask(device, 0x0010f0, 0x7fff7fff, (duty << 16) | divs); nv40_fan_pwm_set()
157 nvkm_wr32(device, 0x0015f8, divs); nv40_fan_pwm_set()
158 nvkm_mask(device, 0x0015f4, 0x7fffffff, duty); nv40_fan_pwm_set()
171 struct nvkm_device *device = subdev->device; nv40_therm_intr() local
172 uint32_t stat = nvkm_rd32(device, 0x1100); nv40_therm_intr()
177 nvkm_wr32(device, 0x1100, 0x70000); nv40_therm_intr()
200 nv40_therm_new(struct nvkm_device *device, int index, nv40_therm_new() argument
203 return nvkm_therm_new_(&nv40_therm, device, index, ptherm); nv40_therm_new()
H A Dgf119.c30 struct nvkm_device *device = subdev->device; pwm_info() local
31 u32 gpio = nvkm_rd32(device, 0x00d610 + (line * 0x04)); pwm_info()
55 struct nvkm_device *device = therm->subdev.device; gf119_fan_pwm_ctrl() local
61 nvkm_mask(device, 0x00d610 + (line * 0x04), 0x000000c0, data); gf119_fan_pwm_ctrl()
69 struct nvkm_device *device = therm->subdev.device; gf119_fan_pwm_get() local
74 if (nvkm_rd32(device, 0x00d610 + (line * 0x04)) & 0x00000040) { gf119_fan_pwm_get()
75 *divs = nvkm_rd32(device, 0x00e114 + (indx * 8)); gf119_fan_pwm_get()
76 *duty = nvkm_rd32(device, 0x00e118 + (indx * 8)); gf119_fan_pwm_get()
80 *divs = nvkm_rd32(device, 0x0200d8) & 0x1fff; gf119_fan_pwm_get()
81 *duty = nvkm_rd32(device, 0x0200dc) & 0x1fff; gf119_fan_pwm_get()
91 struct nvkm_device *device = therm->subdev.device; gf119_fan_pwm_set() local
96 nvkm_wr32(device, 0x00e114 + (indx * 8), divs); gf119_fan_pwm_set()
97 nvkm_wr32(device, 0x00e118 + (indx * 8), duty | 0x80000000); gf119_fan_pwm_set()
99 nvkm_mask(device, 0x0200d8, 0x1fff, divs); /* keep the high bits */ gf119_fan_pwm_set()
100 nvkm_wr32(device, 0x0200dc, duty | 0x40000000); gf119_fan_pwm_set()
108 struct nvkm_device *device = therm->subdev.device; gf119_fan_pwm_clock() local
113 return (device->crystal * 1000) / 20; gf119_fan_pwm_clock()
115 return device->crystal * 1000 / 10; gf119_fan_pwm_clock()
121 struct nvkm_device *device = therm->subdev.device; gf119_therm_init() local
126 nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002); gf119_therm_init()
128 nvkm_mask(device, 0x00d79c, 0x000000ff, therm->fan->tach.line); gf119_therm_init()
129 nvkm_wr32(device, 0x00e724, device->crystal * 1000); gf119_therm_init()
130 nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001); gf119_therm_init()
132 nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000); gf119_therm_init()
149 gf119_therm_new(struct nvkm_device *device, int index, gf119_therm_new() argument
152 return nvkm_therm_new_(&gf119_therm, device, index, ptherm); gf119_therm_new()
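In both the gf119 hits above and the nv50 ones that follow, a fan speed is programmed as a (divs, duty) pair: divs is the PWM period in controller ticks and duty the on-time in the same units, so the therm core (not part of these hits) converts a percentage to roughly duty = divs * pct / 100 before calling the pwm_set hook. The arithmetic, rounded up so 1% does not truncate to a stopped fan:

    #include <stdint.h>
    #include <stdio.h>

    /* divs = PWM period in ticks, return value = on-time in ticks. */
    static uint32_t pct_to_duty(uint32_t divs, uint32_t pct)
    {
        return (divs * pct + 99) / 100;   /* round up */
    }

    int main(void)
    {
        uint32_t divs = 0x0708;           /* arbitrary example period */
        for (int pct = 0; pct <= 100; pct += 25)
            printf("%3d%% -> duty 0x%04x of 0x%04x\n",
                   pct, pct_to_duty(divs, pct), divs);
        return 0;
    }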
H A Dnv50.c57 struct nvkm_device *device = therm->subdev.device; nv50_fan_pwm_ctrl() local
61 nvkm_mask(device, ctrl, 0x00010001 << line, data << line); nv50_fan_pwm_ctrl()
68 struct nvkm_device *device = therm->subdev.device; nv50_fan_pwm_get() local
73 if (nvkm_rd32(device, ctrl) & (1 << line)) { nv50_fan_pwm_get()
74 *divs = nvkm_rd32(device, 0x00e114 + (id * 8)); nv50_fan_pwm_get()
75 *duty = nvkm_rd32(device, 0x00e118 + (id * 8)); nv50_fan_pwm_get()
85 struct nvkm_device *device = therm->subdev.device; nv50_fan_pwm_set() local
90 nvkm_wr32(device, 0x00e114 + (id * 8), divs); nv50_fan_pwm_set()
91 nvkm_wr32(device, 0x00e118 + (id * 8), duty | 0x80000000); nv50_fan_pwm_set()
98 struct nvkm_device *device = therm->subdev.device; nv50_fan_pwm_clock() local
102 if (device->chipset > 0x50 && device->chipset < 0x94) { nv50_fan_pwm_clock()
103 u8 pwm_div = nvkm_rd32(device, 0x410c); nv50_fan_pwm_clock()
104 if (nvkm_rd32(device, 0xc040) & 0x800000) { nv50_fan_pwm_clock()
110 pwm_clock = (device->crystal * 1000) >> pwm_div; nv50_fan_pwm_clock()
114 pwm_clock = (device->crystal * 1000) / 20; nv50_fan_pwm_clock()
123 struct nvkm_device *device = therm->subdev.device; nv50_sensor_setup() local
124 nvkm_mask(device, 0x20010, 0x40000000, 0x0); nv50_sensor_setup()
131 struct nvkm_device *device = therm->subdev.device; nv50_temp_get() local
135 core_temp = nvkm_rd32(device, 0x20014) & 0x3fff; nv50_temp_get()
172 nv50_therm_new(struct nvkm_device *device, int index, nv50_therm_new() argument
175 return nvkm_therm_new_(&nv50_therm, device, index, ptherm); nv50_therm_new()
H A Dgt215.c31 struct nvkm_device *device = therm->subdev.device; gt215_therm_fan_sense() local
32 u32 tach = nvkm_rd32(device, 0x00e728) & 0x0000ffff; gt215_therm_fan_sense()
33 u32 ctrl = nvkm_rd32(device, 0x00e720); gt215_therm_fan_sense()
42 struct nvkm_device *device = therm->subdev.device; gt215_therm_init() local
48 nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002); gt215_therm_init()
50 nvkm_wr32(device, 0x00e724, device->crystal * 1000); gt215_therm_init()
51 nvkm_mask(device, 0x00e720, 0x001f0000, tach->line << 16); gt215_therm_init()
52 nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001); gt215_therm_init()
54 nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000); gt215_therm_init()
71 gt215_therm_new(struct nvkm_device *device, int index, gt215_therm_new() argument
74 return nvkm_therm_new_(&gt215_therm, device, index, ptherm); gt215_therm_new()
/linux-4.4.14/drivers/acpi/
H A Dscan.c28 #define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
38 static const char *dummy_hid = "device";
145 struct acpi_device *device = NULL; acpi_bus_offline() local
150 if (acpi_bus_get_device(handle, &device)) acpi_bus_offline()
153 if (device->handler && !device->handler->hotplug.enabled) { acpi_bus_offline()
154 *ret_p = &device->dev; acpi_bus_offline()
158 mutex_lock(&device->physical_node_lock); acpi_bus_offline()
160 list_for_each_entry(pn, &device->physical_node_list, node) { acpi_bus_offline()
185 mutex_unlock(&device->physical_node_lock); acpi_bus_offline()
193 struct acpi_device *device = NULL; acpi_bus_online() local
196 if (acpi_bus_get_device(handle, &device)) acpi_bus_online()
199 mutex_lock(&device->physical_node_lock); acpi_bus_online()
201 list_for_each_entry(pn, &device->physical_node_list, node) acpi_bus_online()
207 mutex_unlock(&device->physical_node_lock); acpi_bus_online()
212 static int acpi_scan_try_to_offline(struct acpi_device *device) acpi_scan_try_to_offline() argument
214 acpi_handle handle = device->handle; acpi_scan_try_to_offline()
215 struct device *errdev = NULL; acpi_scan_try_to_offline()
258 static int acpi_scan_hot_remove(struct acpi_device *device) acpi_scan_hot_remove() argument
260 acpi_handle handle = device->handle; acpi_scan_hot_remove()
264 if (device->handler && device->handler->hotplug.demand_offline acpi_scan_hot_remove()
266 if (!acpi_scan_is_offline(device, true)) acpi_scan_hot_remove()
269 int error = acpi_scan_try_to_offline(device); acpi_scan_hot_remove()
275 "Hot-removing device %s...\n", dev_name(&device->dev))); acpi_scan_hot_remove()
277 acpi_bus_trim(device); acpi_scan_hot_remove()
322 * This function is only called for device objects for which acpi_scan_device_check()
324 * the scan handler is not attached to this device object yet acpi_scan_device_check()
325 * is when the device has just appeared (either it wasn't acpi_scan_device_check()
404 * The device object's ACPI handle cannot become invalid as long as we acpi_device_hotplug()
426 * There may be additional notify handlers for device objects acpi_device_hotplug()
446 static void acpi_free_power_resources_lists(struct acpi_device *device) acpi_free_power_resources_lists() argument
450 if (device->wakeup.flags.valid) acpi_free_power_resources_lists()
451 acpi_power_resources_list_free(&device->wakeup.resources); acpi_free_power_resources_lists()
453 if (!device->power.flags.power_resources) acpi_free_power_resources_lists()
457 struct acpi_device_power_state *ps = &device->power.states[i]; acpi_free_power_resources_lists()
462 static void acpi_device_release(struct device *dev) acpi_device_release()
472 static void acpi_device_del(struct acpi_device *device) acpi_device_del() argument
475 if (device->parent) acpi_device_del()
476 list_del(&device->node); acpi_device_del()
478 list_del(&device->wakeup_list); acpi_device_del()
481 acpi_power_add_remove_device(device, false); acpi_device_del()
482 acpi_device_remove_files(device); acpi_device_del()
483 if (device->remove) acpi_device_del()
484 device->remove(device); acpi_device_del()
486 device_del(&device->dev); acpi_device_del()
512 * used by the device. acpi_device_del_work_fn()
520 * acpi_scan_drop_device - Drop an ACPI device object.
522 * @context: Address of the ACPI device object to drop.
525 * namespace node the device object pointed to by @context is attached to.
529 * ensure the correct ordering (the device objects must be unregistered in the
542 * prevents attempts to register device objects identical to those being acpi_scan_drop_device()
546 * those work items to ensure that they are not accessing stale device acpi_scan_drop_device()
559 static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device, acpi_get_device_data() argument
564 if (!device) acpi_get_device_data()
568 (void **)device, callback); acpi_get_device_data()
569 if (ACPI_FAILURE(status) || !*device) { acpi_get_device_data()
577 int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device) acpi_bus_get_device() argument
579 return acpi_get_device_data(handle, device, NULL); acpi_bus_get_device()
602 int acpi_device_add(struct acpi_device *device, acpi_device_add() argument
603 void (*release)(struct device *)) acpi_device_add()
609 if (device->handle) { acpi_device_add()
612 status = acpi_attach_data(device->handle, acpi_scan_drop_device, acpi_device_add()
613 device); acpi_device_add()
615 acpi_handle_err(device->handle, acpi_device_add()
616 "Unable to attach device data\n"); acpi_device_add()
624 * Link this device to its parent and siblings. acpi_device_add()
626 INIT_LIST_HEAD(&device->children); acpi_device_add()
627 INIT_LIST_HEAD(&device->node); acpi_device_add()
628 INIT_LIST_HEAD(&device->wakeup_list); acpi_device_add()
629 INIT_LIST_HEAD(&device->physical_node_list); acpi_device_add()
630 INIT_LIST_HEAD(&device->del_list); acpi_device_add()
631 mutex_init(&device->physical_node_lock); acpi_device_add()
647 acpi_device_hid(device))) { acpi_device_add()
656 strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device)); acpi_device_add()
660 dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no); acpi_device_add()
662 if (device->parent) acpi_device_add()
663 list_add_tail(&device->node, &device->parent->children); acpi_device_add()
665 if (device->wakeup.flags.valid) acpi_device_add()
666 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list); acpi_device_add()
669 if (device->parent) acpi_device_add()
670 device->dev.parent = &device->parent->dev; acpi_device_add()
671 device->dev.bus = &acpi_bus_type; acpi_device_add()
672 device->dev.release = release; acpi_device_add()
673 result = device_add(&device->dev); acpi_device_add()
675 dev_err(&device->dev, "Error registering device\n"); acpi_device_add()
679 result = acpi_device_setup_files(device); acpi_device_add()
681 printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n", acpi_device_add()
682 dev_name(&device->dev)); acpi_device_add()
688 if (device->parent) acpi_device_add()
689 list_del(&device->node); acpi_device_add()
690 list_del(&device->wakeup_list); acpi_device_add()
694 acpi_detach_data(device->handle, acpi_scan_drop_device); acpi_device_add()
703 struct acpi_device *device = NULL; acpi_bus_get_parent() local
718 } while (acpi_bus_get_device(handle, &device)); acpi_bus_get_parent()
719 return device; acpi_bus_get_parent()
827 static void acpi_wakeup_gpe_init(struct acpi_device *device) acpi_wakeup_gpe_init() argument
835 struct acpi_device_wakeup *wakeup = &device->wakeup; acpi_wakeup_gpe_init()
842 if (!acpi_match_device_ids(device, button_device_ids)) { acpi_wakeup_gpe_init()
844 if (!acpi_match_device_ids(device, &button_device_ids[1])) { acpi_wakeup_gpe_init()
850 device_set_wakeup_capable(&device->dev, true); acpi_wakeup_gpe_init()
854 acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device, acpi_wakeup_gpe_init()
864 static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) acpi_bus_get_wakeup_device_flags() argument
869 if (!acpi_has_method(device->handle, "_PRW")) acpi_bus_get_wakeup_device_flags()
872 err = acpi_bus_extract_wakeup_device_power_package(device->handle, acpi_bus_get_wakeup_device_flags()
873 &device->wakeup); acpi_bus_get_wakeup_device_flags()
875 dev_err(&device->dev, "_PRW evaluation error: %d\n", err); acpi_bus_get_wakeup_device_flags()
879 device->wakeup.flags.valid = 1; acpi_bus_get_wakeup_device_flags()
880 device->wakeup.prepare_count = 0; acpi_bus_get_wakeup_device_flags()
881 acpi_wakeup_gpe_init(device); acpi_bus_get_wakeup_device_flags()
883 * system for the ACPI device with the _PRW object. acpi_bus_get_wakeup_device_flags()
888 err = acpi_device_sleep_wake(device, 0, 0, 0); acpi_bus_get_wakeup_device_flags()
894 static void acpi_bus_init_power_state(struct acpi_device *device, int state) acpi_bus_init_power_state() argument
896 struct acpi_device_power_state *ps = &device->power.states[state]; acpi_bus_init_power_state()
904 status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer); acpi_bus_init_power_state()
914 device->power.flags.power_resources = 1; acpi_bus_init_power_state()
921 if (acpi_has_method(device->handle, pathname)) acpi_bus_init_power_state()
924 /* State is valid if there are means to put the device into it. */ acpi_bus_init_power_state()
932 static void acpi_bus_get_power_flags(struct acpi_device *device) acpi_bus_get_power_flags() argument
937 if (!acpi_has_method(device->handle, "_PS0") && acpi_bus_get_power_flags()
938 !acpi_has_method(device->handle, "_PR0")) acpi_bus_get_power_flags()
941 device->flags.power_manageable = 1; acpi_bus_get_power_flags()
946 if (acpi_has_method(device->handle, "_PSC")) acpi_bus_get_power_flags()
947 device->power.flags.explicit_get = 1; acpi_bus_get_power_flags()
949 if (acpi_has_method(device->handle, "_IRC")) acpi_bus_get_power_flags()
950 device->power.flags.inrush_current = 1; acpi_bus_get_power_flags()
952 if (acpi_has_method(device->handle, "_DSW")) acpi_bus_get_power_flags()
953 device->power.flags.dsw_present = 1; acpi_bus_get_power_flags()
959 acpi_bus_init_power_state(device, i); acpi_bus_get_power_flags()
961 INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); acpi_bus_get_power_flags()
962 if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) acpi_bus_get_power_flags()
963 device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; acpi_bus_get_power_flags()
966 device->power.states[ACPI_STATE_D0].flags.valid = 1; acpi_bus_get_power_flags()
967 device->power.states[ACPI_STATE_D0].power = 100; acpi_bus_get_power_flags()
968 device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; acpi_bus_get_power_flags()
970 if (acpi_bus_init_power(device)) acpi_bus_get_power_flags()
971 device->flags.power_manageable = 0; acpi_bus_get_power_flags()
974 static void acpi_bus_get_flags(struct acpi_device *device) acpi_bus_get_flags() argument
977 if (acpi_has_method(device->handle, "_STA")) acpi_bus_get_flags()
978 device->flags.dynamic_status = 1; acpi_bus_get_flags()
981 if (acpi_has_method(device->handle, "_RMV")) acpi_bus_get_flags()
982 device->flags.removable = 1; acpi_bus_get_flags()
985 if (acpi_has_method(device->handle, "_EJD") || acpi_bus_get_flags()
986 acpi_has_method(device->handle, "_EJ0")) acpi_bus_get_flags()
987 device->flags.ejectable = 1; acpi_bus_get_flags()
990 static void acpi_device_get_busid(struct acpi_device *device) acpi_device_get_busid() argument
999 * The device's Bus ID is simply the object name. acpi_device_get_busid()
1002 if (ACPI_IS_ROOT_DEVICE(device)) { acpi_device_get_busid()
1003 strcpy(device->pnp.bus_id, "ACPI"); acpi_device_get_busid()
1007 switch (device->device_type) { acpi_device_get_busid()
1009 strcpy(device->pnp.bus_id, "PWRF"); acpi_device_get_busid()
1012 strcpy(device->pnp.bus_id, "SLPF"); acpi_device_get_busid()
1015 acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer); acpi_device_get_busid()
1023 strcpy(device->pnp.bus_id, bus_id); acpi_device_get_busid()
1029 * acpi_ata_match - see if an acpi object is an ATA device
1032 * then we can safely call it an ATA device.
1111 /* Returns true if the ACPI object is a video device which can be
1113 * The device will get a Linux specific CID added in scan.c to
1114 * identify the device as an ACPI graphics device
1115 * Be aware that the graphics device may not be physically present
1123 /* Is this device able to support video switching ? */ acpi_is_video_device()
1127 /* Is this device able to retrieve a video ROM ? */ acpi_is_video_device()
1131 /* Is this device able to configure which video head to be POSTed ? */ acpi_is_video_device()
1147 const char *acpi_device_hid(struct acpi_device *device) acpi_device_hid() argument
1151 if (list_empty(&device->pnp.ids)) acpi_device_hid()
1154 hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list); acpi_device_hid()
1235 pr_err(PREFIX "%s: Error reading device info\n", acpi_set_pnp_ids()
1312 * acpi_dma_supported - Check DMA support for the specified device.
1313 * @adev: The pointer to acpi device
1337 * acpi_get_dma_attr - Check the supported DMA attr for the specified device.
1338 * @adev: The pointer to acpi device
1380 "ACPI device is missing _CCA.\n"); acpi_init_coherency()
1386 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, acpi_init_device_object() argument
1389 INIT_LIST_HEAD(&device->pnp.ids); acpi_init_device_object()
1390 device->device_type = type; acpi_init_device_object()
1391 device->handle = handle; acpi_init_device_object()
1392 device->parent = acpi_bus_get_parent(handle); acpi_init_device_object()
1393 device->fwnode.type = FWNODE_ACPI; acpi_init_device_object()
1394 acpi_set_device_status(device, sta); acpi_init_device_object()
1395 acpi_device_get_busid(device); acpi_init_device_object()
1396 acpi_set_pnp_ids(handle, &device->pnp, type); acpi_init_device_object()
1397 acpi_init_properties(device); acpi_init_device_object()
1398 acpi_bus_get_flags(device); acpi_init_device_object()
1399 device->flags.match_driver = false; acpi_init_device_object()
1400 device->flags.initialized = true; acpi_init_device_object()
1401 device->flags.visited = false; acpi_init_device_object()
1402 device_initialize(&device->dev); acpi_init_device_object()
1403 dev_set_uevent_suppress(&device->dev, true); acpi_init_device_object()
1404 acpi_init_coherency(device); acpi_init_device_object()
1407 void acpi_device_add_finalize(struct acpi_device *device) acpi_device_add_finalize() argument
1409 dev_set_uevent_suppress(&device->dev, false); acpi_device_add_finalize()
1410 kobject_uevent(&device->dev.kobj, KOBJ_ADD); acpi_device_add_finalize()
1418 struct acpi_device *device; acpi_add_single_object() local
1421 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL); acpi_add_single_object()
1422 if (!device) { acpi_add_single_object()
1427 acpi_init_device_object(device, handle, type, sta); acpi_add_single_object()
1428 acpi_bus_get_power_flags(device); acpi_add_single_object()
1429 acpi_bus_get_wakeup_device_flags(device); acpi_add_single_object()
1431 result = acpi_device_add(device, acpi_device_release); acpi_add_single_object()
1433 acpi_device_release(&device->dev); acpi_add_single_object()
1437 acpi_power_add_remove_device(device, true); acpi_add_single_object()
1438 acpi_device_add_finalize(device); acpi_add_single_object()
1441 dev_name(&device->dev), (char *) buffer.pointer, acpi_add_single_object()
1442 device->parent ? dev_name(&device->parent->dev) : "(null)")); acpi_add_single_object()
1444 *child = device; acpi_add_single_object()
1582 dev_dbg(&adev->dev, "Error reading _DEP device info\n"); acpi_device_dep_initialize()
1615 struct acpi_device *device = NULL; acpi_bus_check_add() local
1620 acpi_bus_get_device(handle, &device); acpi_bus_check_add()
1621 if (device) acpi_bus_check_add()
1633 acpi_add_single_object(&device, handle, type, sta); acpi_bus_check_add()
1634 if (!device) acpi_bus_check_add()
1637 acpi_scan_init_hotplug(device); acpi_bus_check_add()
1638 acpi_device_dep_initialize(device); acpi_bus_check_add()
1642 *return_value = device; acpi_bus_check_add()
1665 static void acpi_default_enumeration(struct acpi_device *device) acpi_default_enumeration() argument
1675 acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, acpi_default_enumeration()
1679 acpi_create_platform_device(device); acpi_default_enumeration()
1705 static int acpi_scan_attach_handler(struct acpi_device *device) acpi_scan_attach_handler() argument
1710 list_for_each_entry(hwid, &device->pnp.ids, list) { acpi_scan_attach_handler()
1717 device->pnp.type.platform_id = 0; acpi_scan_attach_handler()
1720 device->handler = handler; acpi_scan_attach_handler()
1721 ret = handler->attach(device, devid); acpi_scan_attach_handler()
1725 device->handler = NULL; acpi_scan_attach_handler()
1734 static void acpi_bus_attach(struct acpi_device *device) acpi_bus_attach() argument
1740 if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd))) acpi_bus_attach()
1741 register_dock_dependent_device(device, ejd); acpi_bus_attach()
1743 acpi_bus_get_status(device); acpi_bus_attach()
1745 if (!acpi_device_is_present(device)) { acpi_bus_attach()
1746 device->flags.visited = false; acpi_bus_attach()
1747 device->flags.power_manageable = 0; acpi_bus_attach()
1750 if (device->handler) acpi_bus_attach()
1753 if (!device->flags.initialized) { acpi_bus_attach()
1754 device->flags.power_manageable = acpi_bus_attach()
1755 device->power.states[ACPI_STATE_D0].flags.valid; acpi_bus_attach()
1756 if (acpi_bus_init_power(device)) acpi_bus_attach()
1757 device->flags.power_manageable = 0; acpi_bus_attach()
1759 device->flags.initialized = true; acpi_bus_attach()
1761 device->flags.visited = false; acpi_bus_attach()
1762 ret = acpi_scan_attach_handler(device); acpi_bus_attach()
1766 device->flags.match_driver = true; acpi_bus_attach()
1768 ret = device_attach(&device->dev); acpi_bus_attach()
1772 if (!ret && device->pnp.type.platform_id) acpi_bus_attach()
1773 acpi_default_enumeration(device); acpi_bus_attach()
1775 device->flags.visited = true; acpi_bus_attach()
1778 list_for_each_entry(child, &device->children, node) acpi_bus_attach()
1781 if (device->handler && device->handler->hotplug.notify_online) acpi_bus_attach()
1782 device->handler->hotplug.notify_online(device); acpi_bus_attach()
1809 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
1817 * in the table trunk from which the kernel could create a device and add an
1824 void *device = NULL; acpi_bus_scan() local
1826 if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device))) acpi_bus_scan()
1828 acpi_bus_check_add, NULL, NULL, &device); acpi_bus_scan()
1830 if (device) { acpi_bus_scan()
1831 acpi_bus_attach(device); acpi_bus_scan()
1839 * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
1862 * Most likely, the device is going away, so put it into D3cold before acpi_bus_trim()
1879 struct acpi_device *device = NULL; acpi_bus_scan_fixed() local
1881 result = acpi_add_single_object(&device, NULL, acpi_bus_scan_fixed()
1887 device->flags.match_driver = true; acpi_bus_scan_fixed()
1888 result = device_attach(&device->dev); acpi_bus_scan_fixed()
1892 device_init_wakeup(&device->dev, true); acpi_bus_scan_fixed()
1896 struct acpi_device *device = NULL; acpi_bus_scan_fixed() local
1898 result = acpi_add_single_object(&device, NULL, acpi_bus_scan_fixed()
1904 device->flags.match_driver = true; acpi_bus_scan_fixed()
1905 result = device_attach(&device->dev); acpi_bus_scan_fixed()
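The scan.c hits above centre on one mapping: acpi_device_add() attaches a struct acpi_device to a namespace node via acpi_attach_data(), and acpi_bus_get_device() resolves the handle back to that object. A minimal sketch of the lookup pattern, using only helpers visible in these results (the demo_* name is illustrative, not from the tree, and error handling is deliberately simple):

    #include <linux/acpi.h>

    /* Hypothetical helper: resolve a namespace handle to the struct
     * acpi_device that acpi_device_add() registered for it, if any. */
    static struct acpi_device *demo_lookup(acpi_handle handle)
    {
        struct acpi_device *adev = NULL;

        if (acpi_bus_get_device(handle, &adev))
            return NULL;    /* no device object attached to this handle */

        pr_info("found %s (HID %s)\n",
                dev_name(&adev->dev), acpi_device_hid(adev));
        return adev;
    }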
H A Dprocessor_driver.c51 static int acpi_processor_start(struct device *dev);
52 static int acpi_processor_stop(struct device *dev);
71 struct acpi_device *device = data; acpi_processor_notify() local
75 if (device->handle != handle) acpi_processor_notify()
78 pr = acpi_driver_data(device); acpi_processor_notify()
88 acpi_bus_generate_netlink_event(device->pnp.device_class, acpi_processor_notify()
89 dev_name(&device->dev), event, acpi_processor_notify()
94 acpi_bus_generate_netlink_event(device->pnp.device_class, acpi_processor_notify()
95 dev_name(&device->dev), event, 0); acpi_processor_notify()
99 acpi_bus_generate_netlink_event(device->pnp.device_class, acpi_processor_notify()
100 dev_name(&device->dev), event, 0); acpi_processor_notify()
111 static int __acpi_processor_start(struct acpi_device *device);
118 struct acpi_device *device; acpi_cpu_soft_notify() local
128 if (!pr || acpi_bus_get_device(pr->handle, &device)) acpi_cpu_soft_notify()
142 ret = __acpi_processor_start(device); acpi_cpu_soft_notify()
164 struct acpi_device *device) acpi_pss_perf_init()
175 pr->cdev = thermal_cooling_device_register("Processor", device, acpi_pss_perf_init()
182 dev_dbg(&device->dev, "registered as cooling_device%d\n", acpi_pss_perf_init()
185 result = sysfs_create_link(&device->dev.kobj, acpi_pss_perf_init()
186 &pr->cdev->device.kobj, acpi_pss_perf_init()
189 dev_err(&device->dev, acpi_pss_perf_init()
194 result = sysfs_create_link(&pr->cdev->device.kobj, acpi_pss_perf_init()
195 &device->dev.kobj, acpi_pss_perf_init()
196 "device"); acpi_pss_perf_init()
198 dev_err(&pr->cdev->device, acpi_pss_perf_init()
199 "Failed to create sysfs link 'device'\n"); acpi_pss_perf_init()
206 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); acpi_pss_perf_init()
214 struct acpi_device *device) acpi_pss_perf_exit()
217 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); acpi_pss_perf_exit()
218 sysfs_remove_link(&pr->cdev->device.kobj, "device"); acpi_pss_perf_exit()
225 struct acpi_device *device) acpi_pss_perf_init()
231 struct acpi_device *device) {}
234 static int __acpi_processor_start(struct acpi_device *device) __acpi_processor_start() argument
236 struct acpi_processor *pr = acpi_driver_data(device); __acpi_processor_start()
253 result = acpi_pss_perf_init(pr, device); __acpi_processor_start()
257 status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, __acpi_processor_start()
258 acpi_processor_notify, device); __acpi_processor_start()
267 static int acpi_processor_start(struct device *dev) acpi_processor_start()
269 struct acpi_device *device = ACPI_COMPANION(dev); acpi_processor_start() local
271 if (!device) acpi_processor_start()
274 return __acpi_processor_start(device); acpi_processor_start()
277 static int acpi_processor_stop(struct device *dev) acpi_processor_stop()
279 struct acpi_device *device = ACPI_COMPANION(dev); acpi_processor_stop() local
282 if (!device) acpi_processor_stop()
285 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, acpi_processor_stop()
288 pr = acpi_driver_data(device); acpi_processor_stop()
293 acpi_pss_perf_exit(pr, device); acpi_processor_stop()
163 acpi_pss_perf_init(struct acpi_processor *pr, struct acpi_device *device) acpi_pss_perf_init() argument
213 acpi_pss_perf_exit(struct acpi_processor *pr, struct acpi_device *device) acpi_pss_perf_exit() argument
224 acpi_pss_perf_init(struct acpi_processor *pr, struct acpi_device *device) acpi_pss_perf_init() argument
230 acpi_pss_perf_exit(struct acpi_processor *pr, struct acpi_device *device) acpi_pss_perf_exit() argument
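The processor_driver.c results show the standard pattern for device-scope ACPI notifications: __acpi_processor_start() installs acpi_processor_notify() with ACPI_DEVICE_NOTIFY, and the handler demultiplexes on the event code. A stripped-down sketch of that wiring (demo_* names are illustrative):

    #include <linux/acpi.h>

    /* Signature required of an acpi_notify_handler. */
    static void demo_notify(acpi_handle handle, u32 event, void *data)
    {
        struct acpi_device *device = data;

        dev_info(&device->dev, "ACPI notify, event 0x%x\n", event);
    }

    static int demo_start(struct acpi_device *device)
    {
        acpi_status status;

        /* Device-scope notifications, as in __acpi_processor_start(). */
        status = acpi_install_notify_handler(device->handle,
                                             ACPI_DEVICE_NOTIFY,
                                             demo_notify, device);
        return ACPI_FAILURE(status) ? -ENODEV : 0;
    }

The matching teardown is acpi_remove_notify_handler(), as acpi_processor_stop() does above.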
H A Dfan.c47 static int acpi_fan_suspend(struct device *dev);
48 static int acpi_fan_resume(struct device *dev);
93 /* thermal cooling device callbacks */ fan_get_max_state()
97 struct acpi_device *device = cdev->devdata; fan_get_max_state() local
98 struct acpi_fan *fan = acpi_driver_data(device); fan_get_max_state()
107 static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state) fan_get_state_acpi4() argument
110 struct acpi_fan *fan = acpi_driver_data(device); fan_get_state_acpi4()
115 status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer); fan_get_state_acpi4()
117 dev_err(&device->dev, "Get fan state failed\n"); fan_get_state_acpi4()
125 dev_err(&device->dev, "Invalid _FST data\n"); fan_get_state_acpi4()
136 dev_dbg(&device->dev, "Invalid control value returned\n"); fan_get_state_acpi4()
148 static int fan_get_state(struct acpi_device *device, unsigned long *state) fan_get_state() argument
153 result = acpi_device_update_power(device, &acpi_state); fan_get_state()
166 struct acpi_device *device = cdev->devdata; fan_get_cur_state() local
167 struct acpi_fan *fan = acpi_driver_data(device); fan_get_cur_state()
170 return fan_get_state_acpi4(device, state); fan_get_cur_state()
172 return fan_get_state(device, state); fan_get_cur_state()
175 static int fan_set_state(struct acpi_device *device, unsigned long state) fan_set_state() argument
180 return acpi_device_set_power(device, fan_set_state()
184 static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state) fan_set_state_acpi4() argument
186 struct acpi_fan *fan = acpi_driver_data(device); fan_set_state_acpi4()
192 status = acpi_execute_simple_method(device->handle, "_FSL", fan_set_state_acpi4()
195 dev_dbg(&device->dev, "Failed to set state by _FSL\n"); fan_set_state_acpi4()
205 struct acpi_device *device = cdev->devdata; fan_set_cur_state() local
206 struct acpi_fan *fan = acpi_driver_data(device); fan_set_cur_state()
209 return fan_set_state_acpi4(device, state); fan_set_cur_state()
211 return fan_set_state(device, state); fan_set_cur_state()
225 static bool acpi_fan_is_acpi4(struct acpi_device *device) acpi_fan_is_acpi4() argument
227 return acpi_has_method(device->handle, "_FIF") && acpi_fan_is_acpi4()
228 acpi_has_method(device->handle, "_FPS") && acpi_fan_is_acpi4()
229 acpi_has_method(device->handle, "_FSL") && acpi_fan_is_acpi4()
230 acpi_has_method(device->handle, "_FST"); acpi_fan_is_acpi4()
233 static int acpi_fan_get_fif(struct acpi_device *device) acpi_fan_get_fif() argument
236 struct acpi_fan *fan = acpi_driver_data(device); acpi_fan_get_fif()
242 status = acpi_evaluate_object(device->handle, "_FIF", NULL, &buffer); acpi_fan_get_fif()
248 dev_err(&device->dev, "Invalid _FIF data\n"); acpi_fan_get_fif()
255 dev_err(&device->dev, "Invalid _FIF element\n"); acpi_fan_get_fif()
271 static int acpi_fan_get_fps(struct acpi_device *device) acpi_fan_get_fps() argument
273 struct acpi_fan *fan = acpi_driver_data(device); acpi_fan_get_fps()
279 status = acpi_evaluate_object(device->handle, "_FPS", NULL, &buffer); acpi_fan_get_fps()
285 dev_err(&device->dev, "Invalid _FPS data\n"); acpi_fan_get_fps()
291 fan->fps = devm_kzalloc(&device->dev, acpi_fan_get_fps()
295 dev_err(&device->dev, "Not enough memory\n"); acpi_fan_get_fps()
305 dev_err(&device->dev, "Invalid _FPS element\n"); acpi_fan_get_fps()
324 struct acpi_device *device = ACPI_COMPANION(&pdev->dev); acpi_fan_probe() local
329 dev_err(&device->dev, "No memory for fan\n"); acpi_fan_probe()
332 device->driver_data = fan; acpi_fan_probe()
335 if (acpi_fan_is_acpi4(device)) { acpi_fan_probe()
336 if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device)) acpi_fan_probe()
340 result = acpi_device_update_power(device, NULL); acpi_fan_probe()
342 dev_err(&device->dev, "Setting initial power state\n"); acpi_fan_probe()
350 name = acpi_device_bid(device); acpi_fan_probe()
352 cdev = thermal_cooling_device_register(name, device, acpi_fan_probe()
363 &cdev->device.kobj, acpi_fan_probe()
368 result = sysfs_create_link(&cdev->device.kobj, acpi_fan_probe()
370 "device"); acpi_fan_probe()
372 dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n"); acpi_fan_probe()
383 sysfs_remove_link(&fan->cdev->device.kobj, "device"); acpi_fan_remove()
390 static int acpi_fan_suspend(struct device *dev) acpi_fan_suspend()
401 static int acpi_fan_resume(struct device *dev) acpi_fan_resume()
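fan.c is a compact example of the thermal cooling-device contract: three callbacks keyed off cdev->devdata, which acpi_fan_probe() points at the struct acpi_device. A minimal sketch of the same shape (demo_* names and the single on/off throttling step are illustrative assumptions, not the fan driver's actual policy):

    #include <linux/thermal.h>

    static int demo_get_max_state(struct thermal_cooling_device *cdev,
                                  unsigned long *state)
    {
        *state = 1;             /* one throttling step: on/off */
        return 0;
    }

    static int demo_get_cur_state(struct thermal_cooling_device *cdev,
                                  unsigned long *state)
    {
        *state = 0;             /* report "unthrottled" */
        return 0;
    }

    static int demo_set_cur_state(struct thermal_cooling_device *cdev,
                                  unsigned long state)
    {
        return 0;               /* apply the requested step here */
    }

    static const struct thermal_cooling_device_ops demo_cooling_ops = {
        .get_max_state = demo_get_max_state,
        .get_cur_state = demo_get_cur_state,
        .set_cur_state = demo_set_cur_state,
    };

    /* As in acpi_fan_probe(): devdata carries the acpi_device. */
    static struct thermal_cooling_device *
    demo_register(struct acpi_device *device)
    {
        return thermal_cooling_device_register("Fan", device,
                                               &demo_cooling_ops);
    }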
H A Dbutton.c73 static int acpi_button_add(struct acpi_device *device);
74 static int acpi_button_remove(struct acpi_device *device);
75 static void acpi_button_notify(struct acpi_device *device, u32 event);
78 static int acpi_button_suspend(struct device *dev);
79 static int acpi_button_resume(struct device *dev);
101 char phys[32]; /* for input device */
118 struct acpi_device *device = seq->private; acpi_button_state_seq_show() local
122 status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); acpi_button_state_seq_show()
142 static int acpi_button_add_fs(struct acpi_device *device) acpi_button_add_fs() argument
144 struct acpi_button *button = acpi_driver_data(device); acpi_button_add_fs()
148 /* procfs I/F for ACPI lid device only */ acpi_button_add_fs()
153 printk(KERN_ERR PREFIX "More than one Lid device found!\n"); acpi_button_add_fs()
170 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), acpi_lid_dir); acpi_button_add_fs()
171 if (!acpi_device_dir(device)) { acpi_button_add_fs()
178 S_IRUGO, acpi_device_dir(device), acpi_button_add_fs()
179 &acpi_button_state_fops, device); acpi_button_add_fs()
189 remove_proc_entry(acpi_device_bid(device), acpi_button_add_fs()
191 acpi_device_dir(device) = NULL; acpi_button_add_fs()
199 static int acpi_button_remove_fs(struct acpi_device *device) acpi_button_remove_fs() argument
201 struct acpi_button *button = acpi_driver_data(device); acpi_button_remove_fs()
207 acpi_device_dir(device)); acpi_button_remove_fs()
208 remove_proc_entry(acpi_device_bid(device), acpi_button_remove_fs()
210 acpi_device_dir(device) = NULL; acpi_button_remove_fs()
249 static int acpi_lid_send_state(struct acpi_device *device) acpi_lid_send_state() argument
251 struct acpi_button *button = acpi_driver_data(device); acpi_lid_send_state()
256 status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); acpi_lid_send_state()
265 pm_wakeup_event(&device->dev, 0); acpi_lid_send_state()
267 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); acpi_lid_send_state()
270 device); acpi_lid_send_state()
281 static void acpi_button_notify(struct acpi_device *device, u32 event) acpi_button_notify() argument
283 struct acpi_button *button = acpi_driver_data(device); acpi_button_notify()
293 acpi_lid_send_state(device); acpi_button_notify()
297 pm_wakeup_event(&device->dev, 0); acpi_button_notify()
309 device->pnp.device_class, acpi_button_notify()
310 dev_name(&device->dev), acpi_button_notify()
322 static int acpi_button_suspend(struct device *dev) acpi_button_suspend()
324 struct acpi_device *device = to_acpi_device(dev); acpi_button_suspend() local
325 struct acpi_button *button = acpi_driver_data(device); acpi_button_suspend()
331 static int acpi_button_resume(struct device *dev) acpi_button_resume()
333 struct acpi_device *device = to_acpi_device(dev); acpi_button_resume() local
334 struct acpi_button *button = acpi_driver_data(device); acpi_button_resume()
338 return acpi_lid_send_state(device); acpi_button_resume()
343 static int acpi_button_add(struct acpi_device *device) acpi_button_add() argument
347 const char *hid = acpi_device_hid(device); acpi_button_add()
355 device->driver_data = button; acpi_button_add()
363 name = acpi_device_name(device); acpi_button_add()
364 class = acpi_device_class(device); acpi_button_add()
389 error = acpi_button_add_fs(device); acpi_button_add()
399 input->dev.parent = &device->dev; acpi_button_add()
419 acpi_lid_send_state(device); acpi_button_add()
421 * This assumes there's only one lid device, or if there are acpi_button_add()
424 lid_device = device; acpi_button_add()
427 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); acpi_button_add()
431 acpi_button_remove_fs(device); acpi_button_add()
439 static int acpi_button_remove(struct acpi_device *device) acpi_button_remove() argument
441 struct acpi_button *button = acpi_driver_data(device); acpi_button_remove()
443 acpi_button_remove_fs(device); acpi_button_remove()
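The _LID evaluation in button.c above is the canonical use of acpi_evaluate_integer(): a control method taking no arguments and returning a single integer. A minimal sketch (demo_* name illustrative; per the excerpt, _LID yields 0 for closed and non-zero for open):

    #include <linux/acpi.h>

    static int demo_lid_open(struct acpi_device *device)
    {
        unsigned long long state;
        acpi_status status;

        status = acpi_evaluate_integer(device->handle, "_LID",
                                       NULL, &state);
        if (ACPI_FAILURE(status))
            return -ENODEV;

        return !!state;         /* 1 = open, 0 = closed */
    }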
H A Ddevice_pm.c2 * drivers/acpi/device_pm.c - ACPI device power management routines.
33 * acpi_power_state_string - String representation of ACPI device power state.
34 * @state: ACPI device power state to return the string representation of.
55 * acpi_device_get_power - Get power state of an ACPI device.
56 * @device: Device to get the power state of.
57 * @state: Place to store the power state of the device.
59 * This function does not update the device's power.state field, but it may
61 * unknown and the device's power state turns out to be D0).
63 int acpi_device_get_power(struct acpi_device *device, int *state) acpi_device_get_power() argument
67 if (!device || !state) acpi_device_get_power()
70 if (!device->flags.power_manageable) { acpi_device_get_power()
72 *state = device->parent ? acpi_device_get_power()
73 device->parent->power.state : ACPI_STATE_D0; acpi_device_get_power()
78 * Get the device's power state from power resources settings and _PSC, acpi_device_get_power()
81 if (device->power.flags.power_resources) { acpi_device_get_power()
82 int error = acpi_power_get_inferred_state(device, &result); acpi_device_get_power()
86 if (device->power.flags.explicit_get) { acpi_device_get_power()
87 acpi_handle handle = device->handle; acpi_device_get_power()
97 * shallower than the actual power state of the device, because acpi_device_get_power()
110 * If we were unsure about the device parent's power state up to this acpi_device_get_power()
111 * point, the fact that the device is in D0 implies that the parent has acpi_device_get_power()
114 if (!device->power.flags.ignore_parent && device->parent acpi_device_get_power()
115 && device->parent->power.state == ACPI_STATE_UNKNOWN acpi_device_get_power()
117 device->parent->power.state = ACPI_STATE_D0; acpi_device_get_power()
123 device->pnp.bus_id, acpi_power_state_string(*state))); acpi_device_get_power()
142 * acpi_device_set_power - Set power state of an ACPI device.
143 * @device: Device to set the power state of.
146 * Callers must ensure that the device is power manageable before using this
149 int acpi_device_set_power(struct acpi_device *device, int state) acpi_device_set_power() argument
154 if (!device || !device->flags.power_manageable acpi_device_set_power()
160 if (state == device->power.state) { acpi_device_set_power()
162 device->pnp.bus_id, acpi_device_set_power()
174 if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) acpi_device_set_power()
176 } else if (!device->power.states[state].flags.valid) { acpi_device_set_power()
177 dev_warn(&device->dev, "Power state %s not supported\n", acpi_device_set_power()
182 if (!device->power.flags.ignore_parent && acpi_device_set_power()
183 device->parent && (state < device->parent->power.state)) { acpi_device_set_power()
184 dev_warn(&device->dev, acpi_device_set_power()
187 acpi_power_state_string(device->parent->power.state)); acpi_device_set_power()
203 if (state < device->power.state) { acpi_device_set_power()
204 dev_warn(&device->dev, "Cannot transition from %s to %s\n", acpi_device_set_power()
205 acpi_power_state_string(device->power.state), acpi_device_set_power()
210 result = acpi_dev_pm_explicit_set(device, state); acpi_device_set_power()
214 if (device->power.flags.power_resources) acpi_device_set_power()
215 result = acpi_power_transition(device, target_state); acpi_device_set_power()
217 if (device->power.flags.power_resources) { acpi_device_set_power()
218 result = acpi_power_transition(device, ACPI_STATE_D0); acpi_device_set_power()
222 result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); acpi_device_set_power()
227 dev_warn(&device->dev, "Failed to change power state to %s\n", acpi_device_set_power()
230 device->power.state = target_state; acpi_device_set_power()
233 device->pnp.bus_id, acpi_device_set_power()
243 struct acpi_device *device; acpi_bus_set_power() local
246 result = acpi_bus_get_device(handle, &device); acpi_bus_set_power()
250 return acpi_device_set_power(device, state); acpi_bus_set_power()
254 int acpi_bus_init_power(struct acpi_device *device) acpi_bus_init_power() argument
259 if (!device) acpi_bus_init_power()
262 device->power.state = ACPI_STATE_UNKNOWN; acpi_bus_init_power()
263 if (!acpi_device_is_present(device)) acpi_bus_init_power()
266 result = acpi_device_get_power(device, &state); acpi_bus_init_power()
270 if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) { acpi_bus_init_power()
272 result = acpi_power_on_resources(device, state); acpi_bus_init_power()
281 * another device using the same power resources may acpi_bus_init_power()
285 result = acpi_dev_pm_explicit_set(device, state); acpi_bus_init_power()
292 * it D0 in hope that this is what the BIOS put the device into. acpi_bus_init_power()
298 device->power.state = state; acpi_bus_init_power()
303 * acpi_device_fix_up_power - Force device with missing _PSC into D0.
304 * @device: Device object whose power state is to be fixed up.
310 int acpi_device_fix_up_power(struct acpi_device *device) acpi_device_fix_up_power() argument
314 if (!device->power.flags.power_resources acpi_device_fix_up_power()
315 && !device->power.flags.explicit_get acpi_device_fix_up_power()
316 && device->power.state == ACPI_STATE_D0) acpi_device_fix_up_power()
317 ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); acpi_device_fix_up_power()
322 int acpi_device_update_power(struct acpi_device *device, int *state_p) acpi_device_update_power() argument
327 if (device->power.state == ACPI_STATE_UNKNOWN) { acpi_device_update_power()
328 result = acpi_bus_init_power(device); acpi_device_update_power()
330 *state_p = device->power.state; acpi_device_update_power()
335 result = acpi_device_get_power(device, &state); acpi_device_update_power()
341 result = acpi_device_set_power(device, state); acpi_device_update_power()
345 if (device->power.flags.power_resources) { acpi_device_update_power()
350 result = acpi_power_transition(device, state); acpi_device_update_power()
354 device->power.state = state; acpi_device_update_power()
365 struct acpi_device *device; acpi_bus_update_power() local
368 result = acpi_bus_get_device(handle, &device); acpi_bus_update_power()
369 return result ? result : acpi_device_update_power(device, state_p); acpi_bus_update_power()
375 struct acpi_device *device; acpi_bus_power_manageable() local
378 result = acpi_bus_get_device(handle, &device); acpi_bus_power_manageable()
379 return result ? false : device->flags.power_manageable; acpi_bus_power_manageable()
411 * acpi_add_pm_notifier - Register PM notify handler for given ACPI device.
412 * @adev: ACPI device to add the notify handler for.
416 * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
421 acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, acpi_add_pm_notifier()
452 * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
453 * @adev: ACPI device to remove the notifier from.
486 struct acpi_device *device; acpi_bus_can_wakeup() local
489 result = acpi_bus_get_device(handle, &device); acpi_bus_can_wakeup()
490 return result ? false : device->wakeup.flags.valid; acpi_bus_can_wakeup()
495 * acpi_dev_pm_get_state - Get preferred power state of ACPI device.
497 * @adev: ACPI device node corresponding to @dev.
498 * @target_state: System state to match the resultant device state.
499 * @d_min_p: Location to store the highest power state available to the device.
500 * @d_max_p: Location to store the lowest power state available to the device.
503 * device power states that the device can be in while the system is in the
515 static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev, acpi_dev_pm_get_state()
526 * If the system state is S0, the lowest power state the device can be acpi_dev_pm_get_state()
527 * in is D3cold, unless the device has _S0W and is supposed to signal acpi_dev_pm_get_state()
529 * lowest power state available to the device. acpi_dev_pm_get_state()
603 * acpi_pm_device_sleep_state - Get preferred power state of ACPI device.
607 * Return value: Preferred power state of the device on success, -ENODEV
613 int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) acpi_pm_device_sleep_state()
663 struct device *dev; acpi_pm_notify_work_func()
673 * acpi_device_wakeup - Enable/disable wakeup functionality for device.
674 * @adev: ACPI device to enable/disable wakeup functionality for.
679 * wakeup signals for the device in response to external (remote) events and
680 * enable/disable device wakeup power.
682 * Callers must ensure that @adev is a valid ACPI device node before executing
719 * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device.
723 int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) acpi_pm_device_run_wake()
742 * acpi_pm_device_sleep_wake - Enable or disable device to wake up the system.
746 int acpi_pm_device_sleep_wake(struct device *dev, bool enable) acpi_pm_device_sleep_wake()
770 * acpi_dev_pm_low_power - Put ACPI device into a low-power state.
772 * @adev: ACPI device node corresponding to @dev.
773 * @system_state: System state to choose the device state for.
775 static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev, acpi_dev_pm_low_power()
788 * acpi_dev_pm_full_power - Put ACPI device into the full-power state.
789 * @adev: ACPI device node to put into the full-power state.
798 * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI.
801 * Put the given device into a runtime low-power state using the standard ACPI
803 * device into (this checks if remote wakeup is expected to work too), and set
804 * the power state of the device.
806 int acpi_dev_runtime_suspend(struct device *dev) acpi_dev_runtime_suspend()
830 * acpi_dev_runtime_resume - Put device into the full-power state using ACPI.
833 * Put the given device into the full-power state using the standard ACPI
834 * mechanism at run time. Set the power state of the device to ACPI D0 and
837 int acpi_dev_runtime_resume(struct device *dev) acpi_dev_runtime_resume()
852 * acpi_subsys_runtime_suspend - Suspend device using ACPI.
858 int acpi_subsys_runtime_suspend(struct device *dev) acpi_subsys_runtime_suspend()
866 * acpi_subsys_runtime_resume - Resume device using ACPI.
869 * Use ACPI to put the given device into the full-power state and carry out the
872 int acpi_subsys_runtime_resume(struct device *dev) acpi_subsys_runtime_resume()
881 * acpi_dev_suspend_late - Put device into a low-power state using ACPI.
884 * Put the given device into a low-power state during system transition to a
886 * desired, choose the state to put the device into (this checks if system
887 * wakeup is expected to work too), and set the power state of the device.
889 int acpi_dev_suspend_late(struct device *dev) acpi_dev_suspend_late()
914 * acpi_dev_resume_early - Put device into the full-power state using ACPI.
917 * Put the given device into the full-power state using the standard ACPI
919 * state of the device to ACPI D0 and disable remote wakeup.
921 int acpi_dev_resume_early(struct device *dev) acpi_dev_resume_early()
936 * acpi_subsys_prepare - Prepare device for system transition to a sleep state.
939 int acpi_subsys_prepare(struct device *dev) acpi_subsys_prepare()
966 * acpi_subsys_suspend - Run the device driver's suspend callback.
972 int acpi_subsys_suspend(struct device *dev) acpi_subsys_suspend()
980 * acpi_subsys_suspend_late - Suspend device using ACPI.
986 int acpi_subsys_suspend_late(struct device *dev) acpi_subsys_suspend_late()
994 * acpi_subsys_resume_early - Resume device using ACPI.
997 * Use ACPI to put the given device into the full-power state and carry out the
1001 int acpi_subsys_resume_early(struct device *dev) acpi_subsys_resume_early()
1009 * acpi_subsys_freeze - Run the device driver's freeze callback.
1012 int acpi_subsys_freeze(struct device *dev) acpi_subsys_freeze()
1046 * acpi_dev_pm_detach - Remove ACPI power management from the device.
1048 * @power_off: Whether or not to try to remove power from the device.
1050 * Remove the device from the general ACPI PM domain and remove its wakeup
1051 * notifier. If @power_off is set, additionally remove power from the device if
1057 static void acpi_dev_pm_detach(struct device *dev, bool power_off) acpi_dev_pm_detach()
1066 * If the device's PM QoS resume latency limit or flags acpi_dev_pm_detach()
1069 * choice of the low-power state to put the device into. acpi_dev_pm_detach()
1080 * acpi_dev_pm_attach - Prepare device for ACPI power management.
1082 * @power_on: Whether or not to power on the device.
1085 * attached to it, install a wakeup notification handler for the device and
1086 * add it to the general ACPI PM domain. If @power_on is set, the device will
1095 int acpi_dev_pm_attach(struct device *dev, bool power_on) acpi_dev_pm_attach()
1106 * Only attach the power domain to the first device if the acpi_dev_pm_attach()
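device_pm.c encodes the ordering rules for ACPI power transitions: check flags.power_manageable first, refuse states the device does not support, and route upward transitions through D0. A sketch of a round trip honouring those rules (demo_* name illustrative; a real driver would usually go through the acpi_dev_runtime_* wrappers above instead):

    #include <linux/acpi.h>

    static int demo_power_cycle(struct acpi_device *device)
    {
        int ret;

        /* acpi_device_set_power() requires a power-manageable device. */
        if (!device->flags.power_manageable)
            return -ENODEV;

        ret = acpi_device_set_power(device, ACPI_STATE_D3_HOT);
        if (ret)
            return ret;

        return acpi_device_set_power(device, ACPI_STATE_D0);
    }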
H A Dacpi_video.c90 static int acpi_video_bus_add(struct acpi_device *device);
91 static int acpi_video_bus_remove(struct acpi_device *device);
92 static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
134 u32 bios_can_detect:1; /* BIOS can detect the device */
135 u32 depend_on_vga:1; /* Non-VGA output device whose power is related to
136 the VGA device. */
151 struct acpi_device *device; member in struct:acpi_video_bus
163 char phys[32]; /* for input device */
184 u8 _DDC:1; /* Return the EDID for this device */
215 "motherboard VGA device",
216 "PCI VGA device",
217 "AGP VGA device",
224 struct acpi_video_device *device);
226 static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
229 struct acpi_video_device *device,
231 static int acpi_video_get_next_level(struct acpi_video_device *device,
235 /* backlight device sysfs support */ acpi_video_get_brightness()
270 /* thermal cooling device callbacks */ video_get_max_state()
274 struct acpi_device *device = cooling_dev->devdata; video_get_max_state() local
275 struct acpi_video_device *video = acpi_driver_data(device); video_get_max_state()
284 struct acpi_device *device = cooling_dev->devdata; video_get_cur_state() local
285 struct acpi_video_device *video = acpi_driver_data(device); video_get_cur_state()
303 struct acpi_device *device = cooling_dev->devdata; video_set_cur_state() local
304 struct acpi_video_device *video = acpi_driver_data(device); video_set_cur_state()
328 acpi_video_device_lcd_query_levels(struct acpi_video_device *device, acpi_video_device_lcd_query_levels() argument
338 status = acpi_evaluate_object(device->dev->handle, "_BCL", NULL, &buffer); acpi_video_device_lcd_query_levels()
359 acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) acpi_video_device_lcd_set_level() argument
364 status = acpi_execute_simple_method(device->dev->handle, acpi_video_device_lcd_set_level()
371 device->brightness->curr = level; acpi_video_device_lcd_set_level()
372 for (state = 2; state < device->brightness->count; state++) acpi_video_device_lcd_set_level()
373 if (level == device->brightness->levels[state]) { acpi_video_device_lcd_set_level()
374 if (device->backlight) acpi_video_device_lcd_set_level()
375 device->backlight->props.brightness = state - 2; acpi_video_device_lcd_set_level()
525 acpi_video_bqc_value_to_level(struct acpi_video_device *device, acpi_video_bqc_value_to_level() argument
530 if (device->brightness->flags._BQC_use_index) { acpi_video_bqc_value_to_level()
536 if (device->brightness->flags._BCL_reversed) acpi_video_bqc_value_to_level()
537 bqc_value = device->brightness->count - 3 - bqc_value; acpi_video_bqc_value_to_level()
539 level = device->brightness->levels[bqc_value + 2]; acpi_video_bqc_value_to_level()
550 acpi_video_device_lcd_get_level_current(struct acpi_video_device *device, acpi_video_device_lcd_get_level_current() argument
556 if (device->cap._BQC || device->cap._BCQ) { acpi_video_device_lcd_get_level_current()
557 char *buf = device->cap._BQC ? "_BQC" : "_BCQ"; acpi_video_device_lcd_get_level_current()
559 status = acpi_evaluate_integer(device->dev->handle, buf, acpi_video_device_lcd_get_level_current()
571 *level = acpi_video_bqc_value_to_level(device, *level); acpi_video_device_lcd_get_level_current()
573 for (i = 2; i < device->brightness->count; i++) acpi_video_device_lcd_get_level_current()
574 if (device->brightness->levels[i] == *level) { acpi_video_device_lcd_get_level_current()
575 device->brightness->curr = *level; acpi_video_device_lcd_get_level_current()
585 device->cap._BQC = device->cap._BCQ = 0; acpi_video_device_lcd_get_level_current()
596 device->cap._BQC = device->cap._BCQ = 0; acpi_video_device_lcd_get_level_current()
600 *level = device->brightness->curr; acpi_video_device_lcd_get_level_current()
605 acpi_video_device_EDID(struct acpi_video_device *device, acpi_video_device_EDID() argument
617 if (!device) acpi_video_device_EDID()
626 status = acpi_evaluate_object(device->dev->handle, "_DDC", &args, &buffer); acpi_video_device_EDID()
647 * video : video bus device pointer
677 status = acpi_execute_simple_method(video->device->handle, "_DOS", acpi_video_bus_DOS()
702 static int acpi_video_bqc_quirk(struct acpi_video_device *device, acpi_video_bqc_quirk() argument
705 struct acpi_video_device_brightness *br = device->brightness; acpi_video_bqc_quirk()
720 result = acpi_video_device_lcd_set_level(device, test_level); acpi_video_bqc_quirk()
724 result = acpi_video_device_lcd_get_level_current(device, &level, true); acpi_video_bqc_quirk()
738 device->cap._BQC = device->cap._BCQ = 0; acpi_video_bqc_quirk()
747 * device : video output device (LCD, CRT, ..)
752 * Allocate and initialize device->brightness.
756 acpi_video_init_brightness(struct acpi_video_device *device) acpi_video_init_brightness() argument
766 if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { acpi_video_init_brightness()
839 device->brightness = br; acpi_video_init_brightness()
844 if (!device->cap._BQC) acpi_video_init_brightness()
847 result = acpi_video_device_lcd_get_level_current(device, acpi_video_init_brightness()
852 result = acpi_video_bqc_quirk(device, max_level, level_old); acpi_video_init_brightness()
859 if (!device->cap._BQC) acpi_video_init_brightness()
862 level = acpi_video_bqc_value_to_level(device, level_old); acpi_video_init_brightness()
876 result = acpi_video_device_lcd_set_level(device, level); acpi_video_init_brightness()
890 device->brightness = NULL; acpi_video_init_brightness()
897 * device : video output device (LCD, CRT, ..)
903 * device.
906 static void acpi_video_device_find_cap(struct acpi_video_device *device) acpi_video_device_find_cap() argument
908 if (acpi_has_method(device->dev->handle, "_ADR")) acpi_video_device_find_cap()
909 device->cap._ADR = 1; acpi_video_device_find_cap()
910 if (acpi_has_method(device->dev->handle, "_BCL")) acpi_video_device_find_cap()
911 device->cap._BCL = 1; acpi_video_device_find_cap()
912 if (acpi_has_method(device->dev->handle, "_BCM")) acpi_video_device_find_cap()
913 device->cap._BCM = 1; acpi_video_device_find_cap()
914 if (acpi_has_method(device->dev->handle, "_BQC")) { acpi_video_device_find_cap()
915 device->cap._BQC = 1; acpi_video_device_find_cap()
916 } else if (acpi_has_method(device->dev->handle, "_BCQ")) { acpi_video_device_find_cap()
918 device->cap._BCQ = 1; acpi_video_device_find_cap()
921 if (acpi_has_method(device->dev->handle, "_DDC")) acpi_video_device_find_cap()
922 device->cap._DDC = 1; acpi_video_device_find_cap()
927 * device : video output device (VGA)
932 * Find out all required AML methods defined under the video bus device.
937 if (acpi_has_method(video->device->handle, "_DOS")) acpi_video_bus_find_cap()
939 if (acpi_has_method(video->device->handle, "_DOD")) acpi_video_bus_find_cap()
941 if (acpi_has_method(video->device->handle, "_ROM")) acpi_video_bus_find_cap()
943 if (acpi_has_method(video->device->handle, "_GPD")) acpi_video_bus_find_cap()
945 if (acpi_has_method(video->device->handle, "_SPD")) acpi_video_bus_find_cap()
947 if (acpi_has_method(video->device->handle, "_VPO")) acpi_video_bus_find_cap()
952 * Check whether the video bus device has required AML method to
964 dev = acpi_get_pci_dev(video->device->handle); acpi_video_bus_check()
974 /* Does this device support video switching? */ acpi_video_bus_check()
979 acpi_device_bid(video->device)); acpi_video_bus_check()
985 /* Does this device support retrieving a video ROM? */ acpi_video_bus_check()
991 /* Does this device support configuring which video device to POST? */ acpi_video_bus_check()
1006 /* device interface */
1039 acpi_video_bus_get_one_device(struct acpi_device *device, acpi_video_bus_get_one_device() argument
1048 acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id); acpi_video_bus_get_one_device()
1049 /* Some device omits _ADR, we skip them instead of fail */ acpi_video_bus_get_one_device()
1057 strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME); acpi_video_bus_get_one_device()
1058 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); acpi_video_bus_get_one_device()
1059 device->driver_data = data; acpi_video_bus_get_one_device()
1063 data->dev = device; acpi_video_bus_get_one_device()
1120 * video : video bus device
1125 * Enumerate the video device list of the video bus,
1144 * video : video bus device
1145 * device : video output device under the video
1157 struct acpi_video_device *device) acpi_video_device_bind()
1164 if (device->device_id == (ids->value.int_val & 0xffff)) { acpi_video_device_bind()
1165 ids->bind_info = device; acpi_video_device_bind()
1171 static bool acpi_video_device_in_dod(struct acpi_video_device *device) acpi_video_device_in_dod() argument
1173 struct acpi_video_bus *video = device->video; acpi_video_device_in_dod()
1186 (device->device_id & 0xfff)) acpi_video_device_in_dod()
1195 * video : video bus device
1214 status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer); acpi_video_device_enumerate()
1266 acpi_video_get_next_level(struct acpi_video_device *device, acpi_video_get_next_level() argument
1273 for (i = 2; i < device->brightness->count; i++) { acpi_video_get_next_level()
1274 l = device->brightness->levels[i]; acpi_video_get_next_level()
1283 for (i = 2; i < device->brightness->count; i++) { acpi_video_get_next_level()
1284 l = device->brightness->levels[i]; acpi_video_get_next_level()
1313 struct acpi_video_device *device = container_of(to_delayed_work(work), acpi_video_switch_brightness() local
1316 int event = device->switch_brightness_event; acpi_video_switch_brightness()
1320 if (!device->backlight) acpi_video_switch_brightness()
1323 if (!device->brightness) acpi_video_switch_brightness()
1326 result = acpi_video_device_lcd_get_level_current(device, acpi_video_switch_brightness()
1332 level_next = acpi_video_get_next_level(device, level_current, event); acpi_video_switch_brightness()
1334 result = acpi_video_device_lcd_set_level(device, level_next); acpi_video_switch_brightness()
1337 backlight_force_update(device->backlight, acpi_video_switch_brightness()
1345 int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, acpi_video_get_edid() argument
1354 if (!device || !acpi_driver_data(device)) acpi_video_get_edid()
1357 video = acpi_driver_data(device); acpi_video_get_edid()
1415 struct acpi_device *device) acpi_video_bus_get_devices()
1427 list_for_each_entry(dev, &device->children, node) { acpi_video_bus_get_devices()
1431 dev_err(&dev->dev, "Can't attach device\n"); acpi_video_bus_get_devices()
1457 static void acpi_video_bus_notify(struct acpi_device *device, u32 event) acpi_video_bus_notify() argument
1459 struct acpi_video_bus *video = acpi_driver_data(device); acpi_video_bus_notify()
1497 if (acpi_notifier_call_chain(device, event, 0)) acpi_video_bus_notify()
1524 struct acpi_device *device = NULL; acpi_video_device_notify() local
1532 device = video_device->dev; acpi_video_device_notify()
1553 case ACPI_VIDEO_NOTIFY_DISPLAY_OFF: /* display device off */ acpi_video_device_notify()
1563 acpi_notifier_call_chain(device, event, 0); acpi_video_device_notify()
1591 dev_info(&video->device->dev, "Restoring backlight state\n"); acpi_video_resume()
1607 struct acpi_device *device = context; acpi_video_bus_match() local
1611 if (handle == device->handle) acpi_video_bus_match()
1624 static void acpi_video_dev_register_backlight(struct acpi_video_device *device) acpi_video_dev_register_backlight() argument
1629 struct device *parent = NULL; acpi_video_dev_register_backlight()
1634 result = acpi_video_init_brightness(device); acpi_video_dev_register_backlight()
1646 acpi_get_parent(device->dev->handle, &acpi_parent); acpi_video_dev_register_backlight()
1656 props.max_brightness = device->brightness->count - 3; acpi_video_dev_register_backlight()
1657 device->backlight = backlight_device_register(name, acpi_video_dev_register_backlight()
1659 device, acpi_video_dev_register_backlight()
1663 if (IS_ERR(device->backlight)) { acpi_video_dev_register_backlight()
1664 device->backlight = NULL; acpi_video_dev_register_backlight()
1672 device->backlight->props.brightness = acpi_video_dev_register_backlight()
1673 acpi_video_get_brightness(device->backlight); acpi_video_dev_register_backlight()
1675 device->cooling_dev = thermal_cooling_device_register("LCD", acpi_video_dev_register_backlight()
1676 device->dev, &video_cooling_ops); acpi_video_dev_register_backlight()
1677 if (IS_ERR(device->cooling_dev)) { acpi_video_dev_register_backlight()
1681 * register video output if cooling device registration failed? acpi_video_dev_register_backlight()
1684 device->cooling_dev = NULL; acpi_video_dev_register_backlight()
1688 dev_info(&device->dev->dev, "registered as cooling_device%d\n", acpi_video_dev_register_backlight()
1689 device->cooling_dev->id); acpi_video_dev_register_backlight()
1690 result = sysfs_create_link(&device->dev->dev.kobj, acpi_video_dev_register_backlight()
1691 &device->cooling_dev->device.kobj, acpi_video_dev_register_backlight()
1695 result = sysfs_create_link(&device->cooling_dev->device.kobj, acpi_video_dev_register_backlight()
1696 &device->dev->dev.kobj, "device"); acpi_video_dev_register_backlight()
1717 * Do not create backlight device for video output acpi_video_should_register_backlight()
1718 * device that is not in the enumerated list. acpi_video_should_register_backlight()
1756 static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device) acpi_video_dev_unregister_backlight() argument
1758 if (device->backlight) { acpi_video_dev_unregister_backlight()
1759 backlight_device_unregister(device->backlight); acpi_video_dev_unregister_backlight()
1760 device->backlight = NULL; acpi_video_dev_unregister_backlight()
1762 if (device->brightness) { acpi_video_dev_unregister_backlight()
1763 kfree(device->brightness->levels); acpi_video_dev_unregister_backlight()
1764 kfree(device->brightness); acpi_video_dev_unregister_backlight()
1765 device->brightness = NULL; acpi_video_dev_unregister_backlight()
1767 if (device->cooling_dev) { acpi_video_dev_unregister_backlight()
1768 sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling"); acpi_video_dev_unregister_backlight()
1769 sysfs_remove_link(&device->cooling_dev->device.kobj, "device"); acpi_video_dev_unregister_backlight()
1770 thermal_cooling_device_unregister(device->cooling_dev); acpi_video_dev_unregister_backlight()
1771 device->cooling_dev = NULL; acpi_video_dev_unregister_backlight()
1795 static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device) acpi_video_dev_add_notify_handler() argument
1798 struct acpi_device *adev = device->dev; acpi_video_dev_add_notify_handler()
1801 acpi_video_device_notify, device); acpi_video_dev_add_notify_handler()
1805 device->flags.notify = 1; acpi_video_dev_add_notify_handler()
1825 "%s/video/input0", acpi_device_hid(video->device)); acpi_video_bus_add_notify_handler()
1827 input->name = acpi_device_name(video->device); acpi_video_bus_add_notify_handler()
1831 input->dev.parent = &video->device->dev; acpi_video_bus_add_notify_handler()
1901 static int acpi_video_bus_add(struct acpi_device *device) acpi_video_bus_add() argument
1908 device->parent->handle, 1, acpi_video_bus_add()
1910 device, NULL); acpi_video_bus_add()
1926 if (!strcmp(device->pnp.bus_id, "VID")) { acpi_video_bus_add()
1928 device->pnp.bus_id[3] = '0' + instance; acpi_video_bus_add()
1932 if (!strcmp(device->pnp.bus_id, "VGA")) { acpi_video_bus_add()
1934 device->pnp.bus_id[3] = '0' + instance; acpi_video_bus_add()
1938 video->device = device; acpi_video_bus_add()
1939 strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); acpi_video_bus_add()
1940 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); acpi_video_bus_add()
1941 device->driver_data = video; acpi_video_bus_add()
1951 error = acpi_video_bus_get_devices(video, device); acpi_video_bus_add()
1956 ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), acpi_video_bus_add()
1974 device->driver_data = NULL; acpi_video_bus_add()
1979 static int acpi_video_bus_remove(struct acpi_device *device) acpi_video_bus_remove() argument
1984 if (!device || !acpi_driver_data(device)) acpi_video_bus_remove()
1987 video = acpi_driver_data(device); acpi_video_bus_remove()
2005 if (dev->device == 0x00D1) is_i740()
2007 if (dev->device == 0x7000) is_i740()
1156 acpi_video_device_bind(struct acpi_video_bus *video, struct acpi_video_device *device) acpi_video_device_bind() argument
1414 acpi_video_bus_get_devices(struct acpi_video_bus *video, struct acpi_device *device) acpi_video_bus_get_devices() argument
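The paired sysfs_create_link()/sysfs_remove_link() calls in the hits above follow the usual symmetric cross-link pattern. A minimal generic sketch of that pattern; the "peer"/"owner" names and the link_peers()/unlink_peers() helpers are illustrative, not from this driver:

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    /* Link two already-registered kobjects to each other, undoing the
     * first link if the second fails (mirrors the driver's
     * "thermal_cooling"/"device" pair). */
    static int link_peers(struct kobject *a, struct kobject *b)
    {
    	int err;

    	err = sysfs_create_link(a, b, "peer");	/* a/peer -> b */
    	if (err)
    		return err;

    	err = sysfs_create_link(b, a, "owner");	/* b/owner -> a */
    	if (err)
    		sysfs_remove_link(a, "peer");
    	return err;
    }

    /* Tear the links down in reverse order on cleanup. */
    static void unlink_peers(struct kobject *a, struct kobject *b)
    {
    	sysfs_remove_link(b, "owner");
    	sysfs_remove_link(a, "peer");
    }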
/linux-4.4.14/include/xen/
H A Dswiotlb-xen.h10 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
15 xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
19 extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
24 extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
28 xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
33 xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
38 xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
42 xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
46 xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
50 xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
54 xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
57 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
60 xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/
H A Dxtensa.c47 return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align, nvkm_xtensa_cclass_bind()
61 struct nvkm_device *device = subdev->device; nvkm_xtensa_intr() local
63 u32 unk104 = nvkm_rd32(device, base + 0xd04); nvkm_xtensa_intr()
64 u32 intr = nvkm_rd32(device, base + 0xc20); nvkm_xtensa_intr()
65 u32 chan = nvkm_rd32(device, base + 0xc28); nvkm_xtensa_intr()
66 u32 unk10c = nvkm_rd32(device, base + 0xd0c); nvkm_xtensa_intr()
70 nvkm_wr32(device, base + 0xc20, intr); nvkm_xtensa_intr()
71 intr = nvkm_rd32(device, base + 0xc20); nvkm_xtensa_intr()
74 nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->func->fifo_val); nvkm_xtensa_intr()
82 struct nvkm_device *device = xtensa->engine.subdev.device; nvkm_xtensa_fini() local
85 nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */ nvkm_xtensa_fini()
86 nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */ nvkm_xtensa_fini()
98 struct nvkm_device *device = subdev->device; nvkm_xtensa_init() local
110 ret = request_firmware(&fw, name, device->dev); nvkm_xtensa_init()
122 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, nvkm_xtensa_init()
140 nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */ nvkm_xtensa_init()
141 nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */ nvkm_xtensa_init()
143 nvkm_wr32(device, base + 0xd28, xtensa->func->unkd28); /* ?? */ nvkm_xtensa_init()
144 nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */ nvkm_xtensa_init()
145 nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */ nvkm_xtensa_init()
147 nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */ nvkm_xtensa_init()
148 nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */ nvkm_xtensa_init()
149 nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */ nvkm_xtensa_init()
151 tmp = nvkm_rd32(device, 0x0); nvkm_xtensa_init()
152 nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */ nvkm_xtensa_init()
154 nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */ nvkm_xtensa_init()
156 nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */ nvkm_xtensa_init()
157 nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */ nvkm_xtensa_init()
179 struct nvkm_device *device, int index, bool enable, nvkm_xtensa_new_()
190 return nvkm_engine_ctor(&nvkm_xtensa, device, index, func->pmc_enable, nvkm_xtensa_new_()
178 nvkm_xtensa_new_(const struct nvkm_xtensa_func *func, struct nvkm_device *device, int index, bool enable, u32 addr, struct nvkm_engine **pengine) nvkm_xtensa_new_() argument
H A Dfalcon.c48 return nvkm_gpuobj_new(object->engine->subdev.device, 256, nvkm_falcon_cclass_bind()
62 struct nvkm_device *device = subdev->device; nvkm_falcon_intr() local
64 u32 dest = nvkm_rd32(device, base + 0x01c); nvkm_falcon_intr()
65 u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16); nvkm_falcon_intr()
66 u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff; nvkm_falcon_intr()
70 chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags); nvkm_falcon_intr()
75 nvkm_wr32(device, base + 0x004, 0x00000040); nvkm_falcon_intr()
82 nvkm_wr32(device, base + 0x004, 0x00000010); nvkm_falcon_intr()
88 nvkm_wr32(device, base + 0x004, intr); nvkm_falcon_intr()
91 nvkm_fifo_chan_put(device->fifo, flags, &chan); nvkm_falcon_intr()
98 struct nvkm_device *device = falcon->engine.subdev.device; nvkm_falcon_fini() local
110 nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); nvkm_falcon_fini()
111 nvkm_wr32(device, base + 0x014, 0xffffffff); nvkm_falcon_fini()
130 struct nvkm_device *device = subdev->device; nvkm_falcon_oneinit() local
135 if (device->chipset < 0xa3 || nvkm_falcon_oneinit()
136 device->chipset == 0xaa || device->chipset == 0xac) { nvkm_falcon_oneinit()
140 caps = nvkm_rd32(device, base + 0x12c); nvkm_falcon_oneinit()
145 caps = nvkm_rd32(device, base + 0x108); nvkm_falcon_oneinit()
161 struct nvkm_device *device = subdev->device; nvkm_falcon_init() local
170 nvkm_msec(device, 2000, nvkm_falcon_init()
171 if (nvkm_rd32(device, base + 0x008) & 0x00000010) nvkm_falcon_init()
175 nvkm_msec(device, 2000, nvkm_falcon_init()
176 if (!(nvkm_rd32(device, base + 0x180) & 0x80000000)) nvkm_falcon_init()
180 nvkm_wr32(device, base + 0x004, 0x00000010); nvkm_falcon_init()
184 nvkm_wr32(device, base + 0x014, 0xffffffff); nvkm_falcon_init()
191 device->chipset, falcon->addr >> 12); nvkm_falcon_init()
193 ret = request_firmware(&fw, name, device->dev); nvkm_falcon_init()
210 device->chipset, falcon->addr >> 12); nvkm_falcon_init()
212 ret = request_firmware(&fw, name, device->dev); nvkm_falcon_init()
225 device->chipset, falcon->addr >> 12); nvkm_falcon_init()
227 ret = request_firmware(&fw, name, device->dev); nvkm_falcon_init()
245 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, nvkm_falcon_init()
262 if (device->card_type < NV_C0) nvkm_falcon_init()
263 nvkm_wr32(device, base + 0x618, 0x04000000); nvkm_falcon_init()
265 nvkm_wr32(device, base + 0x618, 0x00000114); nvkm_falcon_init()
266 nvkm_wr32(device, base + 0x11c, 0); nvkm_falcon_init()
267 nvkm_wr32(device, base + 0x110, addr >> 8); nvkm_falcon_init()
268 nvkm_wr32(device, base + 0x114, 0); nvkm_falcon_init()
269 nvkm_wr32(device, base + 0x118, 0x00006610); nvkm_falcon_init()
278 nvkm_wr32(device, base + 0xff8, 0x00100000); nvkm_falcon_init()
280 nvkm_wr32(device, base + 0xff4, falcon->code.data[i]); nvkm_falcon_init()
282 nvkm_wr32(device, base + 0x180, 0x01000000); nvkm_falcon_init()
285 nvkm_wr32(device, base + 0x188, i >> 6); nvkm_falcon_init()
286 nvkm_wr32(device, base + 0x184, falcon->code.data[i]); nvkm_falcon_init()
293 nvkm_wr32(device, base + 0xff8, 0x00000000); nvkm_falcon_init()
295 nvkm_wr32(device, base + 0xff4, falcon->data.data[i]); nvkm_falcon_init()
297 nvkm_wr32(device, base + 0xff4, 0x00000000); nvkm_falcon_init()
299 nvkm_wr32(device, base + 0x1c0, 0x01000000); nvkm_falcon_init()
301 nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]); nvkm_falcon_init()
303 nvkm_wr32(device, base + 0x1c4, 0x00000000); nvkm_falcon_init()
307 nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */ nvkm_falcon_init()
308 nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */ nvkm_falcon_init()
309 nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */ nvkm_falcon_init()
310 nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */ nvkm_falcon_init()
336 struct nvkm_device *device, int index, bool enable, nvkm_falcon_new_()
351 return nvkm_engine_ctor(&nvkm_falcon, device, index, func->pmc_enable, nvkm_falcon_new_()
335 nvkm_falcon_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device, int index, bool enable, u32 addr, struct nvkm_engine **pengine) nvkm_falcon_new_() argument
/linux-4.4.14/drivers/s390/net/
H A Dqeth_l2.h11 int qeth_l2_create_device_attributes(struct device *);
12 void qeth_l2_remove_device_attributes(struct device *);
/linux-4.4.14/drivers/iio/accel/
H A Dbmc150-accel.h15 int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
17 int bmc150_accel_core_remove(struct device *dev);
/linux-4.4.14/drivers/iio/gyro/
H A Dbmg160.h6 int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
8 void bmg160_core_remove(struct device *dev);
/linux-4.4.14/drivers/mfd/
H A Dtwl-core.h4 extern int twl6030_init_irq(struct device *dev, int irq_num);
6 extern int twl4030_init_irq(struct device *dev, int irq_num);
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/core/
H A Dpci.h3 #include <core/device.h>
6 struct nvkm_device device; member in struct:nvkm_device_pci
/linux-4.4.14/arch/xtensa/include/asm/
H A Ddevice.h2 * Arch specific extensions to struct device
12 /* DMA operations on that device */
/linux-4.4.14/arch/mips/include/asm/
H A Ddevice.h2 * Arch specific extensions to struct device
12 /* DMA operations on that device */
/linux-4.4.14/arch/mips/include/asm/mach-au1x00/
H A Dau1550nd.h13 int devwidth; /* 0 = 8bit device, 1 = 16bit device */
/linux-4.4.14/arch/mips/include/asm/mach-jazz/
H A Ddma-coherence.h13 struct device;
15 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size) plat_map_dma_mem()
20 static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, plat_map_dma_mem_page()
26 static inline unsigned long plat_dma_addr_to_phys(struct device *dev, plat_dma_addr_to_phys()
32 static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, plat_unmap_dma_mem()
38 static inline int plat_dma_supported(struct device *dev, u64 mask) plat_dma_supported()
51 static inline void plat_post_dma_flush(struct device *dev) plat_post_dma_flush()
55 static inline int plat_device_is_coherent(struct device *dev) plat_device_is_coherent()
/linux-4.4.14/arch/arm/mach-integrator/
H A Dimpd1.h12 struct device;
14 void impd1_tweak_control(struct device *dev, u32 mask, u32 val);
/linux-4.4.14/include/linux/iio/
H A Dkfifo_buf.h11 struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
12 void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
/linux-4.4.14/arch/cris/include/asm/
H A Ddma-mapping.h19 void *dma_alloc_coherent(struct device *dev, size_t size,
22 void dma_free_coherent(struct device *dev, size_t size,
26 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, dma_alloc_coherent()
34 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_free_coherent()
41 dma_map_single(struct device *dev, void *ptr, size_t size, dma_map_single()
49 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, dma_unmap_single()
56 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg()
64 dma_map_page(struct device *dev, struct page *page, unsigned long offset, dma_map_page()
72 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, dma_unmap_page()
80 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, dma_unmap_sg()
87 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, dma_sync_single_for_cpu()
93 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, dma_sync_single_for_device()
99 dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, dma_sync_single_range_for_cpu()
106 dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, dma_sync_single_range_for_device()
113 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_cpu()
119 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_device()
125 dma_mapping_error(struct device *dev, dma_addr_t dma_addr) dma_mapping_error()
131 dma_supported(struct device *dev, u64 mask) dma_supported()
145 dma_set_mask(struct device *dev, u64 mask) dma_set_mask()
156 dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync()
162 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
164 extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
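These per-arch stubs back the generic streaming-DMA API. A minimal sketch of how a driver consumes that API; the buffer, length, and DMA direction here are illustrative:

    #include <linux/dma-mapping.h>

    /* Map a kernel buffer for a CPU-to-device transfer, check the
     * mapping, and unmap once the hardware has finished with it. */
    static int dma_tx_example(struct device *dev, void *buf, size_t len)
    {
    	dma_addr_t handle;

    	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    	if (dma_mapping_error(dev, handle))
    		return -ENOMEM;

    	/* ... program the device with 'handle' and wait for completion ... */

    	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    	return 0;
    }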
/linux-4.4.14/drivers/staging/vme/devices/
H A DMakefile2 # Makefile for the VME device drivers.
/linux-4.4.14/drivers/uwb/
H A Dumc-dev.c2 * UWB Multi-interface Controller device management.
13 static void umc_device_release(struct device *dev) umc_device_release()
21 * umc_device_create - allocate a child UMC device
22 * @parent: parent of the new UMC device.
23 * @n: index of the new device.
25 * The new UMC device will have a bus ID of the parent with '-n'
28 struct umc_dev *umc_device_create(struct device *parent, int n) umc_device_create()
46 * umc_device_register - register a UMC device
47 * @umc: pointer to the UMC device
49 * The memory resource for the UMC device is acquired and the device
77 * umc_device_unregister - unregister a UMC device
78 * @umc: pointer to the UMC device
80 * First we unregister the device, make sure the driver can do its umc_device_unregister()
82 * resources. We take a ref to the device, to make sure it doesn't
87 struct device *dev; umc_device_unregister()
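Taken together, the kernel-doc above describes an allocate-then-register flow. A hedged sketch of that flow; the return convention of umc_device_register() and the cleanup helper are assumptions, since neither is shown in these hits:

    #include <linux/uwb/umc.h>

    /* Create child "<parent>-0", then register it so its memory
     * resource is acquired. */
    static struct umc_dev *example_umc_add(struct device *parent)
    {
    	struct umc_dev *umc;

    	umc = umc_device_create(parent, 0);
    	if (!umc)
    		return NULL;

    	if (umc_device_register(umc)) {	/* assumed: 0 on success */
    		umc_device_destroy(umc);	/* hypothetical cleanup helper */
    		return NULL;
    	}
    	return umc;
    }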
/linux-4.4.14/drivers/video/fbdev/sis/
H A DMakefile2 # Makefile for the SiS framebuffer device driver
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
H A DMakefile5 iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/
H A Dg94.c29 struct nvkm_device *device = gpio->subdev.device; g94_gpio_intr_stat() local
30 u32 intr0 = nvkm_rd32(device, 0x00e054); g94_gpio_intr_stat()
31 u32 intr1 = nvkm_rd32(device, 0x00e074); g94_gpio_intr_stat()
32 u32 stat0 = nvkm_rd32(device, 0x00e050) & intr0; g94_gpio_intr_stat()
33 u32 stat1 = nvkm_rd32(device, 0x00e070) & intr1; g94_gpio_intr_stat()
36 nvkm_wr32(device, 0x00e054, intr0); g94_gpio_intr_stat()
37 nvkm_wr32(device, 0x00e074, intr1); g94_gpio_intr_stat()
43 struct nvkm_device *device = gpio->subdev.device; g94_gpio_intr_mask() local
44 u32 inte0 = nvkm_rd32(device, 0x00e050); g94_gpio_intr_mask()
45 u32 inte1 = nvkm_rd32(device, 0x00e070); g94_gpio_intr_mask()
56 nvkm_wr32(device, 0x00e050, inte0); g94_gpio_intr_mask()
57 nvkm_wr32(device, 0x00e070, inte1); g94_gpio_intr_mask()
71 g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio) g94_gpio_new() argument
73 return nvkm_gpio_new_(&g94_gpio, device, index, pgpio); g94_gpio_new()
H A Dgk104.c29 struct nvkm_device *device = gpio->subdev.device; gk104_gpio_intr_stat() local
30 u32 intr0 = nvkm_rd32(device, 0x00dc00); gk104_gpio_intr_stat()
31 u32 intr1 = nvkm_rd32(device, 0x00dc80); gk104_gpio_intr_stat()
32 u32 stat0 = nvkm_rd32(device, 0x00dc08) & intr0; gk104_gpio_intr_stat()
33 u32 stat1 = nvkm_rd32(device, 0x00dc88) & intr1; gk104_gpio_intr_stat()
36 nvkm_wr32(device, 0x00dc00, intr0); gk104_gpio_intr_stat()
37 nvkm_wr32(device, 0x00dc80, intr1); gk104_gpio_intr_stat()
43 struct nvkm_device *device = gpio->subdev.device; gk104_gpio_intr_mask() local
44 u32 inte0 = nvkm_rd32(device, 0x00dc08); gk104_gpio_intr_mask()
45 u32 inte1 = nvkm_rd32(device, 0x00dc88); gk104_gpio_intr_mask()
56 nvkm_wr32(device, 0x00dc08, inte0); gk104_gpio_intr_mask()
57 nvkm_wr32(device, 0x00dc88, inte1); gk104_gpio_intr_mask()
71 gk104_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio) gk104_gpio_new() argument
73 return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio); gk104_gpio_new()
H A Dnv10.c31 struct nvkm_device *device = gpio->subdev.device; nv10_gpio_sense() local
34 line = nvkm_rd32(device, 0x600818) >> line; nv10_gpio_sense()
39 line = nvkm_rd32(device, 0x60081c) >> line; nv10_gpio_sense()
44 line = nvkm_rd32(device, 0x600850) >> line; nv10_gpio_sense()
54 struct nvkm_device *device = gpio->subdev.device; nv10_gpio_drive() local
78 nvkm_mask(device, reg, mask << line, data << line); nv10_gpio_drive()
85 struct nvkm_device *device = gpio->subdev.device; nv10_gpio_intr_stat() local
86 u32 intr = nvkm_rd32(device, 0x001104); nv10_gpio_intr_stat()
87 u32 stat = nvkm_rd32(device, 0x001144) & intr; nv10_gpio_intr_stat()
90 nvkm_wr32(device, 0x001104, intr); nv10_gpio_intr_stat()
96 struct nvkm_device *device = gpio->subdev.device; nv10_gpio_intr_mask() local
97 u32 inte = nvkm_rd32(device, 0x001144); nv10_gpio_intr_mask()
102 nvkm_wr32(device, 0x001144, inte); nv10_gpio_intr_mask()
115 nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio) nv10_gpio_new() argument
117 return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio); nv10_gpio_new()
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/
H A DMakefile2 # Makefile for the eHEA ethernet device driver for IBM eServer System p
/linux-4.4.14/drivers/net/ethernet/altera/
H A DMakefile2 # Makefile for the Altera device drivers.
/linux-4.4.14/drivers/net/ethernet/arc/
H A DMakefile2 # Makefile for the ARC network device drivers.
/linux-4.4.14/arch/sh/include/asm/
H A Ddma-mapping.h7 static inline struct dma_map_ops *get_dma_ops(struct device *dev) get_dma_ops()
16 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
20 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
23 extern void dma_generic_free_coherent(struct device *dev, size_t size,
/linux-4.4.14/sound/usb/caiaq/
H A DMakefile1 snd-usb-caiaq-y := device.o audio.o midi.o control.o
/linux-4.4.14/include/uapi/linux/
H A Dusbip.h10 /* usbip device status - exported in usbip device sysfs status */
19 /* vdev is not connected to a remote device. */
/linux-4.4.14/arch/powerpc/sysdev/
H A Daxonram.c6 * Axon DDR2 device driver.
7 * It registers one block device per Axon's DDR2 memory bank found on a system.
28 #include <linux/device.h>
62 struct platform_device *device; member in struct:axon_ram_bank
72 axon_ram_sysfs_ecc(struct device *dev, struct device_attribute *attr, char *buf) axon_ram_sysfs_ecc()
74 struct platform_device *device = to_platform_device(dev); axon_ram_sysfs_ecc() local
75 struct axon_ram_bank *bank = device->dev.platform_data; axon_ram_sysfs_ecc()
92 struct platform_device *device = dev; axon_ram_irq_handler() local
93 struct axon_ram_bank *bank = device->dev.platform_data; axon_ram_irq_handler()
97 dev_err(&device->dev, "Correctable memory error occurred\n"); axon_ram_irq_handler()
103 * axon_ram_make_request - make_request() method for block device
140 * axon_ram_direct_access - direct_access() method for block device
141 * @device, @sector, @data: see block_device_operations method
144 axon_ram_direct_access(struct block_device *device, sector_t sector, axon_ram_direct_access() argument
147 struct axon_ram_bank *bank = device->bd_disk->private_data; axon_ram_direct_access()
164 * @device: see platform_driver method
166 static int axon_ram_probe(struct platform_device *device) axon_ram_probe() argument
175 dev_info(&device->dev, "Found memory controller on %s\n", axon_ram_probe()
176 device->dev.of_node->full_name); axon_ram_probe()
180 dev_err(&device->dev, "Out of memory\n"); axon_ram_probe()
185 device->dev.platform_data = bank; axon_ram_probe()
187 bank->device = device; axon_ram_probe()
189 if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) { axon_ram_probe()
190 dev_err(&device->dev, "Cannot access device tree\n"); axon_ram_probe()
198 dev_err(&device->dev, "No DDR2 memory found for %s%d\n", axon_ram_probe()
204 dev_info(&device->dev, "Register DDR2 memory device %s%d with %luMB\n", axon_ram_probe()
211 dev_err(&device->dev, "ioremap() failed\n"); axon_ram_probe()
218 dev_err(&device->dev, "Cannot register disk\n"); axon_ram_probe()
227 bank->disk->driverfs_dev = &device->dev; axon_ram_probe()
234 dev_err(&device->dev, "Cannot register disk queue\n"); axon_ram_probe()
244 bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0); axon_ram_probe()
246 dev_err(&device->dev, "Cannot access ECC interrupt ID\n"); axon_ram_probe()
252 AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device); axon_ram_probe()
254 dev_err(&device->dev, "Cannot register ECC interrupt handler\n"); axon_ram_probe()
260 rc = device_create_file(&device->dev, &dev_attr_ecc); axon_ram_probe()
262 dev_err(&device->dev, "Cannot create sysfs file\n"); axon_ram_probe()
274 free_irq(bank->irq_id, device); axon_ram_probe()
281 device->dev.platform_data = NULL; axon_ram_probe()
292 * @device: see of_platform_driver method
295 axon_ram_remove(struct platform_device *device) axon_ram_remove() argument
297 struct axon_ram_bank *bank = device->dev.platform_data; axon_ram_remove()
301 device_remove_file(&device->dev, &dev_attr_ecc); axon_ram_remove()
302 free_irq(bank->irq_id, device); axon_ram_remove()
334 printk(KERN_ERR "%s cannot become block device major number\n", axon_ram_init()
358 MODULE_DESCRIPTION("Axon DDR2 RAM device driver for IBM Cell BE");
/linux-4.4.14/sound/soc/codecs/
H A Dadau1761.h16 struct device;
18 int adau1761_probe(struct device *dev, struct regmap *regmap,
19 enum adau17x1_type type, void (*switch_mode)(struct device *dev));
H A Dadau1781.h16 struct device;
18 int adau1781_probe(struct device *dev, struct regmap *regmap,
19 enum adau17x1_type type, void (*switch_mode)(struct device *dev));
H A Dadau1977.h15 struct device;
23 int adau1977_probe(struct device *dev, struct regmap *regmap,
24 enum adau1977_type type, void (*switch_mode)(struct device *dev));
/linux-4.4.14/arch/frv/include/asm/
H A Ddma-mapping.h4 #include <linux/device.h>
21 void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
22 void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
24 extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
28 void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, dma_unmap_single()
34 extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
38 void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, dma_unmap_sg()
45 dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
49 void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, dma_unmap_page()
57 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, dma_sync_single_for_cpu()
63 void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, dma_sync_single_for_device()
70 void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, dma_sync_single_range_for_cpu()
77 void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, dma_sync_single_range_for_device()
85 void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_cpu()
91 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_device()
98 int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) dma_mapping_error()
104 int dma_supported(struct device *dev, u64 mask) dma_supported()
118 int dma_set_mask(struct device *dev, u64 mask) dma_set_mask()
129 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync()
136 static inline int dma_mmap_coherent(struct device *dev, dma_mmap_coherent()
143 static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, dma_get_sgtable()
/linux-4.4.14/sound/soc/
H A Dsoc-devres.c17 static void devm_component_release(struct device *dev, void *res) devm_component_release()
19 snd_soc_unregister_component(*(struct device **)res); devm_component_release()
29 * Register a component with automatic unregistration when the device is
32 int devm_snd_soc_register_component(struct device *dev, devm_snd_soc_register_component()
36 struct device **ptr; devm_snd_soc_register_component()
55 static void devm_platform_release(struct device *dev, void *res) devm_platform_release()
57 snd_soc_unregister_platform(*(struct device **)res); devm_platform_release()
65 * Register a platform driver with automatic unregistration when the device is
68 int devm_snd_soc_register_platform(struct device *dev, devm_snd_soc_register_platform()
71 struct device **ptr; devm_snd_soc_register_platform()
90 static void devm_card_release(struct device *dev, void *res) devm_card_release()
100 * Register a card with automatic unregistration when the device is
103 int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card) devm_snd_soc_register_card()
126 static void devm_dmaengine_pcm_release(struct device *dev, void *res) devm_dmaengine_pcm_release()
128 snd_dmaengine_pcm_unregister(*(struct device **)res); devm_dmaengine_pcm_release()
133 * @dev: The parent device for the PCM device
137 * Register a dmaengine based PCM device with automatic unregistration when the
138 * device is unregistered.
140 int devm_snd_dmaengine_pcm_register(struct device *dev, devm_snd_dmaengine_pcm_register()
143 struct device **ptr; devm_snd_dmaengine_pcm_register()
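All four wrappers in this file share one devres idiom: allocate a small devres record holding what was registered, so the matching unregister runs automatically when the device unbinds. A condensed sketch of that idiom; foo_register()/foo_unregister() are hypothetical stand-ins for the real registration calls:

    #include <linux/device.h>

    static void devm_foo_release(struct device *dev, void *res)
    {
    	foo_unregister(*(struct device **)res);	/* hypothetical */
    }

    int devm_foo_register(struct device *dev)
    {
    	struct device **ptr;
    	int ret;

    	ptr = devres_alloc(devm_foo_release, sizeof(*ptr), GFP_KERNEL);
    	if (!ptr)
    		return -ENOMEM;

    	ret = foo_register(dev);	/* hypothetical */
    	if (ret == 0) {
    		*ptr = dev;
    		devres_add(dev, ptr);	/* release fires on driver unbind */
    	} else {
    		devres_free(ptr);
    	}
    	return ret;
    }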
/linux-4.4.14/drivers/infiniband/core/
H A Ddevice.c56 /* The device or client is going down. Do not call client or device
72 * client_list. device_mutex protects writer access by device and client
85 static int ib_device_check_mandatory(struct ib_device *device) ib_device_check_mandatory() argument
116 if (!*(void **) ((void *) device + mandatory_table[i].offset)) { ib_device_check_mandatory()
118 device->name, mandatory_table[i].name); ib_device_check_mandatory()
128 struct ib_device *device; __ib_device_get_by_name() local
130 list_for_each_entry(device, &device_list, core_list) __ib_device_get_by_name()
131 if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX)) __ib_device_get_by_name()
132 return device; __ib_device_get_by_name()
142 struct ib_device *device; alloc_name() local
149 list_for_each_entry(device, &device_list, core_list) { alloc_name()
150 if (!sscanf(device->name, name, &i)) alloc_name()
155 if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX)) alloc_name()
170 static void ib_device_release(struct device *device) ib_device_release() argument
172 struct ib_device *dev = container_of(device, struct ib_device, dev); ib_device_release()
179 static int ib_device_uevent(struct device *device, ib_device_uevent() argument
182 struct ib_device *dev = container_of(device, struct ib_device, dev); ib_device_uevent()
201 * ib_alloc_device - allocate an IB device struct
212 struct ib_device *device; ib_alloc_device() local
217 device = kzalloc(size, GFP_KERNEL); ib_alloc_device()
218 if (!device) ib_alloc_device()
221 device->dev.class = &ib_class; ib_alloc_device()
222 device_initialize(&device->dev); ib_alloc_device()
224 dev_set_drvdata(&device->dev, device); ib_alloc_device()
226 INIT_LIST_HEAD(&device->event_handler_list); ib_alloc_device()
227 spin_lock_init(&device->event_handler_lock); ib_alloc_device()
228 spin_lock_init(&device->client_data_lock); ib_alloc_device()
229 INIT_LIST_HEAD(&device->client_data_list); ib_alloc_device()
230 INIT_LIST_HEAD(&device->port_list); ib_alloc_device()
232 return device; ib_alloc_device()
237 * ib_dealloc_device - free an IB device struct
238 * @device:structure to free
242 void ib_dealloc_device(struct ib_device *device) ib_dealloc_device() argument
244 WARN_ON(device->reg_state != IB_DEV_UNREGISTERED && ib_dealloc_device()
245 device->reg_state != IB_DEV_UNINITIALIZED); ib_dealloc_device()
246 kobject_put(&device->dev.kobj); ib_dealloc_device()
250 static int add_client_context(struct ib_device *device, struct ib_client *client) add_client_context() argument
258 device->name, client->name); add_client_context()
267 spin_lock_irqsave(&device->client_data_lock, flags); add_client_context()
268 list_add(&context->list, &device->client_data_list); add_client_context()
269 spin_unlock_irqrestore(&device->client_data_lock, flags); add_client_context()
281 static int read_port_immutable(struct ib_device *device) read_port_immutable() argument
284 u8 start_port = rdma_start_port(device); read_port_immutable()
285 u8 end_port = rdma_end_port(device); read_port_immutable()
289 * device->port_immutable is indexed directly by the port number to make read_port_immutable()
295 device->port_immutable = kzalloc(sizeof(*device->port_immutable) read_port_immutable()
298 if (!device->port_immutable) read_port_immutable()
302 ret = device->get_port_immutable(device, port, read_port_immutable()
303 &device->port_immutable[port]); read_port_immutable()
307 if (verify_immutable(device, port)) read_port_immutable()
314 * ib_register_device - Register an IB device with IB core
315 * @device:Device to register
319 * callback for each device that is added. @device must be allocated
322 int ib_register_device(struct ib_device *device, ib_register_device() argument
331 if (strchr(device->name, '%')) { ib_register_device()
332 ret = alloc_name(device->name); ib_register_device()
337 if (ib_device_check_mandatory(device)) { ib_register_device()
342 ret = read_port_immutable(device); ib_register_device()
345 device->name); ib_register_device()
349 ret = ib_cache_setup_one(device); ib_register_device()
355 ret = ib_device_register_sysfs(device, port_callback); ib_register_device()
357 printk(KERN_WARNING "Couldn't register device %s with driver model\n", ib_register_device()
358 device->name); ib_register_device()
359 ib_cache_cleanup_one(device); ib_register_device()
363 device->reg_state = IB_DEV_REGISTERED; ib_register_device()
366 if (client->add && !add_client_context(device, client)) ib_register_device()
367 client->add(device); ib_register_device()
370 list_add_tail(&device->core_list, &device_list); ib_register_device()
379 * ib_unregister_device - Unregister an IB device
380 * @device:Device to unregister
382 * Unregister an IB device. All clients will receive a remove callback.
384 void ib_unregister_device(struct ib_device *device) ib_unregister_device() argument
392 list_del(&device->core_list); ib_unregister_device()
393 spin_lock_irqsave(&device->client_data_lock, flags); ib_unregister_device()
394 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ib_unregister_device()
396 spin_unlock_irqrestore(&device->client_data_lock, flags); ib_unregister_device()
399 list_for_each_entry_safe(context, tmp, &device->client_data_list, ib_unregister_device()
402 context->client->remove(device, context->data); ib_unregister_device()
408 ib_device_unregister_sysfs(device); ib_unregister_device()
409 ib_cache_cleanup_one(device); ib_unregister_device()
412 spin_lock_irqsave(&device->client_data_lock, flags); ib_unregister_device()
413 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ib_unregister_device()
415 spin_unlock_irqrestore(&device->client_data_lock, flags); ib_unregister_device()
418 device->reg_state = IB_DEV_UNREGISTERED; ib_unregister_device()
427 * register callbacks for IB device addition and removal. When an IB
428 * device is added, each registered client's add method will be called
429 * (in the order the clients were registered), and when a device is
437 struct ib_device *device; ib_register_client() local
441 list_for_each_entry(device, &device_list, core_list) ib_register_client()
442 if (client->add && !add_client_context(device, client)) ib_register_client()
443 client->add(device); ib_register_client()
461 * will receive a remove callback for each IB device still registered.
466 struct ib_device *device; ib_unregister_client() local
475 list_for_each_entry(device, &device_list, core_list) { ib_unregister_client()
479 spin_lock_irqsave(&device->client_data_lock, flags); ib_unregister_client()
480 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ib_unregister_client()
486 spin_unlock_irqrestore(&device->client_data_lock, flags); ib_unregister_client()
490 client->remove(device, found_context ? ib_unregister_client()
495 device->name, client->name); ib_unregister_client()
500 spin_lock_irqsave(&device->client_data_lock, flags); ib_unregister_client()
503 spin_unlock_irqrestore(&device->client_data_lock, flags); ib_unregister_client()
513 * @device:Device to get context for
519 void *ib_get_client_data(struct ib_device *device, struct ib_client *client) ib_get_client_data() argument
525 spin_lock_irqsave(&device->client_data_lock, flags); ib_get_client_data()
526 list_for_each_entry(context, &device->client_data_list, list) ib_get_client_data()
531 spin_unlock_irqrestore(&device->client_data_lock, flags); ib_get_client_data()
539 * @device:Device to set context for
546 void ib_set_client_data(struct ib_device *device, struct ib_client *client, ib_set_client_data() argument
552 spin_lock_irqsave(&device->client_data_lock, flags); ib_set_client_data()
553 list_for_each_entry(context, &device->client_data_list, list) ib_set_client_data()
560 device->name, client->name); ib_set_client_data()
563 spin_unlock_irqrestore(&device->client_data_lock, flags); ib_set_client_data()
580 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); ib_register_event_handler()
582 &event_handler->device->event_handler_list); ib_register_event_handler()
583 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); ib_register_event_handler()
600 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); ib_unregister_event_handler()
602 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); ib_unregister_event_handler()
621 spin_lock_irqsave(&event->device->event_handler_lock, flags); ib_dispatch_event()
623 list_for_each_entry(handler, &event->device->event_handler_list, list) ib_dispatch_event()
626 spin_unlock_irqrestore(&event->device->event_handler_lock, flags); ib_dispatch_event()
631 * ib_query_device - Query IB device attributes
632 * @device:Device to query
635 * ib_query_device() returns the attributes of a device through the
638 int ib_query_device(struct ib_device *device, ib_query_device() argument
645 return device->query_device(device, device_attr, &uhw); ib_query_device()
651 * @device:Device to query
658 int ib_query_port(struct ib_device *device, ib_query_port() argument
662 if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) ib_query_port()
665 return device->query_port(device, port_num, port_attr); ib_query_port()
671 * @device:Device to query
680 int ib_query_gid(struct ib_device *device, ib_query_gid() argument
684 if (rdma_cap_roce_gid_table(device, port_num)) ib_query_gid()
685 return ib_get_cached_gid(device, port_num, index, gid, attr); ib_query_gid()
690 return device->query_gid(device, port_num, index, gid); ib_query_gid()
696 * @ib_dev : IB device we want to query
704 * device for which filter() function returns non zero.
744 * to netdevices and calls callback() on each device for which
762 * @device:Device to query
769 int ib_query_pkey(struct ib_device *device, ib_query_pkey() argument
772 return device->query_pkey(device, port_num, index, pkey); ib_query_pkey()
777 * ib_modify_device - Change IB device attributes
778 * @device:Device to modify
782 * ib_modify_device() changes a device's attributes as specified by
785 int ib_modify_device(struct ib_device *device, ib_modify_device() argument
789 if (!device->modify_device) ib_modify_device()
792 return device->modify_device(device, device_modify_mask, ib_modify_device()
799 * @device: The device to modify.
808 int ib_modify_port(struct ib_device *device, ib_modify_port() argument
812 if (!device->modify_port) ib_modify_port()
815 if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) ib_modify_port()
818 return device->modify_port(device, port_num, port_modify_mask, ib_modify_port()
826 * @device: The device to query.
829 * @port_num: The port number of the device where the GID value was found.
833 int ib_find_gid(struct ib_device *device, union ib_gid *gid, ib_find_gid() argument
839 for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { ib_find_gid()
840 if (rdma_cap_roce_gid_table(device, port)) { ib_find_gid()
841 if (!ib_find_cached_gid_by_port(device, gid, port, ib_find_gid()
848 for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) { ib_find_gid()
849 ret = ib_query_gid(device, port, i, &tmp_gid, NULL); ib_find_gid()
868 * @device: The device to query.
869 * @port_num: The port number of the device to search for the PKey.
873 int ib_find_pkey(struct ib_device *device, ib_find_pkey() argument
880 for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) { ib_find_pkey()
881 ret = ib_query_pkey(device, port_num, i, &tmp_pkey); ib_find_pkey()
907 * @dev: An RDMA device on which the request has been received.
908 * @port: Port number on the RDMA device.
959 printk(KERN_WARNING "Couldn't create InfiniBand device class\n"); ib_core_init()
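The add/remove callbacks and per-device client data used throughout this file compose into the usual client shape. A minimal sketch under the signatures visible above (client->add takes the device, client->remove also receives the stored client data); the client name and printouts are illustrative:

    #include <rdma/ib_verbs.h>

    static struct ib_client example_client;

    /* Called for every registered IB device, and again on hot-add. */
    static void example_add_one(struct ib_device *device)
    {
    	pr_info("example: %s added\n", device->name);
    	ib_set_client_data(device, &example_client, NULL);
    }

    /* Receives whatever ib_set_client_data() stored for this device. */
    static void example_remove_one(struct ib_device *device, void *client_data)
    {
    	pr_info("example: %s removed\n", device->name);
    }

    static struct ib_client example_client = {
    	.name   = "example",
    	.add    = example_add_one,
    	.remove = example_remove_one,
    };

    /* Pair ib_register_client(&example_client) in module init with
     * ib_unregister_client(&example_client) in module exit. */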
/linux-4.4.14/arch/x86/include/asm/
H A Ddma-mapping.h26 extern struct device x86_dma_fallback_dev;
31 static inline struct dma_map_ops *get_dma_ops(struct device *dev) get_dma_ops()
43 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
47 extern int dma_supported(struct device *hwdev, u64 mask);
51 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
55 extern void dma_generic_free_coherent(struct device *dev, size_t size,
60 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
61 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
62 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
65 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) dma_capable()
73 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) phys_to_dma()
78 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) dma_to_phys()
85 dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_cache_sync()
91 static inline unsigned long dma_alloc_coherent_mask(struct device *dev, dma_alloc_coherent_mask()
103 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) dma_alloc_coherent_gfp_flags()
/linux-4.4.14/drivers/input/misc/
H A Dadxl34x.h13 struct device;
18 int (*read)(struct device *, unsigned char);
19 int (*read_block)(struct device *, unsigned char, int, void *);
20 int (*write)(struct device *, unsigned char, unsigned char);
25 struct adxl34x *adxl34x_probe(struct device *dev, int irq,
/linux-4.4.14/drivers/platform/x86/
H A Dxo15-ebook.c46 char phys[32]; /* for input device */
49 static int ebook_send_state(struct acpi_device *device) ebook_send_state() argument
51 struct ebook_switch *button = acpi_driver_data(device); ebook_send_state()
55 status = acpi_evaluate_integer(device->handle, "EBK", NULL, &state); ebook_send_state()
65 static void ebook_switch_notify(struct acpi_device *device, u32 event) ebook_switch_notify() argument
70 ebook_send_state(device); ebook_switch_notify()
80 static int ebook_switch_resume(struct device *dev) ebook_switch_resume()
88 static int ebook_switch_add(struct acpi_device *device) ebook_switch_add() argument
92 const char *hid = acpi_device_hid(device); ebook_switch_add()
100 device->driver_data = button; ebook_switch_add()
108 name = acpi_device_name(device); ebook_switch_add()
109 class = acpi_device_class(device); ebook_switch_add()
125 input->dev.parent = &device->dev; ebook_switch_add()
134 ebook_send_state(device); ebook_switch_add()
136 if (device->wakeup.flags.valid) { ebook_switch_add()
138 acpi_enable_gpe(device->wakeup.gpe_device, ebook_switch_add()
139 device->wakeup.gpe_number); ebook_switch_add()
140 device_set_wakeup_enable(&device->dev, true); ebook_switch_add()
152 static int ebook_switch_remove(struct acpi_device *device) ebook_switch_remove() argument
154 struct ebook_switch *button = acpi_driver_data(device); ebook_switch_remove()
H A Ddell-rbtn.c39 static enum rbtn_type rbtn_check(struct acpi_device *device) rbtn_check() argument
44 status = acpi_evaluate_integer(device->handle, "CRBT", NULL, &output); rbtn_check()
60 static int rbtn_get(struct acpi_device *device) rbtn_get() argument
65 status = acpi_evaluate_integer(device->handle, "GRBT", NULL, &output); rbtn_get()
72 static int rbtn_acquire(struct acpi_device *device, bool enable) rbtn_acquire() argument
83 status = acpi_evaluate_object(device->handle, "ARBT", &input, NULL); rbtn_acquire()
92 * rfkill device
97 struct acpi_device *device = data; rbtn_rfkill_query() local
100 state = rbtn_get(device); rbtn_rfkill_query()
118 static int rbtn_rfkill_init(struct acpi_device *device) rbtn_rfkill_init() argument
120 struct rbtn_data *rbtn_data = device->driver_data; rbtn_rfkill_init()
131 rbtn_data->rfkill = rfkill_alloc("dell-rbtn", &device->dev, rbtn_rfkill_init()
132 RFKILL_TYPE_WLAN, &rbtn_ops, device); rbtn_rfkill_init()
146 static void rbtn_rfkill_exit(struct acpi_device *device) rbtn_rfkill_exit() argument
148 struct rbtn_data *rbtn_data = device->driver_data; rbtn_rfkill_exit()
158 static void rbtn_rfkill_event(struct acpi_device *device) rbtn_rfkill_event() argument
160 struct rbtn_data *rbtn_data = device->driver_data; rbtn_rfkill_event()
163 rbtn_rfkill_query(rbtn_data->rfkill, device); rbtn_rfkill_event()
168 * input device
214 static int rbtn_add(struct acpi_device *device);
215 static int rbtn_remove(struct acpi_device *device);
216 static void rbtn_notify(struct acpi_device *device, u32 event);
232 static int rbtn_suspend(struct device *dev) rbtn_suspend()
234 struct acpi_device *device = to_acpi_device(dev); rbtn_suspend() local
235 struct rbtn_data *rbtn_data = acpi_driver_data(device); rbtn_suspend()
242 static int rbtn_resume(struct device *dev) rbtn_resume()
244 struct acpi_device *device = to_acpi_device(dev); rbtn_resume() local
245 struct rbtn_data *rbtn_data = acpi_driver_data(device); rbtn_resume()
290 static int rbtn_inc_count(struct device *dev, void *data) rbtn_inc_count()
292 struct acpi_device *device = to_acpi_device(dev); rbtn_inc_count() local
293 struct rbtn_data *rbtn_data = device->driver_data; rbtn_inc_count()
302 static int rbtn_switch_dev(struct device *dev, void *data) rbtn_switch_dev()
304 struct acpi_device *device = to_acpi_device(dev); rbtn_switch_dev() local
305 struct rbtn_data *rbtn_data = device->driver_data; rbtn_switch_dev()
312 rbtn_rfkill_init(device); rbtn_switch_dev()
314 rbtn_rfkill_exit(device); rbtn_switch_dev()
366 static int rbtn_add(struct acpi_device *device) rbtn_add() argument
372 type = rbtn_check(device); rbtn_add()
374 dev_info(&device->dev, "Unknown device type\n"); rbtn_add()
378 ret = rbtn_acquire(device, true); rbtn_add()
380 dev_err(&device->dev, "Cannot enable device\n"); rbtn_add()
384 rbtn_data = devm_kzalloc(&device->dev, sizeof(*rbtn_data), GFP_KERNEL); rbtn_add()
389 device->driver_data = rbtn_data; rbtn_add()
399 ret = rbtn_rfkill_init(device); rbtn_add()
409 static int rbtn_remove(struct acpi_device *device) rbtn_remove() argument
411 struct rbtn_data *rbtn_data = device->driver_data; rbtn_remove()
418 rbtn_rfkill_exit(device); rbtn_remove()
424 rbtn_acquire(device, false); rbtn_remove()
425 device->driver_data = NULL; rbtn_remove()
430 static void rbtn_notify(struct acpi_device *device, u32 event) rbtn_notify() argument
432 struct rbtn_data *rbtn_data = device->driver_data; rbtn_notify()
439 dev_dbg(&device->dev, "ACPI notification ignored\n"); rbtn_notify()
444 dev_info(&device->dev, "Received unknown event (0x%x)\n", rbtn_notify()
454 rbtn_rfkill_event(device); rbtn_notify()
455 atomic_notifier_call_chain(&rbtn_chain_head, event, device); rbtn_notify()
/linux-4.4.14/arch/x86/video/
H A Dfbdev.c16 struct device *device = info->device; fb_is_primary_device() local
21 if (device) fb_is_primary_device()
22 pci_dev = to_pci_dev(device); fb_is_primary_device()
/linux-4.4.14/arch/arm/mach-w90x900/include/mach/
H A Dmfp.h19 extern void mfp_set_groupf(struct device *dev);
20 extern void mfp_set_groupc(struct device *dev);
21 extern void mfp_set_groupi(struct device *dev);
22 extern void mfp_set_groupg(struct device *dev, const char *subname);
23 extern void mfp_set_groupd(struct device *dev, const char *subname);
/linux-4.4.14/drivers/base/
H A Dcore.c2 * drivers/base/core.c - core driver model code (device registration, etc)
13 #include <linux/device.h>
47 int (*platform_notify)(struct device *dev) = NULL;
48 int (*platform_notify_remove)(struct device *dev) = NULL;
76 static inline int device_is_not_partition(struct device *dev) device_is_not_partition()
81 static inline int device_is_not_partition(struct device *dev) device_is_not_partition()
88 * dev_driver_string - Return a device's driver name, if at all possible
89 * @dev: struct device to get the name of
91 * Will return the device's driver's name if it is bound to a device. If
92 * the device is not bound to a driver, it will return the name of the bus
96 const char *dev_driver_string(const struct device *dev) dev_driver_string()
117 struct device *dev = kobj_to_dev(kobj); dev_attr_show()
133 struct device *dev = kobj_to_dev(kobj); dev_attr_store()
148 ssize_t device_store_ulong(struct device *dev, device_store_ulong()
163 ssize_t device_show_ulong(struct device *dev, device_show_ulong()
172 ssize_t device_store_int(struct device *dev, device_store_int()
187 ssize_t device_show_int(struct device *dev, device_show_int()
197 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, device_store_bool()
209 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, device_show_bool()
219 * device_release - free device structure.
220 * @kobj: device's kobject.
223 * reaches 0. We forward the call to the device's release
228 struct device *dev = kobj_to_dev(kobj); device_release()
236 * Drivers still can add resources into device after device device_release()
257 struct device *dev = kobj_to_dev(kobj); device_namespace()
278 struct device *dev = kobj_to_dev(kobj); dev_uevent_filter()
289 struct device *dev = kobj_to_dev(kobj); dev_uevent_name()
301 struct device *dev = kobj_to_dev(kobj); dev_uevent()
304 /* add device node properties if present */ dev_uevent()
333 /* Add common DT information about the device */ dev_uevent()
340 pr_debug("device: '%s': %s: bus uevent() returned %d\n", dev_uevent()
348 pr_debug("device: '%s': %s: class uevent() " dev_uevent()
353 /* have the device type specific function add its stuff */ dev_uevent()
357 pr_debug("device: '%s': %s: dev_type uevent() " dev_uevent()
371 static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, uevent_show()
381 /* search the kset, the device belongs to */ uevent_show()
414 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, uevent_store()
427 static ssize_t online_show(struct device *dev, struct device_attribute *attr, online_show()
438 static ssize_t online_store(struct device *dev, struct device_attribute *attr, online_store()
458 int device_add_groups(struct device *dev, const struct attribute_group **groups) device_add_groups()
463 void device_remove_groups(struct device *dev, device_remove_groups()
469 static int device_add_attrs(struct device *dev) device_add_attrs()
511 static void device_remove_attrs(struct device *dev) device_remove_attrs()
526 static ssize_t dev_show(struct device *dev, struct device_attribute *attr, dev_show()
537 * devices_kset_move_before - Move device in the devices_kset's list.
541 static void devices_kset_move_before(struct device *deva, struct device *devb) devices_kset_move_before()
553 * devices_kset_move_after - Move device in the devices_kset's list.
557 static void devices_kset_move_after(struct device *deva, struct device *devb) devices_kset_move_after()
569 * devices_kset_move_last - move the device to the end of devices_kset's list.
570 * @dev: device to move
572 void devices_kset_move_last(struct device *dev) devices_kset_move_last()
583 * device_create_file - create sysfs attribute file for device.
584 * @dev: device.
585 * @attr: device attribute descriptor.
587 int device_create_file(struct device *dev, device_create_file()
608 * @dev: device.
609 * @attr: device attribute descriptor.
611 void device_remove_file(struct device *dev, device_remove_file()
621 * @dev: device.
622 * @attr: device attribute descriptor.
626 bool device_remove_file_self(struct device *dev, device_remove_file_self()
637 * device_create_bin_file - create sysfs binary attribute file for device.
638 * @dev: device.
639 * @attr: device binary attribute descriptor.
641 int device_create_bin_file(struct device *dev, device_create_bin_file()
653 * @dev: device.
654 * @attr: device binary attribute descriptor.
656 void device_remove_bin_file(struct device *dev, device_remove_bin_file()
667 struct device *dev = p->device; klist_children_get()
675 struct device *dev = p->device; klist_children_put()
681 * device_initialize - init device structure.
682 * @dev: device.
684 * This prepares the device for use by other layers by initializing
700 void device_initialize(struct device *dev) device_initialize()
717 struct kobject *virtual_device_parent(struct device *dev) virtual_device_parent()
779 static struct kobject *get_device_parent(struct device *dev, get_device_parent()
780 struct device *parent) get_device_parent()
798 * Class-devices with a non class-device as parent, live get_device_parent()
823 /* or create a new class-directory at the parent device */ get_device_parent()
839 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) cleanup_glue_dir()
851 static void cleanup_device_parent(struct device *dev) cleanup_device_parent()
856 static int device_add_class_symlinks(struct device *dev) device_add_class_symlinks()
865 /* An error here doesn't warrant bringing down the device */ device_add_class_symlinks()
879 "device"); device_add_class_symlinks()
890 /* link in the class directory pointing to the device */ device_add_class_symlinks()
899 sysfs_remove_link(&dev->kobj, "device"); device_add_class_symlinks()
908 static void device_remove_class_symlinks(struct device *dev) device_remove_class_symlinks()
917 sysfs_remove_link(&dev->kobj, "device"); device_remove_class_symlinks()
927 * dev_set_name - set a device name
928 * @dev: device
929 * @fmt: format string for the device's name
931 int dev_set_name(struct device *dev, const char *fmt, ...) dev_set_name()
944 * device_to_dev_kobj - select a /sys/dev/ directory for the device
945 * @dev: device
954 static struct kobject *device_to_dev_kobj(struct device *dev) device_to_dev_kobj()
966 static int device_create_sys_dev_entry(struct device *dev) device_create_sys_dev_entry()
980 static void device_remove_sys_dev_entry(struct device *dev) device_remove_sys_dev_entry()
991 int device_private_init(struct device *dev) device_private_init()
996 dev->p->device = dev; device_private_init()
1004 * device_add - add device to device hierarchy.
1005 * @dev: device.
1011 * to the global and sibling lists for the device, then
1015 * any device structure. The driver model core is not designed to work
1019 * and register a fresh new struct device instead.
1025 int device_add(struct device *dev) device_add()
1027 struct device *parent = NULL; device_add()
1052 /* subsystems can specify simple device enumeration */ device_add()
1061 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); device_add()
1078 /* notify platform of device entry */ device_add()
1112 /* Notify clients of device addition. This call must come device_add()
1127 /* tie the class to the device */ device_add()
1131 /* notify any interfaces that the device is here */ device_add()
1169 * device_register - register a device with the system.
1170 * @dev: pointer to the device structure
1172 * This happens in two clean steps - initialize the device
1176 * have a clearly defined need to use and refcount the device
1186 int device_register(struct device *dev) device_register()
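The "two clean steps" named above carry a well-known failure rule: once device_initialize() has run, every exit path must go through put_device() so the release() callback frees the object. A sketch of that pattern; struct foo and its names are hypothetical:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct foo {
    	struct device dev;	/* hypothetical embedding */
    };

    static void foo_release(struct device *dev)
    {
    	kfree(container_of(dev, struct foo, dev));
    }

    static int foo_add(struct device *parent)
    {
    	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
    	int err;

    	if (!f)
    		return -ENOMEM;

    	device_initialize(&f->dev);	/* step 1: refcount is live now */
    	f->dev.parent = parent;
    	f->dev.release = foo_release;

    	err = dev_set_name(&f->dev, "foo0");
    	if (!err)
    		err = device_add(&f->dev);	/* step 2 */
    	if (err)
    		put_device(&f->dev);	/* release() frees f */
    	return err;
    }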
1194 * get_device - increment reference count for device.
1195 * @dev: device.
1201 struct device *get_device(struct device *dev) get_device()
1209 * @dev: device in question.
1211 void put_device(struct device *dev) put_device()
1220 * device_del - delete device from system.
1221 * @dev: device.
1223 * This is the first part of the device unregistration
1224 * sequence. This removes the device from the lists we control
1232 void device_del(struct device *dev) device_del()
1234 struct device *parent = dev->parent; device_del()
1237 /* Notify clients of device removal. This call must come device_del()
1255 /* notify any interfaces that the device is now gone */ device_del()
1260 /* remove the device from the class list */ device_del()
1286 * device_unregister - unregister device from system.
1287 * @dev: device going away.
1292 * is the final reference count, the device will be cleaned up
1294 * stick around until the final reference to the device is dropped.
1296 void device_unregister(struct device *dev) device_unregister()
1298 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); device_unregister()
1304 static struct device *prev_device(struct klist_iter *i) prev_device()
1307 struct device *dev = NULL; prev_device()
1312 dev = p->device; prev_device()
1317 static struct device *next_device(struct klist_iter *i) next_device()
1320 struct device *dev = NULL; next_device()
1325 dev = p->device; next_device()
1331 * device_get_devnode - path of device node file
1332 * @dev: device
1338 * Return the relative path of a possible device node.
1343 const char *device_get_devnode(struct device *dev, device_get_devnode()
1351 /* the device type may provide a specific name */ device_get_devnode()
1376 * device_for_each_child - device child iterator.
1377 * @parent: parent struct device.
1378 * @fn: function to be called for each device.
1387 int device_for_each_child(struct device *parent, void *data, device_for_each_child()
1388 int (*fn)(struct device *dev, void *data)) device_for_each_child()
1391 struct device *child; device_for_each_child()
1406 * device_for_each_child_reverse - device child iterator in reversed order.
1407 * @parent: parent struct device.
1408 * @fn: function to be called for each device.
1417 int device_for_each_child_reverse(struct device *parent, void *data, device_for_each_child_reverse()
1418 int (*fn)(struct device *dev, void *data)) device_for_each_child_reverse()
1421 struct device *child; device_for_each_child_reverse()
1436 * device_find_child - device iterator for locating a particular device.
1437 * @parent: parent struct device
1438 * @match: Callback function to check device
1442 * returns a reference to a device that is 'found' for later use, as
1445 * The callback should return 0 if the device doesn't match and non-zero
1447 * current device can be obtained, this function will return to the caller
1452 struct device *device_find_child(struct device *parent, void *data, device_find_child()
1453 int (*match)(struct device *dev, void *data)) device_find_child()
1456 struct device *child; device_find_child()
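A short example of the iterator contract described above; the callback names are illustrative. The callback returns 0 to keep walking and non-zero to stop, and device_find_child() hands back a reference the caller must drop with put_device():

    static int count_one(struct device *dev, void *data)
    {
    	(*(unsigned int *)data)++;
    	return 0;	/* 0 == keep iterating */
    }

    static unsigned int count_children(struct device *parent)
    {
    	unsigned int n = 0;

    	device_for_each_child(parent, &n, count_one);
    	return n;
    }

    static int match_by_name(struct device *dev, void *data)
    {
    	return sysfs_streq(dev_name(dev), data);
    }

    /* Caller owns the returned reference: put_device() when done. */
    static struct device *find_child_named(struct device *parent, char *name)
    {
    	return device_find_child(parent, name, match_by_name);
    }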
1496 static int device_check_offline(struct device *dev, void *not_used) device_check_offline()
1508 * device_offline - Prepare the device for hot-removal.
1511 * Execute the device bus type's .offline() callback, if present, to prepare
1512 * the device for a subsequent hot-removal. If that succeeds, the device must
1518 int device_offline(struct device *dev) device_offline()
1547 * device_online - Put the device back online after successful device_offline().
1550 * If device_offline() has been successfully executed for @dev, but the device
1552 * to indicate that the device can be used again.
1556 int device_online(struct device *dev) device_online()
1578 struct device dev;
1582 static inline struct root_device *to_root_device(struct device *d) to_root_device()
1587 static void root_device_release(struct device *dev) root_device_release()
1593 * __root_device_register - allocate and register a root device
1594 * @name: root device name
1595 * @owner: owner module of the root device, usually THIS_MODULE
1597 * This function allocates a root device and registers it
1599 * device, use root_device_unregister().
1603 * allocate a root device and then use it as the parent of
1604 * any device which should appear under /sys/devices/{name}
1610 * Returns &struct device pointer on success, or ERR_PTR() on error.
1614 struct device *__root_device_register(const char *name, struct module *owner) __root_device_register()
1655 * root_device_unregister - unregister and free a root device
1656 * @dev: device going away
1658 * This function unregisters and cleans up a device that was created by
1661 void root_device_unregister(struct device *dev) root_device_unregister()
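A compact usage sketch of the root-device helpers above; "foo" is illustrative. The returned device becomes the parent for anything meant to appear under /sys/devices/foo, and root_device_register() is the THIS_MODULE convenience form of __root_device_register():

    #include <linux/device.h>
    #include <linux/err.h>

    static struct device *foo_root;

    static int __init foo_init(void)
    {
    	foo_root = root_device_register("foo");
    	return PTR_ERR_OR_ZERO(foo_root);
    }

    static void __exit foo_exit(void)
    {
    	root_device_unregister(foo_root);
    }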
1673 static void device_create_release(struct device *dev) device_create_release()
1675 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); device_create_release()
1679 static struct device * device_create_groups_vargs()
1680 device_create_groups_vargs(struct class *class, struct device *parent, device_create_groups_vargs()
1685 struct device *dev = NULL; device_create_groups_vargs()
1721 * device_create_vargs - creates a device and registers it with sysfs
1722 * @class: pointer to the struct class that this device should be registered to
1723 * @parent: pointer to the parent struct device of this new device, if any
1724 * @devt: the dev_t for the char device to be added
1725 * @drvdata: the data to be added to the device for callbacks
1726 * @fmt: string for the device's name
1727 * @args: va_list for the device's name
1729 * This function can be used by char device classes. A struct device
1732 * A "dev" file will be created, showing the dev_t for the device, if
1734 * If a pointer to a parent struct device is passed in, the newly created
1735 * struct device will be a child of that device in sysfs.
1736 * The pointer to the struct device will be returned from the call.
1740 * Returns &struct device pointer on success, or ERR_PTR() on error.
1745 struct device *device_create_vargs(struct class *class, struct device *parent, device_create_vargs()
1755 * device_create - creates a device and registers it with sysfs
1756 * @class: pointer to the struct class that this device should be registered to
1757 * @parent: pointer to the parent struct device of this new device, if any
1758 * @devt: the dev_t for the char device to be added
1759 * @drvdata: the data to be added to the device for callbacks
1760 * @fmt: string for the device's name
1762 * This function can be used by char device classes. A struct device
1765 * A "dev" file will be created, showing the dev_t for the device, if
1767 * If a pointer to a parent struct device is passed in, the newly created
1768 * struct device will be a child of that device in sysfs.
1769 * The pointer to the struct device will be returned from the call.
1773 * Returns &struct device pointer on success, or ERR_PTR() on error.
1778 struct device *device_create(struct class *class, struct device *parent, device_create()
1782 struct device *dev; device_create()
1792 * device_create_with_groups - creates a device and registers it with sysfs
1793 * @class: pointer to the struct class that this device should be registered to
1794 * @parent: pointer to the parent struct device of this new device, if any
1795 * @devt: the dev_t for the char device to be added
1796 * @drvdata: the data to be added to the device for callbacks
1798 * @fmt: string for the device's name
1800 * This function can be used by char device classes. A struct device
1805 * A "dev" file will be created, showing the dev_t for the device, if
1807 * If a pointer to a parent struct device is passed in, the newly created
1808 * struct device will be a child of that device in sysfs.
1809 * The pointer to the struct device will be returned from the call.
1813 * Returns &struct device pointer on success, or ERR_PTR() on error.
1818 struct device *device_create_with_groups(struct class *class, device_create_with_groups()
1819 struct device *parent, dev_t devt, device_create_with_groups()
1825 struct device *dev; device_create_with_groups()
1835 static int __match_devt(struct device *dev, const void *data) __match_devt()
1843 * device_destroy - removes a device that was created with device_create()
1844 * @class: pointer to the struct class that this device was registered with
1845 * @devt: the dev_t of the device that was previously registered
1847 * This call unregisters and cleans up a device that was created with a
1852 struct device *dev; device_destroy()
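
A hedged create/destroy sketch for the pair of helpers above; demo_class and demo_devt are hypothetical (they would come from class_create() and alloc_chrdev_region()):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>

static struct class *demo_class;	/* hypothetical */
static dev_t demo_devt;			/* hypothetical */

static int demo_add_node(void *drvdata)
{
	struct device *dev = device_create(demo_class, NULL, demo_devt,
					   drvdata, "demo%d", MINOR(demo_devt));

	return PTR_ERR_OR_ZERO(dev);
}

static void demo_remove_node(void)
{
	device_destroy(demo_class, demo_devt);	/* looks the device up by devt */
}
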
1863 * device_rename - renames a device
1864 * @dev: the pointer to the struct device to be renamed
1865 * @new_name: the new name of the device
1869 * on the same device to ensure that new_name is valid and
1878 * connect the event to the old and new device. Device nodes are not renamed at
1885 * kernel device renaming. Besides that, it's not even implemented now for
1896 * some other attributes for userspace to find the device, or use udev to add
1901 int device_rename(struct device *dev, const char *new_name) device_rename()
1940 static int device_move_class_links(struct device *dev, device_move_class_links()
1941 struct device *old_parent, device_move_class_links()
1942 struct device *new_parent) device_move_class_links()
1947 sysfs_remove_link(&dev->kobj, "device"); device_move_class_links()
1950 "device"); device_move_class_links()
1955 * device_move - moves a device to a new parent
1956 * @dev: the pointer to the struct device to be moved
1957 * @new_parent: the new parent of the device (can be NULL) device_move()
1960 int device_move(struct device *dev, struct device *new_parent, device_move()
1964 struct device *old_parent; device_move()
1975 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), device_move()
2039 * device_shutdown - call ->shutdown() on each device to shutdown.
2043 struct device *dev, *parent; device_shutdown()
2048 * Beware that device unplug events may also start pulling device_shutdown()
2052 dev = list_entry(devices_kset->list.prev, struct device, device_shutdown()
2056 * hold reference count of device's parent to device_shutdown()
2063 * Make sure the device is off the kset list, in the device_shutdown()
2106 create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen) create_syslog_header()
2123 * Add device identifier DEVICE=: create_syslog_header()
2158 dev_WARN(dev, "device/subsystem name too long"); create_syslog_header()
2162 int dev_vprintk_emit(int level, const struct device *dev, dev_vprintk_emit()
2174 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) dev_printk_emit()
2189 static void __dev_printk(const char *level, const struct device *dev, __dev_printk()
2196 printk("%s(NULL device *): %pV", level, vaf); __dev_printk()
2199 void dev_printk(const char *level, const struct device *dev, dev_printk()
2217 void func(const struct device *dev, const char *fmt, ...) \
2249 * set_primary_fwnode - Change the primary firmware node of a given device.
2251 * @fwnode: New primary firmware node of the device.
2253 * Set the device's firmware node pointer to @fwnode, but if a secondary
2254 * firmware node of the device is present, preserve it.
2256 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) set_primary_fwnode()
2274 * set_secondary_fwnode - Change the secondary firmware node of a given device.
2276 * @fwnode: New secondary firmware node of the device.
2278 * If a primary firmware node of the device is present, set its secondary
2279 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to
2282 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) set_secondary_fwnode()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvif/
H A Ddevice.c25 #include <nvif/device.h>
28 nvif_device_time(struct nvif_device *device) nvif_device_time() argument
31 int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME, nvif_device_time()
38 nvif_device_fini(struct nvif_device *device) nvif_device_fini() argument
40 nvif_object_fini(&device->object); nvif_device_fini()
45 void *data, u32 size, struct nvif_device *device) nvif_device_init()
48 &device->object); nvif_device_init()
50 device->info.version = 0; nvif_device_init()
51 ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO, nvif_device_init()
52 &device->info, sizeof(device->info)); nvif_device_init()
44 nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass, void *data, u32 size, struct nvif_device *device) nvif_device_init() argument
/linux-4.4.14/include/linux/usb/
H A Dquirks.h3 * Only quirks that affect the whole device, not an interface,
13 /* device can't resume correctly so reset it instead */
16 /* device can't handle Set-Interface requests */
19 /* device can't handle its Configuration or Interface strings */
22 /* device can't be reset (e.g. morph devices), don't use reset */
25 /* device has more interface descriptions than the bNumInterfaces count,
29 /* device needs a pause during initialization, after we read the device
44 /* device can't handle device_qualifier descriptor requests */
47 /* device generates spurious wakeup, ignore remote wakeup capability */
50 /* device can't handle Link Power Management */
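
The flags documented above are OR'd together per device in the core's quirks table (drivers/usb/core/quirks.c). A sketch of one entry, with made-up vendor/product IDs:

#include <linux/usb.h>
#include <linux/usb/quirks.h>

/* 0x1234/0x5678 are hypothetical IDs for illustration only. */
static const struct usb_device_id example_quirk_list[] = {
	{ USB_DEVICE(0x1234, 0x5678),
	  .driver_info = USB_QUIRK_RESET_RESUME | USB_QUIRK_NO_LPM },
	{ }	/* terminating entry */
};
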
/linux-4.4.14/tools/power/cpupower/utils/helpers/
H A Dpci.c18 * device: device
29 struct pci_dev *device; pci_acc_init() local
41 filter_nb_link.device = dev; pci_acc_init()
46 for (device = (*pacc)->devices; device; device = device->next) { pci_acc_init()
47 if (pci_filter_match(&filter_nb_link, device)) pci_acc_init()
48 return device; pci_acc_init()
54 /* Typically one wants to get a specific slot (device)/func of the root domain
/linux-4.4.14/drivers/misc/cxl/
H A Dsysfs.c11 #include <linux/device.h>
21 static ssize_t caia_version_show(struct device *device, caia_version_show() argument
25 struct cxl *adapter = to_cxl_adapter(device); caia_version_show()
31 static ssize_t psl_revision_show(struct device *device, psl_revision_show() argument
35 struct cxl *adapter = to_cxl_adapter(device); psl_revision_show()
40 static ssize_t base_image_show(struct device *device, base_image_show() argument
44 struct cxl *adapter = to_cxl_adapter(device); base_image_show()
49 static ssize_t image_loaded_show(struct device *device, image_loaded_show() argument
53 struct cxl *adapter = to_cxl_adapter(device); image_loaded_show()
60 static ssize_t reset_adapter_store(struct device *device, reset_adapter_store() argument
64 struct cxl *adapter = to_cxl_adapter(device); reset_adapter_store()
77 static ssize_t load_image_on_perst_show(struct device *device, load_image_on_perst_show() argument
81 struct cxl *adapter = to_cxl_adapter(device); load_image_on_perst_show()
91 static ssize_t load_image_on_perst_store(struct device *device, load_image_on_perst_store() argument
95 struct cxl *adapter = to_cxl_adapter(device); load_image_on_perst_store()
115 static ssize_t perst_reloads_same_image_show(struct device *device, perst_reloads_same_image_show() argument
119 struct cxl *adapter = to_cxl_adapter(device); perst_reloads_same_image_show()
124 static ssize_t perst_reloads_same_image_store(struct device *device, perst_reloads_same_image_store() argument
128 struct cxl *adapter = to_cxl_adapter(device); perst_reloads_same_image_store()
153 static ssize_t mmio_size_show_master(struct device *device, mmio_size_show_master() argument
157 struct cxl_afu *afu = to_afu_chardev_m(device); mmio_size_show_master()
162 static ssize_t pp_mmio_off_show(struct device *device, pp_mmio_off_show() argument
166 struct cxl_afu *afu = to_afu_chardev_m(device); pp_mmio_off_show()
171 static ssize_t pp_mmio_len_show(struct device *device, pp_mmio_len_show() argument
175 struct cxl_afu *afu = to_afu_chardev_m(device); pp_mmio_len_show()
189 static ssize_t mmio_size_show(struct device *device, mmio_size_show() argument
193 struct cxl_afu *afu = to_cxl_afu(device); mmio_size_show()
200 static ssize_t reset_store_afu(struct device *device, reset_store_afu() argument
204 struct cxl_afu *afu = to_cxl_afu(device); reset_store_afu()
223 static ssize_t irqs_min_show(struct device *device, irqs_min_show() argument
227 struct cxl_afu *afu = to_cxl_afu(device); irqs_min_show()
232 static ssize_t irqs_max_show(struct device *device, irqs_max_show() argument
236 struct cxl_afu *afu = to_cxl_afu(device); irqs_max_show()
241 static ssize_t irqs_max_store(struct device *device, irqs_max_store() argument
245 struct cxl_afu *afu = to_cxl_afu(device); irqs_max_store()
263 static ssize_t modes_supported_show(struct device *device, modes_supported_show() argument
266 struct cxl_afu *afu = to_cxl_afu(device); modes_supported_show()
276 static ssize_t prefault_mode_show(struct device *device, prefault_mode_show() argument
280 struct cxl_afu *afu = to_cxl_afu(device); prefault_mode_show()
292 static ssize_t prefault_mode_store(struct device *device, prefault_mode_store() argument
296 struct cxl_afu *afu = to_cxl_afu(device); prefault_mode_store()
313 static ssize_t mode_show(struct device *device, mode_show() argument
317 struct cxl_afu *afu = to_cxl_afu(device); mode_show()
326 static ssize_t mode_store(struct device *device, struct device_attribute *attr, mode_store() argument
329 struct cxl_afu *afu = to_cxl_afu(device); mode_store()
371 static ssize_t api_version_show(struct device *device, api_version_show() argument
378 static ssize_t api_version_compatible_show(struct device *device, api_version_compatible_show() argument
390 struct device, kobj)); afu_eb_read()
434 u16 device; member in struct:afu_config_record
454 return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device); device_show()
470 struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj)); afu_read_config()
486 __ATTR_RO(device);
520 cr->device = cxl_afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID); cxl_sysfs_afu_new_cr()
531 * AFUs, which can be done via the vendor, device and class attributes. cxl_sysfs_afu_new_cr()
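
The attributes above all follow one pattern: recover the driver object from the struct device, format a value into buf. A generic sketch (my_adapter and its field are hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct my_adapter {			/* hypothetical driver object */
	unsigned int version;
};

static ssize_t version_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	struct my_adapter *adapter = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "%u\n", adapter->version);
}
static DEVICE_ATTR_RO(version);		/* creates the "version" file */
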
/linux-4.4.14/include/sound/
H A Domap-hdmi-audio.h25 int (*audio_startup)(struct device *dev,
26 void (*abort_cb)(struct device *dev));
27 int (*audio_shutdown)(struct device *dev);
28 int (*audio_start)(struct device *dev);
29 void (*audio_stop)(struct device *dev);
30 int (*audio_config)(struct device *dev,
36 struct device *dev;
/linux-4.4.14/drivers/video/fbdev/core/
H A Dfbsysfs.c2 * fbsysfs.c - framebuffer device class and attributes
30 * @dev: pointer to the device for this fb, this can be NULL
39 struct fb_info *framebuffer_alloc(size_t size, struct device *dev) framebuffer_alloc()
60 info->device = dev; framebuffer_alloc()
77 * Drop the reference count of the device embedded in the
127 static ssize_t store_mode(struct device *device, struct device_attribute *attr, store_mode() argument
130 struct fb_info *fb_info = dev_get_drvdata(device); store_mode()
158 static ssize_t show_mode(struct device *device, struct device_attribute *attr, show_mode() argument
161 struct fb_info *fb_info = dev_get_drvdata(device); show_mode()
169 static ssize_t store_modes(struct device *device, store_modes() argument
173 struct fb_info *fb_info = dev_get_drvdata(device); store_modes()
201 static ssize_t show_modes(struct device *device, struct device_attribute *attr, show_modes() argument
204 struct fb_info *fb_info = dev_get_drvdata(device); show_modes()
219 static ssize_t store_bpp(struct device *device, struct device_attribute *attr, store_bpp() argument
222 struct fb_info *fb_info = dev_get_drvdata(device); store_bpp()
234 static ssize_t show_bpp(struct device *device, struct device_attribute *attr, show_bpp() argument
237 struct fb_info *fb_info = dev_get_drvdata(device); show_bpp()
241 static ssize_t store_rotate(struct device *device, store_rotate() argument
245 struct fb_info *fb_info = dev_get_drvdata(device); store_rotate()
260 static ssize_t show_rotate(struct device *device, show_rotate() argument
263 struct fb_info *fb_info = dev_get_drvdata(device); show_rotate()
268 static ssize_t store_virtual(struct device *device, store_virtual() argument
272 struct fb_info *fb_info = dev_get_drvdata(device); store_virtual()
289 static ssize_t show_virtual(struct device *device, show_virtual() argument
292 struct fb_info *fb_info = dev_get_drvdata(device); show_virtual()
297 static ssize_t show_stride(struct device *device, show_stride() argument
300 struct fb_info *fb_info = dev_get_drvdata(device); show_stride()
304 static ssize_t store_blank(struct device *device, store_blank() argument
308 struct fb_info *fb_info = dev_get_drvdata(device); store_blank()
322 static ssize_t show_blank(struct device *device, show_blank() argument
325 // struct fb_info *fb_info = dev_get_drvdata(device); show_blank()
329 static ssize_t store_console(struct device *device, store_console() argument
333 // struct fb_info *fb_info = dev_get_drvdata(device); store_console()
337 static ssize_t show_console(struct device *device, show_console() argument
340 // struct fb_info *fb_info = dev_get_drvdata(device); show_console()
344 static ssize_t store_cursor(struct device *device, store_cursor() argument
348 // struct fb_info *fb_info = dev_get_drvdata(device); store_cursor()
352 static ssize_t show_cursor(struct device *device, show_cursor() argument
355 // struct fb_info *fb_info = dev_get_drvdata(device); show_cursor()
359 static ssize_t store_pan(struct device *device, store_pan() argument
363 struct fb_info *fb_info = dev_get_drvdata(device); store_pan()
384 static ssize_t show_pan(struct device *device, show_pan() argument
387 struct fb_info *fb_info = dev_get_drvdata(device); show_pan()
392 static ssize_t show_name(struct device *device, show_name() argument
395 struct fb_info *fb_info = dev_get_drvdata(device); show_name()
400 static ssize_t store_fbstate(struct device *device, store_fbstate() argument
404 struct fb_info *fb_info = dev_get_drvdata(device); store_fbstate()
424 static ssize_t show_fbstate(struct device *device, show_fbstate() argument
427 struct fb_info *fb_info = dev_get_drvdata(device); show_fbstate()
432 static ssize_t store_bl_curve(struct device *device, store_bl_curve() argument
436 struct fb_info *fb_info = dev_get_drvdata(device); store_bl_curve()
473 static ssize_t show_bl_curve(struct device *device, show_bl_curve() argument
476 struct fb_info *fb_info = dev_get_drvdata(device); show_bl_curve()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mc/
H A Dnv04.c44 struct nvkm_device *device = mc->subdev.device; nv04_mc_intr_unarm() local
45 nvkm_wr32(device, 0x000140, 0x00000000); nv04_mc_intr_unarm()
46 nvkm_rd32(device, 0x000140); nv04_mc_intr_unarm()
52 struct nvkm_device *device = mc->subdev.device; nv04_mc_intr_rearm() local
53 nvkm_wr32(device, 0x000140, 0x00000001); nv04_mc_intr_rearm()
59 return nvkm_rd32(mc->subdev.device, 0x000100); nv04_mc_intr_mask()
65 struct nvkm_device *device = mc->subdev.device; nv04_mc_init() local
66 nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */ nv04_mc_init()
67 nvkm_wr32(device, 0x001850, 0x00000001); /* disable rom access */ nv04_mc_init()
80 nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) nv04_mc_new() argument
82 return nvkm_mc_new_(&nv04_mc, device, index, pmc); nv04_mc_new()
H A Dnv44.c29 struct nvkm_device *device = mc->subdev.device; nv44_mc_init() local
30 u32 tmp = nvkm_rd32(device, 0x10020c); nv44_mc_init()
32 nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */ nv44_mc_init()
34 nvkm_wr32(device, 0x001700, tmp); nv44_mc_init()
35 nvkm_wr32(device, 0x001704, 0); nv44_mc_init()
36 nvkm_wr32(device, 0x001708, 0); nv44_mc_init()
37 nvkm_wr32(device, 0x00170c, tmp); nv44_mc_init()
50 nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) nv44_mc_new() argument
52 return nvkm_mc_new_(&nv44_mc, device, index, pmc); nv44_mc_new()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/pm/
H A Dnv40.c30 struct nvkm_device *device = pm->engine.subdev.device; nv40_perfctr_init() local
38 nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4)); nv40_perfctr_init()
39 nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src); nv40_perfctr_init()
40 nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log); nv40_perfctr_init()
47 struct nvkm_device *device = pm->engine.subdev.device; nv40_perfctr_read() local
50 case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break; nv40_perfctr_read()
51 case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break; nv40_perfctr_read()
52 case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break; nv40_perfctr_read()
53 case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break; nv40_perfctr_read()
55 dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr); nv40_perfctr_read()
61 struct nvkm_device *device = pm->engine.subdev.device; nv40_perfctr_next() local
63 nvkm_wr32(device, 0x400084, 0x00000020); nv40_perfctr_next()
80 nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device, nv40_pm_new_() argument
90 ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base); nv40_pm_new_()
118 nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) nv40_pm_new() argument
120 return nv40_pm_new_(nv40_pm, device, index, ppm); nv40_pm_new()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
H A Dnv41.c72 struct nvkm_device *device = mmu->base.subdev.device; nv41_vm_flush() local
75 nvkm_wr32(device, 0x100810, 0x00000022); nv41_vm_flush()
76 nvkm_msec(device, 2000, nv41_vm_flush()
77 if (nvkm_rd32(device, 0x100810) & 0x00000020) nv41_vm_flush()
80 nvkm_wr32(device, 0x100810, 0x00000000); nv41_vm_flush()
92 struct nvkm_device *device = mmu->base.subdev.device; nv41_mmu_oneinit() local
100 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, nv41_mmu_oneinit()
111 struct nvkm_device *device = mmu->base.subdev.device; nv41_mmu_init() local
113 nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma)); nv41_mmu_init()
114 nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100); nv41_mmu_init()
115 nvkm_wr32(device, 0x100820, 0x00000000); nv41_mmu_init()
134 nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) nv41_mmu_new() argument
136 if (device->type == NVKM_DEVICE_AGP || nv41_mmu_new()
137 !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) nv41_mmu_new()
138 return nv04_mmu_new(device, index, pmmu); nv41_mmu_new()
140 return nv04_mmu_new_(&nv41_mmu, device, index, pmmu); nv41_mmu_new()
/linux-4.4.14/drivers/usb/serial/
H A Dqcserial.c27 /* standard device layouts supported by this driver */
44 {DEVICE_G1K(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
48 {DEVICE_G1K(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
49 {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
50 {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */
51 {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */
53 {DEVICE_G1K(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
54 {DEVICE_G1K(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
55 {DEVICE_G1K(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
56 {DEVICE_G1K(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
57 {DEVICE_G1K(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
58 {DEVICE_G1K(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
59 {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
60 {DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
61 {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
62 {DEVICE_G1K(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
63 {DEVICE_G1K(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */
64 {DEVICE_G1K(0x1557, 0x0a80)}, /* OQO Gobi QDL device */
65 {DEVICE_G1K(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
66 {DEVICE_G1K(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
67 {DEVICE_G1K(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
68 {DEVICE_G1K(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
69 {DEVICE_G1K(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
70 {DEVICE_G1K(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
71 {DEVICE_G1K(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
72 {DEVICE_G1K(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
73 {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
74 {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
75 {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
76 {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */
79 {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
80 {USB_DEVICE(0x1410, 0xa011)}, /* Novatel Gobi 2000 QDL device */
81 {USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi 2000 QDL device */
82 {USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi 2000 QDL device */
83 {USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi 2000 QDL device */
84 {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
85 {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
86 {USB_DEVICE(0x05c6, 0x9208)}, /* Generic Gobi 2000 QDL device */
87 {USB_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
88 {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
89 {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
90 {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
91 {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
92 {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
93 {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
94 {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
95 {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
96 {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
97 {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
98 {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
99 {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
100 {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
101 {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
102 {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
103 {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
104 {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
105 {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
106 {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
107 {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
108 {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
109 {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
110 {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
111 {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
112 {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
113 {USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
114 {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
115 {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
116 {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
117 {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
133 {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
135 {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
137 {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
179 static int handle_quectel_ec20(struct device *dev, int ifnum) handle_quectel_ec20()
214 struct device *dev = &serial->dev->dev; qcprobe()
387 dev_err(dev, "unsupported device layout type: %lu\n", qcprobe()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
H A Dramnv40.c38 struct nvkm_bios *bios = subdev->device->bios; nv40_ram_calc()
71 struct nvkm_device *device = subdev->device; nv40_ram_prog() local
72 struct nvkm_bios *bios = device->bios; nv40_ram_prog()
80 u32 vbl = nvkm_rd32(device, 0x600808 + (i * 0x2000)); nv40_ram_prog()
83 if (vbl != nvkm_rd32(device, 0x600808 + (i * 0x2000))) { nv40_ram_prog()
84 nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01); nv40_ram_prog()
85 sr1[i] = nvkm_rd08(device, 0x0c03c5 + (i * 0x2000)); nv40_ram_prog()
99 nvkm_msec(device, 2000, nv40_ram_prog()
100 u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000)); nv40_ram_prog()
105 nvkm_msec(device, 2000, nv40_ram_prog()
106 u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000)); nv40_ram_prog()
111 nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01); nv40_ram_prog()
112 nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20); nv40_ram_prog()
116 nvkm_wr32(device, 0x1002d4, 0x00000001); /* precharge */ nv40_ram_prog()
117 nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */ nv40_ram_prog()
118 nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */ nv40_ram_prog()
119 nvkm_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */ nv40_ram_prog()
120 nvkm_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */ nv40_ram_prog()
123 nvkm_mask(device, 0x00c040, 0x0000c000, 0x00000000); nv40_ram_prog()
124 switch (device->chipset) { nv40_ram_prog()
130 nvkm_mask(device, 0x004044, 0xc0771100, ram->ctrl); nv40_ram_prog()
131 nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl); nv40_ram_prog()
132 nvkm_wr32(device, 0x004048, ram->coef); nv40_ram_prog()
133 nvkm_wr32(device, 0x004030, ram->coef); nv40_ram_prog()
137 nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl); nv40_ram_prog()
138 nvkm_wr32(device, 0x00403c, ram->coef); nv40_ram_prog()
140 nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl); nv40_ram_prog()
141 nvkm_wr32(device, 0x004024, ram->coef); nv40_ram_prog()
145 nvkm_mask(device, 0x00c040, 0x0000c000, 0x0000c000); nv40_ram_prog()
148 nvkm_wr32(device, 0x1002dc, 0x00000000); nv40_ram_prog()
149 nvkm_mask(device, 0x100210, 0x80000000, 0x80000000); nv40_ram_prog()
171 nvkm_msec(device, 2000, nv40_ram_prog()
172 u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000)); nv40_ram_prog()
177 nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01); nv40_ram_prog()
178 nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]); nv40_ram_prog()
210 struct nvkm_device *device = fb->subdev.device; nv40_ram_new() local
211 u32 pbus1218 = nvkm_rd32(device, 0x001218); nv40_ram_new()
212 u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000; nv40_ram_new()
213 u32 tags = nvkm_rd32(device, 0x100320); nv40_ram_new()
228 (*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1; nv40_ram_new()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/clk/
H A Dgt215.c45 struct nvkm_device *device = clk->base.subdev.device; read_vco() local
46 u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4)); read_vco()
50 return device->crystal; read_vco()
63 struct nvkm_device *device = clk->base.subdev.device; read_clk() local
68 if (device->chipset == 0xaf) { read_clk()
70 return nvkm_rd32(device, 0x00471c) * 1000; read_clk()
73 return device->crystal; read_clk()
76 sctl = nvkm_rd32(device, 0x4120 + (idx * 4)); read_clk()
88 return device->crystal; read_clk()
110 struct nvkm_device *device = clk->base.subdev.device; read_pll() local
111 u32 ctrl = nvkm_rd32(device, pll + 0); read_pll()
116 u32 coef = nvkm_rd32(device, pll + 4); read_pll()
144 struct nvkm_device *device = subdev->device; gt215_clk_read() local
149 return device->crystal; gt215_clk_read()
164 hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28; gt215_clk_read()
252 ret = nvbios_pll_parse(subdev->device->bios, pll, &limits); gt215_pll_info()
306 struct nvkm_device *device = clk->subdev.device; gt215_clk_pre() local
307 struct nvkm_fifo *fifo = device->fifo; gt215_clk_pre()
310 nvkm_mask(device, 0x020060, 0x00070000, 0x00000000); gt215_clk_pre()
311 nvkm_mask(device, 0x002504, 0x00000001, 0x00000001); gt215_clk_pre()
313 if (nvkm_msec(device, 2000, gt215_clk_pre()
314 if (!nvkm_rd32(device, 0x000100)) gt215_clk_pre()
322 if (nvkm_msec(device, 2000, gt215_clk_pre()
323 if (nvkm_rd32(device, 0x002504) & 0x00000010) gt215_clk_pre()
328 if (nvkm_msec(device, 2000, gt215_clk_pre()
329 u32 tmp = nvkm_rd32(device, 0x00251c) & 0x0000003f; gt215_clk_pre()
341 struct nvkm_device *device = clk->subdev.device; gt215_clk_post() local
342 struct nvkm_fifo *fifo = device->fifo; gt215_clk_post()
347 nvkm_mask(device, 0x002504, 0x00000001, 0x00000000); gt215_clk_post()
348 nvkm_mask(device, 0x020060, 0x00070000, 0x00040000); gt215_clk_post()
354 struct nvkm_device *device = clk->base.subdev.device; disable_clk_src() local
355 nvkm_mask(device, src, 0x00000100, 0x00000000); disable_clk_src()
356 nvkm_mask(device, src, 0x00000001, 0x00000000); disable_clk_src()
363 struct nvkm_device *device = clk->base.subdev.device; prog_pll() local
372 bypass = nvkm_rd32(device, ctrl) & 0x00000008; prog_pll()
374 nvkm_mask(device, src1, 0x00000101, 0x00000101); prog_pll()
375 nvkm_mask(device, ctrl, 0x00000008, 0x00000008); prog_pll()
379 nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk); prog_pll()
380 nvkm_wr32(device, coef, info->pll); prog_pll()
381 nvkm_mask(device, ctrl, 0x00000015, 0x00000015); prog_pll()
382 nvkm_mask(device, ctrl, 0x00000010, 0x00000000); prog_pll()
383 if (nvkm_msec(device, 2000, prog_pll()
384 if (nvkm_rd32(device, ctrl) & 0x00020000) prog_pll()
387 nvkm_mask(device, ctrl, 0x00000010, 0x00000010); prog_pll()
388 nvkm_mask(device, src0, 0x00000101, 0x00000000); prog_pll()
391 nvkm_mask(device, ctrl, 0x00000010, 0x00000010); prog_pll()
392 nvkm_mask(device, ctrl, 0x00000008, 0x00000000); prog_pll()
395 nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk); prog_pll()
396 nvkm_mask(device, ctrl, 0x00000018, 0x00000018); prog_pll()
398 nvkm_mask(device, ctrl, 0x00000001, 0x00000000); prog_pll()
407 struct nvkm_device *device = clk->base.subdev.device; prog_clk() local
408 nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk); prog_clk()
415 struct nvkm_device *device = clk->base.subdev.device; prog_host() local
416 u32 hsrc = (nvkm_rd32(device, 0xc040)); prog_host()
421 nvkm_wr32(device, 0xc040, hsrc | 0x20000000); prog_host()
428 nvkm_wr32(device, 0xc040, hsrc & ~0x30000000); prog_host()
436 nvkm_wr32(device, 0xc044, 0x3e); prog_host()
443 struct nvkm_device *device = clk->base.subdev.device; prog_core() local
444 u32 fb_delay = nvkm_rd32(device, 0x10002c); prog_core()
447 nvkm_wr32(device, 0x10002c, info->fb_delay); prog_core()
452 nvkm_wr32(device, 0x10002c, info->fb_delay); prog_core()
537 gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) gt215_clk_new() argument
545 return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base); gt215_clk_new()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/cipher/
H A Dg84.c37 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, g84_cipher_oclass_bind()
59 return nvkm_gpuobj_new(object->engine->subdev.device, 256, g84_cipher_cclass_bind()
83 struct nvkm_device *device = subdev->device; g84_cipher_intr() local
84 struct nvkm_fifo *fifo = device->fifo; g84_cipher_intr()
86 u32 stat = nvkm_rd32(device, 0x102130); g84_cipher_intr()
87 u32 mthd = nvkm_rd32(device, 0x102190); g84_cipher_intr()
88 u32 data = nvkm_rd32(device, 0x102194); g84_cipher_intr()
89 u32 inst = nvkm_rd32(device, 0x102188) & 0x7fffffff; g84_cipher_intr()
104 nvkm_wr32(device, 0x102130, stat); g84_cipher_intr()
105 nvkm_wr32(device, 0x10200c, 0x10); g84_cipher_intr()
111 struct nvkm_device *device = cipher->subdev.device; g84_cipher_init() local
112 nvkm_wr32(device, 0x102130, 0xffffffff); g84_cipher_init()
113 nvkm_wr32(device, 0x102140, 0xffffffbf); g84_cipher_init()
114 nvkm_wr32(device, 0x10200c, 0x00000010); g84_cipher_init()
130 g84_cipher_new(struct nvkm_device *device, int index, g84_cipher_new() argument
133 return nvkm_engine_new_(&g84_cipher, device, index, g84_cipher_new()
/linux-4.4.14/drivers/pci/
H A Dsearch.c20 * pci_for_each_dma_alias - Iterate over DMA aliases for a device
21 * @pdev: starting downstream device
40 * If the device is broken and uses an alias requester ID for pci_for_each_dma_alias()
153 * @from is not %NULL, searches continue from next device on the
172 * pci_get_slot - locate PCI device for a given PCI slot
173 * @bus: PCI bus on which desired PCI device resides
175 * device resides and the logical device number within that slot
178 * Given a PCI bus and slot/function number, the desired PCI device
180 * If the device is found, its reference count is increased and this
183 * If no device is found, %NULL is returned.
206 * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
207 * @domain: PCI domain/segment on which the PCI device resides.
208 * @bus: PCI bus on which desired PCI device resides
209 * @devfn: encodes number of PCI slot in which the desired PCI device
210 * resides and the logical device number within that slot in case of
214 * device is located in the list of PCI devices. If the device is
217 * reference count by calling pci_dev_put(). If no device is found,
234 static int match_pci_dev_by_id(struct device *dev, void *data) match_pci_dev_by_id()
245 * pci_get_dev_by_id - begin or continue searching for a PCI device by id
246 * @id: pointer to struct pci_device_id to match for the device
247 * @from: Previous PCI device found in search, or %NULL for new search.
249 * Iterates through the list of known PCI devices. If a PCI device is found
250 * with a matching id a pointer to its device structure is returned, and the
251 * reference count to the device is incremented. Otherwise, %NULL is returned.
253 * if @from is not %NULL, searches continue from next device on the global
263 struct device *dev; pci_get_dev_by_id()
264 struct device *dev_start = NULL; pci_get_dev_by_id()
279 * pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
281 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
283 * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids
284 * @from: Previous PCI device found in search, or %NULL for new search.
286 * Iterates through the list of known PCI devices. If a PCI device is found
287 * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its
288 * device structure is returned, and the reference count to the device is
291 * searches continue from next device on the global list.
294 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, pci_get_subsys() argument
300 .device = device, pci_get_subsys()
310 * pci_get_device - begin or continue searching for a PCI device by vendor/device id
312 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
313 * @from: Previous PCI device found in search, or %NULL for new search.
315 * Iterates through the list of known PCI devices. If a PCI device is
316 * found with a matching @vendor and @device, the reference count to the
317 * device is incremented and a pointer to its device structure is returned.
320 * from next device on the global list. The reference count for @from is
323 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, pci_get_device() argument
326 return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); pci_get_device()
331 * pci_get_class - begin or continue searching for a PCI device by class
332 * @class: search for a PCI device with this class designation
333 * @from: Previous PCI device found in search, or %NULL for new search.
335 * Iterates through the list of known PCI devices. If a PCI device is
336 * found with a matching @class, the reference count to the device is
337 * incremented and a pointer to its device structure is returned.
340 * Otherwise if @from is not %NULL, searches continue from next device
348 .device = PCI_ANY_ID, pci_get_class()
360 * pci_dev_present - Returns 1 if a device matching the device list is present, 0 if not.
362 * that describe the type of PCI device the caller is trying to find.
364 * Obvious fact: You do not have a reference to any device that might be found
365 * by this function, so if that device is removed from the system right after
368 * to if another device happens to be present at this specific moment in time.
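
The canonical loop over these reference-counted search helpers, as a sketch:

#include <linux/pci.h>

static void walk_vendor(unsigned int vendor)
{
	struct pci_dev *pdev = NULL;

	/* pci_get_device() drops the reference on the device passed in
	 * via @from, so this loop leaks nothing; if you break out early
	 * you keep a reference and must pci_dev_put() it yourself. */
	while ((pdev = pci_get_device(vendor, PCI_ANY_ID, pdev)) != NULL)
		dev_info(&pdev->dev, "found %04x:%04x\n",
			 pdev->vendor, pdev->device);
}
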
/linux-4.4.14/include/linux/pinctrl/
H A Dconsumer.h23 struct device;
33 extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
40 extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
44 extern int pinctrl_pm_select_default_state(struct device *dev);
45 extern int pinctrl_pm_select_sleep_state(struct device *dev);
46 extern int pinctrl_pm_select_idle_state(struct device *dev);
48 static inline int pinctrl_pm_select_default_state(struct device *dev) pinctrl_pm_select_default_state()
52 static inline int pinctrl_pm_select_sleep_state(struct device *dev) pinctrl_pm_select_sleep_state()
56 static inline int pinctrl_pm_select_idle_state(struct device *dev) pinctrl_pm_select_idle_state()
83 static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) pinctrl_get()
105 static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) devm_pinctrl_get()
114 static inline int pinctrl_pm_select_default_state(struct device *dev) pinctrl_pm_select_default_state()
119 static inline int pinctrl_pm_select_sleep_state(struct device *dev) pinctrl_pm_select_sleep_state()
124 static inline int pinctrl_pm_select_idle_state(struct device *dev) pinctrl_pm_select_idle_state()
132 struct device *dev, const char *name) pinctrl_get_select()
158 struct device *dev) pinctrl_get_select_default()
164 struct device *dev, const char *name) devm_pinctrl_get_select()
190 struct device *dev) devm_pinctrl_get_select_default()
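
A probe-time sketch using the devres helper declared just above: acquire the handle and move the pins straight to the "default" state, with cleanup handled automatically on driver detach:

#include <linux/pinctrl/consumer.h>
#include <linux/err.h>

static int example_pins_setup(struct device *dev)
{
	struct pinctrl *p = devm_pinctrl_get_select_default(dev);

	return PTR_ERR_OR_ZERO(p);
}
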
/linux-4.4.14/arch/mips/include/asm/mach-cavium-octeon/
H A Ddma-coherence.h18 struct device;
22 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, plat_map_dma_mem()
29 static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, plat_map_dma_mem_page()
36 static inline unsigned long plat_dma_addr_to_phys(struct device *dev, plat_dma_addr_to_phys()
43 static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, plat_unmap_dma_mem()
49 static inline int plat_dma_supported(struct device *dev, u64 mask) plat_dma_supported()
55 static inline int plat_device_is_coherent(struct device *dev) plat_device_is_coherent()
60 static inline void plat_post_dma_flush(struct device *dev) plat_post_dma_flush()
64 dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
65 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
/linux-4.4.14/arch/mips/include/asm/mach-generic/
H A Ddma-coherence.h12 struct device;
14 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, plat_map_dma_mem()
20 static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, plat_map_dma_mem_page()
26 static inline unsigned long plat_dma_addr_to_phys(struct device *dev, plat_dma_addr_to_phys()
32 static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, plat_unmap_dma_mem()
37 static inline int plat_dma_supported(struct device *dev, u64 mask) plat_dma_supported()
50 static inline int plat_device_is_coherent(struct device *dev) plat_device_is_coherent()
56 static inline void plat_post_dma_flush(struct device *dev) plat_post_dma_flush()
62 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) phys_to_dma()
67 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) dma_to_phys()
/linux-4.4.14/arch/mips/include/asm/mach-loongson64/
H A Ddma-coherence.h18 struct device;
20 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
21 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); plat_map_dma_mem()
22 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, plat_map_dma_mem()
32 static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, plat_map_dma_mem_page()
42 static inline unsigned long plat_dma_addr_to_phys(struct device *dev, plat_dma_addr_to_phys()
54 static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, plat_unmap_dma_mem()
59 static inline int plat_dma_supported(struct device *dev, u64 mask) plat_dma_supported()
72 static inline int plat_device_is_coherent(struct device *dev) plat_device_is_coherent()
81 static inline void plat_post_dma_flush(struct device *dev) plat_post_dma_flush()
/linux-4.4.14/arch/s390/include/asm/
H A Ddiag.h64 * Diagnose 210: Get information about a virtual device
67 u16 vrdcdvno; /* device number (input) */
69 u8 vrdcvcla; /* virtual device class (output) */
70 u8 vrdcvtyp; /* virtual device type (output) */
71 u8 vrdcvsta; /* virtual device status (output) */
72 u8 vrdcvfla; /* virtual device flags (output) */
73 u8 vrdcrccl; /* real device class (output) */
74 u8 vrdccrty; /* real device type (output) */
75 u8 vrdccrmd; /* real device model (output) */
76 u8 vrdccrft; /* real device feature (output) */
/linux-4.4.14/include/linux/mfd/
H A Dtps6586x.h97 * of the TPS6586X sub-device drivers
99 extern int tps6586x_write(struct device *dev, int reg, uint8_t val);
100 extern int tps6586x_writes(struct device *dev, int reg, int len, uint8_t *val);
101 extern int tps6586x_read(struct device *dev, int reg, uint8_t *val);
102 extern int tps6586x_reads(struct device *dev, int reg, int len, uint8_t *val);
103 extern int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
104 extern int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
105 extern int tps6586x_update(struct device *dev, int reg, uint8_t val,
107 extern int tps6586x_irq_get_virq(struct device *dev, int irq);
108 extern int tps6586x_get_version(struct device *dev);
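
A hedged sketch of a sub-device driver using these helpers; the register offset, value, and mask are hypothetical, and the helpers take the parent (PMIC core) device:

#include <linux/device.h>
#include <linux/mfd/tps6586x.h>

static int example_set_field(struct device *subdev)
{
	/* Read-modify-write of register 0x20: set bits 0x01 within
	 * mask 0x03 (all values hypothetical). */
	return tps6586x_update(subdev->parent, 0x20, 0x01, 0x03);
}
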
/linux-4.4.14/drivers/dma/
H A Ddmaengine.c38 * Each device has a channels list, which runs unlocked but is never modified
39 * once the device is registered, it's just setup by the driver.
50 #include <linux/device.h>
74 * dev_to_dma_chan - convert a device pointer to its sysfs container object dev_to_dma_chan()
75 * @dev - device node
79 static struct dma_chan *dev_to_dma_chan(struct device *dev) dev_to_dma_chan()
83 chan_dev = container_of(dev, typeof(*chan_dev), device); dev_to_dma_chan()
87 static ssize_t memcpy_count_show(struct device *dev, memcpy_count_show()
109 static ssize_t bytes_transferred_show(struct device *dev, bytes_transferred_show()
131 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, in_use_show()
157 static void chan_dev_release(struct device *dev) chan_dev_release()
161 chan_dev = container_of(dev, typeof(*chan_dev), device); chan_dev_release()
177 /* --- client and device registration --- */
179 #define dma_device_satisfies_mask(device, mask) \
180 __dma_device_satisfies_mask((device), &(mask))
182 __dma_device_satisfies_mask(struct dma_device *device, __dma_device_satisfies_mask() argument
187 bitmap_and(has.bits, want->bits, device->cap_mask.bits, __dma_device_satisfies_mask()
194 return chan->device->dev->driver->owner; dma_chan_to_owner()
234 if (chan->device->device_alloc_chan_resources) { dma_chan_get()
235 ret = chan->device->device_alloc_chan_resources(chan); dma_chan_get()
240 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) dma_chan_get()
268 if (!chan->client_count && chan->device->device_free_chan_resources) dma_chan_put()
269 chan->device->device_free_chan_resources(chan); dma_chan_put()
366 struct dma_device *device; dma_issue_pending_all() local
370 list_for_each_entry_rcu(device, &dma_device_list, global_node) { dma_issue_pending_all()
371 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) dma_issue_pending_all()
373 list_for_each_entry(chan, &device->channels, device_node) dma_issue_pending_all()
375 device->device_issue_pending(chan); dma_issue_pending_all()
386 int node = dev_to_node(chan->device->dev); dma_chan_is_local()
402 struct dma_device *device; min_chan() local
407 list_for_each_entry(device, &dma_device_list, global_node) { min_chan()
408 if (!dma_has_cap(cap, device->cap_mask) || min_chan()
409 dma_has_cap(DMA_PRIVATE, device->cap_mask)) min_chan()
411 list_for_each_entry(chan, &device->channels, device_node) { min_chan()
443 struct dma_device *device; dma_channel_rebalance() local
452 list_for_each_entry(device, &dma_device_list, global_node) { dma_channel_rebalance()
453 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) dma_channel_rebalance()
455 list_for_each_entry(chan, &device->channels, device_node) dma_channel_rebalance()
473 struct dma_device *device; dma_get_slave_caps() local
478 device = chan->device; dma_get_slave_caps()
481 if (!test_bit(DMA_SLAVE, device->cap_mask.bits)) dma_get_slave_caps()
489 if (!device->directions) dma_get_slave_caps()
492 caps->src_addr_widths = device->src_addr_widths; dma_get_slave_caps()
493 caps->dst_addr_widths = device->dst_addr_widths; dma_get_slave_caps()
494 caps->directions = device->directions; dma_get_slave_caps()
495 caps->residue_granularity = device->residue_granularity; dma_get_slave_caps()
501 caps->cmd_pause = !!(device->device_pause && device->device_resume); dma_get_slave_caps()
502 caps->cmd_terminate = !!device->device_terminate_all; dma_get_slave_caps()
557 struct dma_device *device = chan->device; dma_get_slave_channel() local
559 dma_cap_set(DMA_PRIVATE, device->cap_mask); dma_get_slave_channel()
560 device->privatecnt++; dma_get_slave_channel()
566 if (--device->privatecnt == 0) dma_get_slave_channel()
567 dma_cap_clear(DMA_PRIVATE, device->cap_mask); dma_get_slave_channel()
579 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) dma_get_any_slave_channel() argument
591 chan = private_candidate(&mask, device, NULL, NULL); dma_get_any_slave_channel()
593 dma_cap_set(DMA_PRIVATE, device->cap_mask); dma_get_any_slave_channel()
594 device->privatecnt++; dma_get_any_slave_channel()
600 if (--device->privatecnt == 0) dma_get_any_slave_channel()
601 dma_cap_clear(DMA_PRIVATE, device->cap_mask); dma_get_any_slave_channel()
622 struct dma_device *device, *_d; __dma_request_channel() local
628 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { __dma_request_channel()
629 chan = private_candidate(mask, device, fn, fn_param); __dma_request_channel()
636 dma_cap_set(DMA_PRIVATE, device->cap_mask); __dma_request_channel()
637 device->privatecnt++; __dma_request_channel()
643 list_del_rcu(&device->global_node); __dma_request_channel()
649 if (--device->privatecnt == 0) __dma_request_channel()
650 dma_cap_clear(DMA_PRIVATE, device->cap_mask); __dma_request_channel()
667 * @dev: pointer to client device structure
672 struct dma_chan *dma_request_slave_channel_reason(struct device *dev, dma_request_slave_channel_reason()
675 /* If device-tree is present get slave info from here */ dma_request_slave_channel_reason()
679 /* If device was enumerated by ACPI get slave info from here */ dma_request_slave_channel_reason()
689 * @dev: pointer to client device structure
694 struct dma_chan *dma_request_slave_channel(struct device *dev, dma_request_slave_channel()
701 dma_cap_set(DMA_PRIVATE, ch->device->cap_mask); dma_request_slave_channel()
702 ch->device->privatecnt++; dma_request_slave_channel()
715 if (--chan->device->privatecnt == 0) dma_release_channel()
716 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); dma_release_channel()
726 struct dma_device *device, *_d; dmaengine_get() local
734 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { dmaengine_get()
735 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) dmaengine_get()
737 list_for_each_entry(chan, &device->channels, device_node) { dmaengine_get()
741 list_del_rcu(&device->global_node); dmaengine_get()
764 struct dma_device *device; dmaengine_put() local
771 list_for_each_entry(device, &dma_device_list, global_node) { dmaengine_put()
772 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) dmaengine_put()
774 list_for_each_entry(chan, &device->channels, device_node) dmaengine_put()
781 static bool device_has_all_tx_types(struct dma_device *device) device_has_all_tx_types() argument
783 /* A device that satisfies this test has channels that will never cause device_has_all_tx_types()
788 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) device_has_all_tx_types()
793 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) device_has_all_tx_types()
798 if (!dma_has_cap(DMA_XOR, device->cap_mask)) device_has_all_tx_types()
802 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) device_has_all_tx_types()
808 if (!dma_has_cap(DMA_PQ, device->cap_mask)) device_has_all_tx_types()
812 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) device_has_all_tx_types()
820 static int get_dma_id(struct dma_device *device) get_dma_id() argument
828 device->dev_id = rc; get_dma_id()
836 * @device: &dma_device
838 int dma_async_device_register(struct dma_device *device) dma_async_device_register() argument
844 if (!device) dma_async_device_register()
847 /* validate device routines */ dma_async_device_register()
848 BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) && dma_async_device_register()
849 !device->device_prep_dma_memcpy); dma_async_device_register()
850 BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && dma_async_device_register()
851 !device->device_prep_dma_xor); dma_async_device_register()
852 BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) && dma_async_device_register()
853 !device->device_prep_dma_xor_val); dma_async_device_register()
854 BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) && dma_async_device_register()
855 !device->device_prep_dma_pq); dma_async_device_register()
856 BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && dma_async_device_register()
857 !device->device_prep_dma_pq_val); dma_async_device_register()
858 BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && dma_async_device_register()
859 !device->device_prep_dma_memset); dma_async_device_register()
860 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && dma_async_device_register()
861 !device->device_prep_dma_interrupt); dma_async_device_register()
862 BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && dma_async_device_register()
863 !device->device_prep_dma_sg); dma_async_device_register()
864 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && dma_async_device_register()
865 !device->device_prep_dma_cyclic); dma_async_device_register()
866 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && dma_async_device_register()
867 !device->device_prep_interleaved_dma); dma_async_device_register()
869 BUG_ON(!device->device_tx_status); dma_async_device_register()
870 BUG_ON(!device->device_issue_pending); dma_async_device_register()
871 BUG_ON(!device->dev); dma_async_device_register()
876 if (device_has_all_tx_types(device)) dma_async_device_register()
877 dma_cap_set(DMA_ASYNC_TX, device->cap_mask); dma_async_device_register()
882 rc = get_dma_id(device); dma_async_device_register()
891 list_for_each_entry(chan, &device->channels, device_node) { dma_async_device_register()
904 chan->dev->device.class = &dma_devclass; dma_async_device_register()
905 chan->dev->device.parent = device->dev; dma_async_device_register()
908 chan->dev->dev_id = device->dev_id; dma_async_device_register()
910 dev_set_name(&chan->dev->device, "dma%dchan%d", dma_async_device_register()
911 device->dev_id, chan->chan_id); dma_async_device_register()
913 rc = device_register(&chan->dev->device); dma_async_device_register()
923 device->chancnt = chancnt; dma_async_device_register()
927 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) dma_async_device_register()
928 list_for_each_entry(chan, &device->channels, device_node) { dma_async_device_register()
942 list_add_tail_rcu(&device->global_node, &dma_device_list); dma_async_device_register()
943 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) dma_async_device_register()
944 device->privatecnt++; /* Always private */ dma_async_device_register()
954 idr_remove(&dma_idr, device->dev_id); dma_async_device_register()
960 list_for_each_entry(chan, &device->channels, device_node) { dma_async_device_register()
966 device_unregister(&chan->dev->device); dma_async_device_register()
974 * dma_async_device_unregister - unregister a DMA device
975 * @device: &dma_device
980 void dma_async_device_unregister(struct dma_device *device) dma_async_device_unregister() argument
985 list_del_rcu(&device->global_node); dma_async_device_unregister()
989 list_for_each_entry(chan, &device->channels, device_node) { dma_async_device_unregister()
996 device_unregister(&chan->dev->device); dma_async_device_unregister()
1041 struct device *dev = unmap->dev; dmaengine_unmap()
1112 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) dmaengine_get_unmap_data()
1196 chan->device->device_issue_pending(chan); dma_run_dependencies()
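
A client-side sketch of the slave-channel API excerpted above; "rx" is a hypothetical channel name from the device's DT/ACPI description, and the _reason variant returns ERR_PTR() codes (such as -EPROBE_DEFER) instead of NULL:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_dma_probe(struct device *dev)
{
	struct dma_chan *chan = dma_request_slave_channel_reason(dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... prepare and submit descriptors here ... */
	dma_release_channel(chan);
	return 0;
}
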
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
H A Dshadowramin.c33 struct nvkm_device *device = bios->subdev.device; pramin_read() local
37 *(u32 *)&bios->data[i] = nvkm_rd32(device, 0x700000 + i); pramin_read()
48 struct nvkm_device *device = priv->bios->subdev.device; pramin_fini() local
49 nvkm_wr32(device, 0x001700, priv->bar0); pramin_fini()
58 struct nvkm_device *device = subdev->device; pramin_init() local
63 if (device->card_type < NV_50) pramin_init()
67 if (device->card_type >= GM100) pramin_init()
68 addr = nvkm_rd32(device, 0x021c04); pramin_init()
70 if (device->card_type >= NV_C0) pramin_init()
71 addr = nvkm_rd32(device, 0x022500); pramin_init()
81 addr = nvkm_rd32(device, 0x619f04); pramin_init()
94 addr = (u64)nvkm_rd32(device, 0x001700) << 16; pramin_init()
105 priv->bar0 = nvkm_rd32(device, 0x001700); pramin_init()
106 nvkm_wr32(device, 0x001700, addr >> 16); pramin_init()
/linux-4.4.14/drivers/nvdimm/
H A Dnd-core.h16 #include <linux/device.h>
31 struct device dev;
40 struct device dev;
45 bool is_nvdimm(struct device *dev);
46 bool is_nd_pmem(struct device *dev);
47 bool is_nd_blk(struct device *dev);
48 struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
51 void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev);
55 void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
62 void __nd_device_register(struct device *dev);
63 int nd_match_dimm(struct device *dev, void *data);
66 bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
83 void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
84 void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
85 bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
87 bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
89 ssize_t nd_namespace_store(struct device *dev,
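The is_nvdimm()/is_nd_pmem()/is_nd_blk() predicates declared above are how libnvdimm bus code tells its child device types apart; a hypothetical walker using them might look like this (sketch only, the dispatch bodies are placeholders):

static int nd_visit_child(struct device *dev, void *data)
{
	if (is_nvdimm(dev)) {
		/* per-DIMM handling */
	} else if (is_nd_pmem(dev) || is_nd_blk(dev)) {
		/* per-region-type handling */
	}
	return 0;
}

/* e.g. device_for_each_child(&nvdimm_bus->dev, NULL, nd_visit_child); */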
/linux-4.4.14/arch/c6x/include/asm/
H A D dma-mapping.h
20 static inline void dma_sync_single_range_for_device(struct device *dev,
28 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
41 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
47 extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
50 extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
53 extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
56 extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
59 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
72 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
80 extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
83 extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
87 extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
90 extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
94 extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
95 extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
101 static inline int dma_mmap_coherent(struct device *dev,
108 static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
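These are the c6x declarations of the generic streaming-DMA API; the usual calling pattern is the same on every architecture. A generic sketch, with 'dev', 'buf' and 'len' assumed from the caller's context:

dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

if (dma_mapping_error(dev, handle))	/* mapping can fail; always check */
	return -ENOMEM;
/* ... hand 'handle' to the device and wait for the transfer ... */
dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);	/* matching size/direction */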
/linux-4.4.14/sound/i2c/
H A D i2c.c
35 static int snd_i2c_bit_sendbytes(struct snd_i2c_device *device,
37 static int snd_i2c_bit_readbytes(struct snd_i2c_device *device,
51 struct snd_i2c_device *device;
56 device = snd_i2c_device(bus->devices.next);
57 snd_i2c_device_free(device);
73 static int snd_i2c_bus_dev_free(struct snd_device *device)
75 struct snd_i2c_bus *bus = device->device_data;
116 struct snd_i2c_device *device;
121 device = kzalloc(sizeof(*device), GFP_KERNEL);
122 if (device == NULL)
124 device->addr = addr;
125 strlcpy(device->name, name, sizeof(device->name));
126 list_add_tail(&device->list, &bus->devices);
127 device->bus = bus;
128 *rdevice = device;
134 int snd_i2c_device_free(struct snd_i2c_device *device)
136 if (device->bus)
137 list_del(&device->list);
138 if (device->private_free)
139 device->private_free(device);
140 kfree(device);
146 int snd_i2c_sendbytes(struct snd_i2c_device *device, unsigned char *bytes, int count)
148 return device->bus->ops->sendbytes(device, bytes, count);
153 int snd_i2c_readbytes(struct snd_i2c_device *device, unsigned char *bytes, int count)
155 return device->bus->ops->readbytes(device, bytes, count);
275 static int snd_i2c_bit_sendbytes(struct snd_i2c_device *device,
278 struct snd_i2c_bus *bus = device->bus;
281 if (device->flags & SND_I2C_DEVICE_ADDRTEN)
284 err = snd_i2c_bit_sendbyte(bus, device->addr << 1);
301 static int snd_i2c_bit_readbytes(struct snd_i2c_device *device,
304 struct snd_i2c_bus *bus = device->bus;
307 if (device->flags & SND_I2C_DEVICE_ADDRTEN)
310 err = snd_i2c_bit_sendbyte(bus, (device->addr << 1) | 1);
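Tying the entry points above together: snd_i2c_device_create() hangs a named device off an existing snd_i2c_bus, and snd_i2c_sendbytes() dispatches through bus->ops. A usage sketch, where the 7-bit address 0x20 and the payload bytes are made up, and snd_i2c_lock()/snd_i2c_unlock() come from <sound/i2c.h>:

struct snd_i2c_device *dev;
unsigned char cmd[2] = { 0x01, 0x80 };	/* hypothetical register, value */
int err;

err = snd_i2c_device_create(bus, "codec", 0x20, &dev);
if (err < 0)
	return err;
snd_i2c_lock(bus);			/* serialize access to the bus */
err = snd_i2c_sendbytes(dev, cmd, 2);	/* returns bytes sent or -errno */
snd_i2c_unlock(bus);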

Completed in 4185 milliseconds
