/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2007
 *	Author: Mike Anderson <andmike@linux.vnet.ibm.com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>
#include <linux/export.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"

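/*
 * Table mapping each dm_uevent_type to the kobject_action it is emitted
 * with and to the string reported in the DM_ACTION uevent variable.
 * dm_path_uevent() indexes this table directly by event type.
 */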
static const struct {
	enum dm_uevent_type type;
	enum kobject_action action;
	const char *name;
} _dm_uevent_type_names[] = {
	{DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
	{DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
};

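/* Slab cache for struct dm_uevent, created in dm_uevent_init(). */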
static struct kmem_cache *_dm_event_cache;

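/*
 * A queued uevent: the originating device, the kobject action and
 * environment to emit, a list node for the per-device event list, and
 * the device name/uuid, which are filled in at send time by
 * dm_send_uevents().
 */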
struct dm_uevent {
	struct mapped_device *md;
	enum kobject_action action;
	struct kobj_uevent_env ku_env;
	struct list_head elist;
	char name[DM_NAME_LEN];
	char uuid[DM_UUID_LEN];
};

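/* Return an event to the slab cache. */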
static void dm_uevent_free(struct dm_uevent *event)
{
	kmem_cache_free(_dm_event_cache, event);
}

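/*
 * Allocate a zeroed event.  GFP_ATOMIC is used because path events may
 * be generated from contexts that must not sleep (for example while
 * handling I/O completion).
 */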
static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
{
	struct dm_uevent *event;

	event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
	if (!event)
		return NULL;

	INIT_LIST_HEAD(&event->elist);
	event->md = md;

	return event;
}

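/*
 * Build a path event carrying the DM_TARGET, DM_ACTION, DM_SEQNUM,
 * DM_PATH and DM_NR_VALID_PATHS environment variables.  Returns the
 * event on success or ERR_PTR(-ENOMEM) on any failure.
 */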
static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
					      struct dm_target *ti,
					      enum kobject_action action,
					      const char *dm_action,
					      const char *path,
					      unsigned int nr_valid_paths)
{
	struct dm_uevent *event;

	event = dm_uevent_alloc(md);
	if (!event) {
		DMERR("%s: dm_uevent_alloc() failed", __func__);
		goto err_nomem;
	}

	event->action = action;

	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
		DMERR("%s: add_uevent_var() for DM_TARGET failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
		DMERR("%s: add_uevent_var() for DM_ACTION failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
			   dm_next_uevent_seq(md))) {
		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%u",
			   nr_valid_paths)) {
		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
		      __func__);
		goto err_add;
	}

	return event;

err_add:
	dm_uevent_free(event);
err_nomem:
	return ERR_PTR(-ENOMEM);
}

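/*
 * Usage sketch (illustrative, not from this file): the dm core splices a
 * device's queued events onto a private list under its uevent lock and
 * then sends them against the gendisk's kobject, along the lines of:
 *
 *	LIST_HEAD(uevents);
 *
 *	spin_lock_irqsave(&md->uevent_lock, flags);
 *	list_splice_init(&md->uevent_list, &uevents);
 *	spin_unlock_irqrestore(&md->uevent_lock, flags);
 *
 *	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 *
 * (md->uevent_lock and md->uevent_list are dm-core fields named here
 * purely for illustration.)
 */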
/**
 * dm_send_uevents - send uevents for given list
 *
 * @events:	list of events to send
 * @kobj:	kobject generating event
 */
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
	int r;
	struct dm_uevent *event, *next;

	list_for_each_entry_safe(event, next, events, elist) {
		list_del_init(&event->elist);

		/*
		 * When a device is being removed this copy fails and we
		 * discard these unsent events.
		 */
		if (dm_copy_name_and_uuid(event->md, event->name,
					  event->uuid)) {
			DMINFO("%s: skipping sending uevent for lost device",
			       __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
			DMERR("%s: add_uevent_var() for DM_NAME failed",
			      __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
			DMERR("%s: add_uevent_var() for DM_UUID failed",
			      __func__);
			goto uevent_free;
		}

		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
		if (r)
			DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
		dm_uevent_free(event);
	}
}
EXPORT_SYMBOL_GPL(dm_send_uevents);

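/*
 * Usage sketch (illustrative): a multipath target could report a failed
 * path from its path-failure handling like this, where m and pgpath are
 * hypothetical target-private variables:
 *
 *	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 *		       pgpath->path.dev->name, m->nr_valid_paths);
 */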
/**
 * dm_path_uevent - called to create a new path event and queue it
 *
 * @event_type:	path event type enum
 * @ti:		pointer to a dm_target
 * @path:	string containing pathname
 * @nr_valid_paths:	number of valid paths remaining
 */
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
		    const char *path, unsigned int nr_valid_paths)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct dm_uevent *event;

	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
		DMERR("%s: Invalid event_type %d", __func__, event_type);
		return;
	}

	event = dm_build_path_uevent(md, ti,
				     _dm_uevent_type_names[event_type].action,
				     _dm_uevent_type_names[event_type].name,
				     path, nr_valid_paths);
	if (IS_ERR(event))
		return;

	dm_uevent_add(md, &event->elist);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);

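/*
 * Create the slab cache for events.  Expected to be called once while
 * the dm core initialises; returns -ENOMEM if the cache cannot be
 * created.
 */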
int dm_uevent_init(void)
{
	_dm_event_cache = KMEM_CACHE(dm_uevent, 0);
	if (!_dm_event_cache)
		return -ENOMEM;

	DMINFO("version 1.0.3");

	return 0;
}

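/* Counterpart to dm_uevent_init(): destroy the event slab cache. */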
void dm_uevent_exit(void)
{
	kmem_cache_destroy(_dm_event_cache);
}