This source file includes the following definitions:
- gb_control_get_version
- gb_control_get_bundle_version
- gb_control_get_bundle_versions
- gb_control_get_manifest_size_operation
- gb_control_get_manifest_operation
- gb_control_connected_operation
- gb_control_disconnected_operation
- gb_control_disconnecting_operation
- gb_control_mode_switch_operation
- gb_control_bundle_pm_status_map
- gb_control_bundle_suspend
- gb_control_bundle_resume
- gb_control_bundle_deactivate
- gb_control_bundle_activate
- gb_control_interface_pm_status_map
- gb_control_interface_suspend_prepare
- gb_control_interface_deactivate_prepare
- gb_control_interface_hibernate_abort
- vendor_string_show
- product_string_show
- gb_control_release
- gb_control_create
- gb_control_enable
- gb_control_disable
- gb_control_suspend
- gb_control_resume
- gb_control_add
- gb_control_del
- gb_control_get
- gb_control_put
- gb_control_mode_switch_prepare
- gb_control_mode_switch_complete
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/greybus.h>

#define GB_CONTROL_VERSION_MAJOR 0
#define GB_CONTROL_VERSION_MINOR 1

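/*
 * Negotiate the control-protocol version with the interface: the host
 * advertises the version it implements and the interface replies with the
 * version it will use, which must not exceed the requested major version.
 */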
static int gb_control_get_version(struct gb_control *control)
{
        struct gb_interface *intf = control->connection->intf;
        struct gb_control_version_request request;
        struct gb_control_version_response response;
        int ret;

        request.major = GB_CONTROL_VERSION_MAJOR;
        request.minor = GB_CONTROL_VERSION_MINOR;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_VERSION,
                                &request, sizeof(request), &response,
                                sizeof(response));
        if (ret) {
                dev_err(&intf->dev,
                        "failed to get control-protocol version: %d\n",
                        ret);
                return ret;
        }

        if (response.major > request.major) {
                dev_err(&intf->dev,
                        "unsupported major control-protocol version (%u > %u)\n",
                        response.major, request.major);
                return -ENOTSUPP;
        }

        control->protocol_major = response.major;
        control->protocol_minor = response.minor;

        dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
                response.minor);

        return 0;
}

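/*
 * Query the class-specific (major.minor) version of a single bundle and
 * cache it in the bundle structure.
 */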
static int gb_control_get_bundle_version(struct gb_control *control,
                                         struct gb_bundle *bundle)
{
        struct gb_interface *intf = control->connection->intf;
        struct gb_control_bundle_version_request request;
        struct gb_control_bundle_version_response response;
        int ret;

        request.bundle_id = bundle->id;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_VERSION,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret) {
                dev_err(&intf->dev,
                        "failed to get bundle %u class version: %d\n",
                        bundle->id, ret);
                return ret;
        }

        bundle->class_major = response.major;
        bundle->class_minor = response.minor;

        dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
                response.major, response.minor);

        return 0;
}

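/*
 * Fetch class versions for every bundle of the interface. This is a no-op
 * on interfaces whose control protocol predates the Bundle Version
 * operation (see the has_bundle_version check in gb_control_enable()).
 */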
int gb_control_get_bundle_versions(struct gb_control *control)
{
        struct gb_interface *intf = control->connection->intf;
        struct gb_bundle *bundle;
        int ret;

        if (!control->has_bundle_version)
                return 0;

        list_for_each_entry(bundle, &intf->bundles, links) {
                ret = gb_control_get_bundle_version(control, bundle);
                if (ret)
                        return ret;
        }

        return 0;
}

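/*
 * Ask the interface for the size of its manifest; returns the size in
 * bytes, or a negative errno on failure.
 */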
int gb_control_get_manifest_size_operation(struct gb_interface *intf)
{
        struct gb_control_get_manifest_size_response response;
        struct gb_connection *connection = intf->control->connection;
        int ret;

        ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
                                NULL, 0, &response, sizeof(response));
        if (ret) {
                dev_err(&connection->intf->dev,
                        "failed to get manifest size: %d\n", ret);
                return ret;
        }

        return le16_to_cpu(response.size);
}

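/*
 * Read the manifest from the interface; the caller supplies a buffer of the
 * size previously reported by gb_control_get_manifest_size_operation().
 */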
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
                                      size_t size)
{
        struct gb_connection *connection = intf->control->connection;

        return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
                                 NULL, 0, manifest, size);
}

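/*
 * Notify the interface that a connection has been established on (or torn
 * down from) one of its CPorts.
 */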
int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
{
        struct gb_control_connected_request request;

        request.cport_id = cpu_to_le16(cport_id);
        return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
                                 &request, sizeof(request), NULL, 0);
}

int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
{
        struct gb_control_disconnected_request request;

        request.cport_id = cpu_to_le16(cport_id);
        return gb_operation_sync(control->connection,
                                 GB_CONTROL_TYPE_DISCONNECTED, &request,
                                 sizeof(request), NULL, 0);
}

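/*
 * Tell the interface that a CPort is about to be disconnected. The
 * gb_operation_create_core() helper is used here, presumably so the request
 * can still be sent while the connection itself is being shut down.
 */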
int gb_control_disconnecting_operation(struct gb_control *control,
                                       u16 cport_id)
{
        struct gb_control_disconnecting_request *request;
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(control->connection,
                                             GB_CONTROL_TYPE_DISCONNECTING,
                                             sizeof(*request), 0, 0,
                                             GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        request = operation->request->payload;
        request->cport_id = cpu_to_le16(cport_id);

        ret = gb_operation_request_send_sync(operation);
        if (ret) {
                dev_err(&control->dev, "failed to send disconnecting: %d\n",
                        ret);
        }

        gb_operation_put(operation);

        return ret;
}

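/*
 * Send the unidirectional Mode Switch request. No response is expected;
 * the interface typically drops off the bus and reboots into its new mode
 * after receiving it.
 */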
int gb_control_mode_switch_operation(struct gb_control *control)
{
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(control->connection,
                                             GB_CONTROL_TYPE_MODE_SWITCH,
                                             0, 0,
                                             GB_OPERATION_FLAG_UNIDIRECTIONAL,
                                             GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        ret = gb_operation_request_send_sync(operation);
        if (ret)
                dev_err(&control->dev, "failed to send mode switch: %d\n", ret);

        gb_operation_put(operation);

        return ret;
}

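/*
 * Bundle power-management operations. Each request carries a bundle id and
 * the response carries a status byte, which is mapped to a negative errno
 * by gb_control_bundle_pm_status_map().
 */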
static int gb_control_bundle_pm_status_map(u8 status)
{
        switch (status) {
        case GB_CONTROL_BUNDLE_PM_INVAL:
                return -EINVAL;
        case GB_CONTROL_BUNDLE_PM_BUSY:
                return -EBUSY;
        case GB_CONTROL_BUNDLE_PM_NA:
                return -ENOMSG;
        case GB_CONTROL_BUNDLE_PM_FAIL:
        default:
                return -EREMOTEIO;
        }
}

int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
                        bundle_id, ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
                        bundle_id, ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to resume bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send bundle %u deactivate: %d\n", bundle_id,
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

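/*
 * Bundle Activate is optional: interfaces carrying the
 * GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE quirk never see this request (see
 * has_bundle_activate in gb_control_enable()).
 */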
int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        if (!control->has_bundle_activate)
                return 0;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send bundle %u activate: %d\n", bundle_id,
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to activate bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

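/*
 * Interface power-management operations (suspend prepare, deactivate
 * prepare, hibernate abort). These take no request payload; only the
 * response status is checked and mapped to an errno.
 */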
static int gb_control_interface_pm_status_map(u8 status)
{
        switch (status) {
        case GB_CONTROL_INTF_PM_BUSY:
                return -EBUSY;
        case GB_CONTROL_INTF_PM_NA:
                return -ENOMSG;
        default:
                return -EREMOTEIO;
        }
}

int gb_control_interface_suspend_prepare(struct gb_control *control)
{
        struct gb_control_intf_pm_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
                                &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send interface suspend prepare: %d\n", ret);
                return ret;
        }

        if (response.status != GB_CONTROL_INTF_PM_OK) {
                dev_err(&control->dev, "interface error while preparing suspend: %d\n",
                        response.status);
                return gb_control_interface_pm_status_map(response.status);
        }

        return 0;
}

int gb_control_interface_deactivate_prepare(struct gb_control *control)
{
        struct gb_control_intf_pm_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
                                0, &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_INTF_PM_OK) {
                dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
                        response.status);
                return gb_control_interface_pm_status_map(response.status);
        }

        return 0;
}

int gb_control_interface_hibernate_abort(struct gb_control *control)
{
        struct gb_control_intf_pm_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
                                &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send interface aborting hibernate: %d\n",
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_INTF_PM_OK) {
                dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
                        response.status);
                return gb_control_interface_pm_status_map(response.status);
        }

        return 0;
}

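/*
 * sysfs attributes exposing the interface's vendor and product strings on
 * the control device.
 */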
static ssize_t vendor_string_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct gb_control *control = to_gb_control(dev);

        return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
}
static DEVICE_ATTR_RO(vendor_string);

static ssize_t product_string_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct gb_control *control = to_gb_control(dev);

        return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
}
static DEVICE_ATTR_RO(product_string);

static struct attribute *control_attrs[] = {
        &dev_attr_vendor_string.attr,
        &dev_attr_product_string.attr,
        NULL,
};
ATTRIBUTE_GROUPS(control);

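/*
 * Device release callback: destroys the control connection and frees the
 * strings owned by the control device once its last reference is dropped.
 */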
static void gb_control_release(struct device *dev)
{
        struct gb_control *control = to_gb_control(dev);

        gb_connection_destroy(control->connection);

        kfree(control->vendor_string);
        kfree(control->product_string);

        kfree(control);
}

struct device_type greybus_control_type = {
        .name = "greybus_control",
        .release = gb_control_release,
};

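/*
 * Allocate a control device and its control connection for an interface.
 * The device is only initialized here; registration happens later via
 * gb_control_add(). A rough sketch of the expected lifecycle (the actual
 * call sites live in the Greybus interface core, not in this file):
 *
 *	control = gb_control_create(intf);
 *	gb_control_enable(control);
 *	...
 *	gb_control_disable(control);
 *	gb_control_put(control);
 */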
struct gb_control *gb_control_create(struct gb_interface *intf)
{
        struct gb_connection *connection;
        struct gb_control *control;

        control = kzalloc(sizeof(*control), GFP_KERNEL);
        if (!control)
                return ERR_PTR(-ENOMEM);

        control->intf = intf;

        connection = gb_connection_create_control(intf);
        if (IS_ERR(connection)) {
                dev_err(&intf->dev,
                        "failed to create control connection: %ld\n",
                        PTR_ERR(connection));
                kfree(control);
                return ERR_CAST(connection);
        }

        control->connection = connection;

        control->dev.parent = &intf->dev;
        control->dev.bus = &greybus_bus_type;
        control->dev.type = &greybus_control_type;
        control->dev.groups = control_groups;
        control->dev.dma_mask = intf->dev.dma_mask;
        device_initialize(&control->dev);
        dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));

        gb_connection_set_data(control->connection, control);

        return control;
}

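/*
 * Bring up the control connection (TX only) and negotiate the protocol
 * version. Bundle Version support is inferred from a protocol version
 * greater than 0.1, and Bundle Activate support from the absence of the
 * NO_BUNDLE_ACTIVATE interface quirk.
 */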
int gb_control_enable(struct gb_control *control)
{
        int ret;

        dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

        ret = gb_connection_enable_tx(control->connection);
        if (ret) {
                dev_err(&control->connection->intf->dev,
                        "failed to enable control connection: %d\n",
                        ret);
                return ret;
        }

        ret = gb_control_get_version(control);
        if (ret)
                goto err_disable_connection;

        if (control->protocol_major > 0 || control->protocol_minor > 1)
                control->has_bundle_version = true;

        if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
                control->has_bundle_activate = true;

        return 0;

err_disable_connection:
        gb_connection_disable(control->connection);

        return ret;
}

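/*
 * gb_control_disable() tears the connection down, using the forced variant
 * when the interface has already gone away. Suspend and resume simply
 * disable and re-enable (TX only) the control connection.
 */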
void gb_control_disable(struct gb_control *control)
{
        dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

        if (control->intf->disconnected)
                gb_connection_disable_forced(control->connection);
        else
                gb_connection_disable(control->connection);
}

int gb_control_suspend(struct gb_control *control)
{
        gb_connection_disable(control->connection);

        return 0;
}

int gb_control_resume(struct gb_control *control)
{
        int ret;

        ret = gb_connection_enable_tx(control->connection);
        if (ret) {
                dev_err(&control->connection->intf->dev,
                        "failed to enable control connection: %d\n", ret);
                return ret;
        }

        return 0;
}

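/*
 * Registration and reference counting for the control device. A minimal
 * sketch of how a caller might pair these (actual callers are in the
 * Greybus interface core):
 *
 *	ret = gb_control_add(control);
 *	...
 *	gb_control_del(control);
 *	gb_control_put(control);
 */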
int gb_control_add(struct gb_control *control)
{
        int ret;

        ret = device_add(&control->dev);
        if (ret) {
                dev_err(&control->dev,
                        "failed to register control device: %d\n",
                        ret);
                return ret;
        }

        return 0;
}

void gb_control_del(struct gb_control *control)
{
        if (device_is_registered(&control->dev))
                device_del(&control->dev);
}

struct gb_control *gb_control_get(struct gb_control *control)
{
        get_device(&control->dev);

        return control;
}

void gb_control_put(struct gb_control *control)
{
        put_device(&control->dev);
}

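/*
 * Mode-switch bracketing: prepare is called before sending the Mode Switch
 * request and complete after the interface has come back, so the underlying
 * control connection can be put into, and brought back out of, its
 * mode-switch state.
 */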
void gb_control_mode_switch_prepare(struct gb_control *control)
{
        gb_connection_mode_switch_prepare(control->connection);
}

void gb_control_mode_switch_complete(struct gb_control *control)
{
        gb_connection_mode_switch_complete(control->connection);
}