This source file includes following definitions.
- hpi_lookup_entry_point_function
- hw_entry_point
- subsys_message
- adapter_message
- mixer_message
- outstream_message
- instream_message
- hpi_send_recv_ex
- adapter_open
- adapter_close
- mixer_open
- mixer_close
- instream_open
- instream_close
- outstream_open
- outstream_close
- adapter_prepare
- HPIMSGX__reset
- HPIMSGX__init
- HPIMSGX__cleanup
1
2
3
4
5
6
7
8
9
10
11
12 #define SOURCEFILE_NAME "hpimsgx.c"
13 #include "hpi_internal.h"
14 #include "hpi_version.h"
15 #include "hpimsginit.h"
16 #include "hpicmn.h"
17 #include "hpimsgx.h"
18 #include "hpidebug.h"
19
/* PCI device table; entries (including per-device driver_data holding the
 * HPI handler function pointer) are generated into hpipcida.h. */
static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

/* Protects the cached open-response tables and the per-stream open state. */
static struct hpios_spinlock msgx_lock;

/* Per-adapter message handler, registered in HPIMSGX__init(). */
static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
/* Message/response logging switch; cleared after a fatal DSP error to
 * avoid flooding the log (see hpi_send_recv_ex). */
static int logging_enabled = 1;
28
29 static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
30 *pci_info)
31 {
32
33 int i;
34
35 for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
36 if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
37 && asihpi_pci_tbl[i].vendor !=
38 pci_info->pci_dev->vendor)
39 continue;
40 if (asihpi_pci_tbl[i].device != PCI_ANY_ID
41 && asihpi_pci_tbl[i].device !=
42 pci_info->pci_dev->device)
43 continue;
44 if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
45 && asihpi_pci_tbl[i].subvendor !=
46 pci_info->pci_dev->subsystem_vendor)
47 continue;
48 if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
49 && asihpi_pci_tbl[i].subdevice !=
50 pci_info->pci_dev->subsystem_device)
51 continue;
52
53
54
55 return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
56 }
57
58 return NULL;
59 }
60
61 static inline void hw_entry_point(struct hpi_message *phm,
62 struct hpi_response *phr)
63 {
64 if ((phm->adapter_index < HPI_MAX_ADAPTERS)
65 && hpi_entry_points[phm->adapter_index])
66 hpi_entry_points[phm->adapter_index] (phm, phr);
67 else
68 hpi_init_response(phr, phm->object, phm->function,
69 HPI_ERROR_PROCESSING_MESSAGE);
70 }
71
72 static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
73 static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);
74
75 static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
76 static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);
77
78 static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
79 void *h_owner);
80 static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
81 void *h_owner);
82 static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
83 void *h_owner);
84 static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
85 void *h_owner);
86
87 static void HPIMSGX__reset(u16 adapter_index);
88
89 static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
90 static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
91
/* The following response structs are cached copies of hardware replies and
 * must match the wire layout exactly, hence byte packing. */
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

/* Subsystem-object response: common header plus subsystem payload. */
struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

/* Adapter-object response: common header plus adapter payload. */
struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

/* Mixer-object response: common header plus mixer payload. */
struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

/* Stream-object response: common header plus stream payload, used for
 * both in- and out-streams. */
struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

/* Per-adapter summary captured by adapter_prepare(). */
struct adapter_info {
	u16 type;		/* adapter type from HPI_ADAPTER_GET_INFO */
	u16 num_instreams;
	u16 num_outstreams;
};

/* Ownership record for one stream: who (if anyone) has it open. */
struct asi_open_state {
	int open_flag;		/* nonzero while a user holds the stream */
	void *h_owner;		/* opaque owner handle, NULL when free */
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif
130
131
/* Cached "open" responses, filled in by adapter_prepare() and replayed by
 * the *_open() handlers so repeated opens need not touch the hardware.
 * Protected by msgx_lock together with the open-state tables below. */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

/* Stream counts and type per adapter, from HPI_ADAPTER_GET_INFO. */
static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* Per-stream ownership tracking, used to enforce exclusive open and to
 * clean up when an owner goes away. */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
150
/*
 * Handle messages addressed to the HPI subsystem object: versioning,
 * open/close, driver load/unload, adapter enumeration and creation.
 * h_owner identifies the calling client for per-owner cleanup.
 */
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	/* Subsystem messages are adapter-independent; a specific adapter
	 * index here is suspicious but not fatal. */
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		/* Short version in .version, full HPI_VER in .data. */
		phr->u.s.version = HPI_VER >> 8;
		phr->u.s.data = HPI_VER;
		break;
	case HPI_SUBSYS_OPEN:
		/* Nothing to set up; always succeeds. */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/* Release everything this owner still holds. */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* First-time initialisation of lock, entry points and the
		 * cached response tables, then let the common layer run. */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));

		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);

		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		/* Common layer first, then per-owner cleanup; the success
		 * response deliberately overwrites whatever HPI_COMMON put
		 * in phr. */
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		/* Enumeration is handled entirely by the common layer. */
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		/* Probe the hardware and register its entry point. */
		HPIMSGX__init(phm, phr);
		break;

	default:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}
210
211 static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
212 void *h_owner)
213 {
214 switch (phm->function) {
215 case HPI_ADAPTER_OPEN:
216 adapter_open(phm, phr);
217 break;
218 case HPI_ADAPTER_CLOSE:
219 adapter_close(phm, phr);
220 break;
221 case HPI_ADAPTER_DELETE:
222 HPIMSGX__cleanup(phm->adapter_index, h_owner);
223 {
224 struct hpi_message hm;
225 struct hpi_response hr;
226 hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
227 HPI_ADAPTER_CLOSE);
228 hm.adapter_index = phm->adapter_index;
229 hw_entry_point(&hm, &hr);
230 }
231 hw_entry_point(phm, phr);
232 break;
233
234 default:
235 hw_entry_point(phm, phr);
236 break;
237 }
238 }
239
240 static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
241 {
242 switch (phm->function) {
243 case HPI_MIXER_OPEN:
244 mixer_open(phm, phr);
245 break;
246 case HPI_MIXER_CLOSE:
247 mixer_close(phm, phr);
248 break;
249 default:
250 hw_entry_point(phm, phr);
251 break;
252 }
253 }
254
255 static void outstream_message(struct hpi_message *phm,
256 struct hpi_response *phr, void *h_owner)
257 {
258 if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
259 hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
260 HPI_ERROR_INVALID_OBJ_INDEX);
261 return;
262 }
263
264 switch (phm->function) {
265 case HPI_OSTREAM_OPEN:
266 outstream_open(phm, phr, h_owner);
267 break;
268 case HPI_OSTREAM_CLOSE:
269 outstream_close(phm, phr, h_owner);
270 break;
271 default:
272 hw_entry_point(phm, phr);
273 break;
274 }
275 }
276
277 static void instream_message(struct hpi_message *phm,
278 struct hpi_response *phr, void *h_owner)
279 {
280 if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
281 hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
282 HPI_ERROR_INVALID_OBJ_INDEX);
283 return;
284 }
285
286 switch (phm->function) {
287 case HPI_ISTREAM_OPEN:
288 instream_open(phm, phr, h_owner);
289 break;
290 case HPI_ISTREAM_CLOSE:
291 instream_close(phm, phr, h_owner);
292 break;
293 default:
294 hw_entry_point(phm, phr);
295 break;
296 }
297 }
298
299
300
301
/*
 * Main HPI message dispatcher: validate the request, route it by object
 * type, and log the exchange.  h_owner identifies the client on whose
 * behalf the message is sent (used for exclusive open and cleanup).
 */
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (logging_enabled)
		HPI_DEBUG_MESSAGE(DEBUG, phm);

	/* Only requests are accepted here; anything else is malformed. */
	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	/* NOTE(review): HPIMSGX_ALLADAPTERS passes this range check and
	 * reaches the object handlers below — confirm each handler copes
	 * with it before relying on this check for bounds safety. */
	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	/* Route by object type; unknown objects go straight to the
	 * adapter's entry point. */
	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}

	if (logging_enabled)
		HPI_DEBUG_RESPONSE(phr);

	/* A DSP communication failure is unrecoverable noise: raise the
	 * debug level once and stop per-message logging thereafter. */
	if (phr->error >= HPI_ERROR_DSP_COMMUNICATION) {
		hpi_debug_level_set(HPI_DEBUG_LEVEL_ERROR);
		logging_enabled = 0;
	}
}
356
/* Answer an adapter open from the response cached by adapter_prepare().
 * Assumes adapter_index has been range-checked by the caller. */
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}
363
/* Adapter close is stateless at this layer; always report success. */
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}
369
/* Answer a mixer open from the response cached by adapter_prepare().
 * Assumes adapter_index has been range-checked by the caller. */
static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}
375
/* Mixer close is stateless at this layer; always report success. */
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}
380
/*
 * Open an instream exclusively for h_owner.  The cached open response is
 * replayed on success; HPI_ERROR_OBJ_ALREADY_OPEN if another owner holds
 * the stream.  The lock is dropped around the hardware reset because
 * hw_entry_point() may take a long time; open_flag is set beforehand so a
 * concurrent open fails instead of racing.
 */
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		/* The stream failed to open at prepare time; replay the
		 * cached error response. */
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		/* Claim the stream before dropping the lock so concurrent
		 * opens see it as busy while we reset the hardware. */
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* Reset the stream at the hardware level (may block). */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			/* Reset failed: release the claim and report. */
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			/* Record ownership and replay the cached success
			 * response. */
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
430
/*
 * Close an instream held by h_owner.  Only the recorded owner may close;
 * anyone else gets HPI_ERROR_OBJ_NOT_OPEN.  As in instream_open(), the
 * lock is dropped around the hardware reset, with h_owner cleared first
 * so the state is consistent while unlocked.
 */
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {

		/* Relinquish ownership before unlocking for the (possibly
		 * slow) hardware reset. */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);

		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			/* Reset failed: restore ownership and report. */
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
477
/*
 * Open an outstream exclusively for h_owner.  Mirrors instream_open():
 * cached response replayed on success, HPI_ERROR_OBJ_ALREADY_OPEN if the
 * stream is held, and the lock is dropped around the hardware reset with
 * open_flag set beforehand to block concurrent opens.
 */
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		/* The stream failed to open at prepare time; replay the
		 * cached error response. */
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		/* Claim the stream before dropping the lock so concurrent
		 * opens see it as busy while we reset the hardware. */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* Reset the stream at the hardware level (may block). */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			/* Reset failed: release the claim and report. */
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			/* Record ownership and replay the cached success
			 * response. */
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
527
/*
 * Close an outstream held by h_owner.  Mirrors instream_close(): only the
 * recorded owner may close, and the lock is dropped around the hardware
 * reset with h_owner cleared first.
 */
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {

		/* Relinquish ownership before unlocking for the (possibly
		 * slow) hardware reset. */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);

		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			/* Reset failed: restore ownership and report. */
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}
575
/*
 * Probe a newly-created adapter and populate the cached open responses
 * and stream counts used by the *_open() handlers.  Returns 0 on success
 * or the first hardware error encountered.
 */
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	u16 i;

	/* Open the adapter and cache the response for later replay. */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* Query type and stream counts. */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* Open each outstream once, caching the response (success or
	 * failure) and marking it unowned. */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* Same for each instream. */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* Finally open the mixer and cache its response; a mixer error is
	 * recorded in the cache rather than failing the prepare. */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}
641
/*
 * Reset the cached open responses so subsequent opens fail until
 * adapter_prepare() repopulates them.  For HPIMSGX_ALLADAPTERS the whole
 * response structs are rebuilt; for a single adapter only the cached
 * error fields are overwritten.
 */
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		/* Single-adapter reset: poison only the error fields; the
		 * rest of each cached response is left as-is. */
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}
691
692 static u16 HPIMSGX__init(struct hpi_message *phm,
693
694
695 struct hpi_response *phr
696
697 )
698 {
699 hpi_handler_func *entry_point_func;
700 struct hpi_response hr;
701
702
703 hpi_init_response(&hr, phm->object, phm->function,
704 HPI_ERROR_INVALID_OBJ);
705
706 entry_point_func =
707 hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);
708
709 if (entry_point_func) {
710 HPI_DEBUG_MESSAGE(DEBUG, phm);
711 entry_point_func(phm, &hr);
712 } else {
713 phr->error = HPI_ERROR_PROCESSING_MESSAGE;
714 return phr->error;
715 }
716 if (hr.error == 0) {
717
718
719 hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
720
721 HPI_DEBUG_LOG(DEBUG,
722 "HPI_SUBSYS_CREATE_ADAPTER successful,"
723 " preparing adapter\n");
724 adapter_prepare(hr.u.s.adapter_index);
725 }
726 memcpy(phr, &hr, hr.size);
727 return phr->error;
728 }
729
730 static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
731 {
732 int i, adapter, adapter_limit;
733
734 if (!h_owner)
735 return;
736
737 if (adapter_index == HPIMSGX_ALLADAPTERS) {
738 adapter = 0;
739 adapter_limit = HPI_MAX_ADAPTERS;
740 } else {
741 adapter = adapter_index;
742 adapter_limit = adapter + 1;
743 }
744
745 for (; adapter < adapter_limit; adapter++) {
746
747 for (i = 0; i < HPI_MAX_STREAMS; i++) {
748 if (h_owner ==
749 outstream_user_open[adapter][i].h_owner) {
750 struct hpi_message hm;
751 struct hpi_response hr;
752
753 HPI_DEBUG_LOG(DEBUG,
754 "Close adapter %d ostream %d\n",
755 adapter, i);
756
757 hpi_init_message_response(&hm, &hr,
758 HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
759 hm.adapter_index = (u16)adapter;
760 hm.obj_index = (u16)i;
761 hw_entry_point(&hm, &hr);
762
763 hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
764 hw_entry_point(&hm, &hr);
765
766 hm.function = HPI_OSTREAM_GROUP_RESET;
767 hw_entry_point(&hm, &hr);
768
769 outstream_user_open[adapter][i].open_flag = 0;
770 outstream_user_open[adapter][i].h_owner =
771 NULL;
772 }
773 if (h_owner == instream_user_open[adapter][i].h_owner) {
774 struct hpi_message hm;
775 struct hpi_response hr;
776
777 HPI_DEBUG_LOG(DEBUG,
778 "Close adapter %d istream %d\n",
779 adapter, i);
780
781 hpi_init_message_response(&hm, &hr,
782 HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
783 hm.adapter_index = (u16)adapter;
784 hm.obj_index = (u16)i;
785 hw_entry_point(&hm, &hr);
786
787 hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
788 hw_entry_point(&hm, &hr);
789
790 hm.function = HPI_ISTREAM_GROUP_RESET;
791 hw_entry_point(&hm, &hr);
792
793 instream_user_open[adapter][i].open_flag = 0;
794 instream_user_open[adapter][i].h_owner = NULL;
795 }
796 }
797 }
798 }