This source file includes the following definitions:
- _queue_message
- mISDN_queue_message
- get_channel4id
- send_socklist
- send_layer2
- send_msg_to_layer
- do_clear_stack
- mISDNStackd
- l1_receive
- set_channel_address
- __add_layer2
- add_layer2
- st_own_ctrl
- create_stack
- connect_layer1
- connect_Bstack
- create_l2entity
- delete_channel
- delete_stack
- mISDN_initstack
#include <linux/slab.h>
#include <linux/mISDNif.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/cputime.h>
#include <linux/signal.h>

#include "core.h"

static u_int *debug;

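/*
 * Queue a message on the stack and, unless the stack is stopped, flag
 * pending work and wake the stack thread.
 */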
static inline void
_queue_message(struct mISDNstack *st, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	if (*debug & DEBUG_QUEUE_FUNC)
		printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
		       __func__, hh->prim, hh->id, skb);
	skb_queue_tail(&st->msgq, skb);
	if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
		test_and_set_bit(mISDN_STACK_WORK, &st->status);
		wake_up_interruptible(&st->workq);
	}
}

static int
mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
{
	_queue_message(ch->st, skb);
	return 0;
}

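/* Find the layer2 channel with number 'id' on this stack, or NULL. */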
static struct mISDNchannel *
get_channel4id(struct mISDNstack *st, u_int id)
{
	struct mISDNchannel *ch;

	mutex_lock(&st->lmutex);
	list_for_each_entry(ch, &st->layer2, list) {
		if (id == ch->nr)
			goto unlock;
	}
	ch = NULL;
unlock:
	mutex_unlock(&st->lmutex);
	return ch;
}

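/*
 * Deliver a copy of skb to every bound socket on the list. A fresh copy
 * is made whenever the previous one was consumed; any unused copy is
 * freed at the end.
 */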
static void
send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *cskb = NULL;

	read_lock(&sl->lock);
	sk_for_each(sk, &sl->head) {
		if (sk->sk_state != MISDN_BOUND)
			continue;
		if (!cskb)
			cskb = skb_copy(skb, GFP_ATOMIC);
		if (!cskb) {
			printk(KERN_WARNING "%s no skb\n", __func__);
			break;
		}
		if (!sock_queue_rcv_skb(sk, cskb))
			cskb = NULL;
	}
	read_unlock(&sl->lock);
	dev_kfree_skb(cskb);
}

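/*
 * Deliver a message to layer2. MISDN_ID_ANY broadcasts it to every
 * layer2 channel (the original skb is reused for the last one);
 * otherwise the channel whose address matches hh->id gets it, with the
 * TEI manager (CHECK_DATA) as fallback.
 */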
static void
send_layer2(struct mISDNstack *st, struct sk_buff *skb)
{
	struct sk_buff *cskb;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	struct mISDNchannel *ch;
	int ret;

	if (!st)
		return;
	mutex_lock(&st->lmutex);
	if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) {
		list_for_each_entry(ch, &st->layer2, list) {
			if (list_is_last(&ch->list, &st->layer2)) {
				cskb = skb;
				skb = NULL;
			} else {
				cskb = skb_copy(skb, GFP_KERNEL);
			}
			if (cskb) {
				ret = ch->send(ch, cskb);
				if (ret) {
					if (*debug & DEBUG_SEND_ERR)
						printk(KERN_DEBUG
						       "%s ch%d prim(%x) addr(%x)"
						       " err %d\n",
						       __func__, ch->nr,
						       hh->prim, ch->addr, ret);
					dev_kfree_skb(cskb);
				}
			} else {
				printk(KERN_WARNING "%s ch%d addr %x no mem\n",
				       __func__, ch->nr, ch->addr);
				goto out;
			}
		}
	} else {
		list_for_each_entry(ch, &st->layer2, list) {
			if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
				ret = ch->send(ch, skb);
				if (!ret)
					skb = NULL;
				goto out;
			}
		}
		ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
		if (!ret)
			skb = NULL;
		else if (*debug & DEBUG_SEND_ERR)
			printk(KERN_DEBUG
			       "%s mgr prim(%x) err %d\n",
			       __func__, hh->prim, ret);
	}
out:
	mutex_unlock(&st->lmutex);
	dev_kfree_skb(skb);
}

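/*
 * Dispatch a dequeued message by the layer mask of its primitive:
 * layer1 (0x1) goes to the layer1 channel and layer2 (0x2) to
 * send_layer2(), both also being passed to any bound layer1 sockets;
 * 0x4/0x8 primitives are routed to the channel matching hh->id.
 */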
static inline int
send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	struct mISDNchannel *ch;
	int lm;

	lm = hh->prim & MISDN_LAYERMASK;
	if (*debug & DEBUG_QUEUE_FUNC)
		printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
		       __func__, hh->prim, hh->id, skb);
	if (lm == 0x1) {
		if (!hlist_empty(&st->l1sock.head)) {
			__net_timestamp(skb);
			send_socklist(&st->l1sock, skb);
		}
		return st->layer1->send(st->layer1, skb);
	} else if (lm == 0x2) {
		if (!hlist_empty(&st->l1sock.head))
			send_socklist(&st->l1sock, skb);
		send_layer2(st, skb);
		return 0;
	} else if (lm == 0x4) {
		ch = get_channel4id(st, hh->id);
		if (ch)
			return ch->send(ch, skb);
		else
			printk(KERN_WARNING
			       "%s: dev(%s) prim(%x) id(%x) no channel\n",
			       __func__, dev_name(&st->dev->dev), hh->prim,
			       hh->id);
	} else if (lm == 0x8) {
		WARN_ON(lm == 0x8);
		ch = get_channel4id(st, hh->id);
		if (ch)
			return ch->send(ch, skb);
		else
			printk(KERN_WARNING
			       "%s: dev(%s) prim(%x) id(%x) no channel\n",
			       __func__, dev_name(&st->dev->dev), hh->prim,
			       hh->id);
	} else {
		/* broadcast not handled yet */
		printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
		       __func__, dev_name(&st->dev->dev), hh->prim);
	}
	return -ESRCH;
}

static void
do_clear_stack(struct mISDNstack *st)
{
}

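/*
 * Per-stack kernel thread: dequeues queued messages and hands them to
 * send_msg_to_layer() while work is pending, handles the STOPPED,
 * CLEARING and RESTART state bits, and sleeps when there is nothing
 * to do until it is told to abort.
 */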
static int
mISDNStackd(void *data)
{
	struct mISDNstack *st = data;
#ifdef MISDN_MSG_STATS
	u64 utime, stime;
#endif
	int err = 0;

	sigfillset(&current->blocked);
	if (*debug & DEBUG_MSG_THREAD)
		printk(KERN_DEBUG "mISDNStackd %s started\n",
		       dev_name(&st->dev->dev));

	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}

	for (;;) {
		struct sk_buff *skb;

		if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
			test_and_clear_bit(mISDN_STACK_WORK, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
		} else
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
		while (test_bit(mISDN_STACK_WORK, &st->status)) {
			skb = skb_dequeue(&st->msgq);
			if (!skb) {
				test_and_clear_bit(mISDN_STACK_WORK,
						   &st->status);
				/* test if a race happens */
				skb = skb_dequeue(&st->msgq);
				if (!skb)
					continue;
				test_and_set_bit(mISDN_STACK_WORK,
						 &st->status);
			}
#ifdef MISDN_MSG_STATS
			st->msg_cnt++;
#endif
			err = send_msg_to_layer(st, skb);
			if (unlikely(err)) {
				if (*debug & DEBUG_SEND_ERR)
					printk(KERN_DEBUG
					       "%s: %s prim(%x) id(%x) "
					       "send call(%d)\n",
					       __func__, dev_name(&st->dev->dev),
					       mISDN_HEAD_PRIM(skb),
					       mISDN_HEAD_ID(skb), err);
				dev_kfree_skb(skb);
				continue;
			}
			if (unlikely(test_bit(mISDN_STACK_STOPPED,
					      &st->status))) {
				test_and_clear_bit(mISDN_STACK_WORK,
						   &st->status);
				test_and_clear_bit(mISDN_STACK_RUNNING,
						   &st->status);
				break;
			}
		}
		if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
			test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
			do_clear_stack(st);
			test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
			test_and_set_bit(mISDN_STACK_RESTART, &st->status);
		}
		if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
			test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
			if (!skb_queue_empty(&st->msgq))
				test_and_set_bit(mISDN_STACK_WORK,
						 &st->status);
		}
		if (test_bit(mISDN_STACK_ABORT, &st->status))
			break;
		if (st->notify != NULL) {
			complete(st->notify);
			st->notify = NULL;
		}
#ifdef MISDN_MSG_STATS
		st->sleep_cnt++;
#endif
		test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
		wait_event_interruptible(st->workq, (st->status &
						     mISDN_STACK_ACTION_MASK));
		if (*debug & DEBUG_MSG_THREAD)
			printk(KERN_DEBUG "%s: %s wake status %08lx\n",
			       __func__, dev_name(&st->dev->dev), st->status);
		test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);

		test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);

		if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
#ifdef MISDN_MSG_STATS
			st->stopped_cnt++;
#endif
		}
	}
#ifdef MISDN_MSG_STATS
	printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
	       "msg %d sleep %d stopped\n",
	       dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
	       st->stopped_cnt);
	task_cputime(st->thread, &utime, &stime);
	printk(KERN_DEBUG
	       "mISDNStackd daemon for %s utime(%llu) stime(%llu)\n",
	       dev_name(&st->dev->dev), utime, stime);
	printk(KERN_DEBUG
	       "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
	       dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
	printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
	       dev_name(&st->dev->dev));
#endif
	test_and_set_bit(mISDN_STACK_KILLED, &st->status);
	test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
	test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
	test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
	skb_queue_purge(&st->msgq);
	st->thread = NULL;
	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}
	return 0;
}

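/* Layer1 receive hook: timestamp the frame and queue it for the stack thread. */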
static int
l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
{
	if (!ch->st)
		return -ENODEV;
	__net_timestamp(skb);
	_queue_message(ch->st, skb);
	return 0;
}

void
set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
{
	ch->addr = sapi | (tei << 8);
}

void
__add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	list_add_tail(&ch->list, &st->layer2);
}

void
add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	mutex_lock(&st->lmutex);
	__add_layer2(ch, st);
	mutex_unlock(&st->lmutex);
}

static int
st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	if (!ch->st || !ch->st->layer1)
		return -EINVAL;
	return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
}

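/*
 * Allocate and initialise the mISDNstack for a device, create its TEI
 * manager, wire up the D-channel and own-channel callbacks, and start
 * the mISDNStackd thread, waiting until it has signalled readiness.
 */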
int
create_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *newst;
	int err;
	DECLARE_COMPLETION_ONSTACK(done);

	newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
	if (!newst) {
		printk(KERN_ERR "kmalloc mISDN_stack failed\n");
		return -ENOMEM;
	}
	newst->dev = dev;
	INIT_LIST_HEAD(&newst->layer2);
	INIT_HLIST_HEAD(&newst->l1sock.head);
	rwlock_init(&newst->l1sock.lock);
	init_waitqueue_head(&newst->workq);
	skb_queue_head_init(&newst->msgq);
	mutex_init(&newst->lmutex);
	dev->D.st = newst;
	err = create_teimanager(dev);
	if (err) {
		printk(KERN_ERR "kmalloc teimanager failed\n");
		kfree(newst);
		return err;
	}
	dev->teimgr->peer = &newst->own;
	dev->teimgr->recv = mISDN_queue_message;
	dev->teimgr->st = newst;
	newst->layer1 = &dev->D;
	dev->D.recv = l1_receive;
	dev->D.peer = &newst->own;
	newst->own.st = newst;
	newst->own.ctrl = st_own_ctrl;
	newst->own.send = mISDN_queue_message;
	newst->own.recv = mISDN_queue_message;
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
		       dev_name(&newst->dev->dev));
	newst->notify = &done;
	newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
				    dev_name(&newst->dev->dev));
	if (IS_ERR(newst->thread)) {
		err = PTR_ERR(newst->thread);
		printk(KERN_ERR
		       "mISDN:cannot create kernel thread for %s (%d)\n",
		       dev_name(&newst->dev->dev), err);
		delete_teimanager(dev->teimgr);
		kfree(newst);
	} else
		wait_for_completion(&done);
	return err;
}

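/*
 * Bind a socket channel directly to the device's layer1 D-channel:
 * open the channel on the device and add the socket to the stack's
 * l1sock list.
 */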
int
connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
	       u_int protocol, struct sockaddr_mISDN *adr)
{
	struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
	struct channel_req rq;
	int err;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev_name(&dev->dev), protocol, adr->dev,
		       adr->channel, adr->sapi, adr->tei);
	switch (protocol) {
	case ISDN_P_NT_S0:
	case ISDN_P_NT_E1:
	case ISDN_P_TE_S0:
	case ISDN_P_TE_E1:
		ch->recv = mISDN_queue_message;
		ch->peer = &dev->D.st->own;
		ch->st = dev->D.st;
		rq.protocol = protocol;
		rq.adr.channel = adr->channel;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err,
		       dev->id);
		if (err)
			return err;
		write_lock_bh(&dev->D.st->l1sock.lock);
		sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
		write_unlock_bh(&dev->D.st->l1sock.lock);
		break;
	default:
		return -ENOPROTOOPT;
	}
	return 0;
}

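/*
 * Connect a B-channel. If the device supports the requested B-protocol
 * natively, the hardware channel is opened and linked to the socket
 * channel directly; otherwise a registered B-protocol module is created
 * and placed between the socket channel and the hardware channel.
 */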
int
connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
	       u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq, rq2;
	int pmask, err;
	struct Bprotocol *bp;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev_name(&dev->dev), protocol,
		       adr->dev, adr->channel, adr->sapi,
		       adr->tei);
	ch->st = dev->D.st;
	pmask = 1 << (protocol & ISDN_P_B_MASK);
	if (pmask & dev->Bprotocols) {
		rq.protocol = protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err)
			return err;
		ch->recv = rq.ch->send;
		ch->peer = rq.ch;
		rq.ch->recv = ch->send;
		rq.ch->peer = ch;
		rq.ch->st = dev->D.st;
	} else {
		bp = get_Bprotocol4mask(pmask);
		if (!bp)
			return -ENOPROTOOPT;
		rq2.protocol = protocol;
		rq2.adr = *adr;
		rq2.ch = ch;
		err = bp->create(&rq2);
		if (err)
			return err;
		ch->recv = rq2.ch->send;
		ch->peer = rq2.ch;
		rq2.ch->st = dev->D.st;
		rq.protocol = rq2.protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err) {
			rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
			return err;
		}
		rq2.ch->recv = rq.ch->send;
		rq2.ch->peer = rq.ch;
		rq.ch->recv = rq2.ch->send;
		rq.ch->peer = rq2.ch;
		rq.ch->st = dev->D.st;
	}
	ch->protocol = protocol;
	ch->nr = rq.ch->nr;
	return 0;
}

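/*
 * Create a layer2 entity (LAPD TE or NT): open the matching D-channel
 * protocol on the device, then open a channel on the TEI manager and
 * add the resulting layer2 channel to the stack.
 */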
int
create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
		u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq;
	int err;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev_name(&dev->dev), protocol,
		       adr->dev, adr->channel, adr->sapi,
		       adr->tei);
	rq.protocol = ISDN_P_TE_S0;
	if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
		rq.protocol = ISDN_P_TE_E1;
	switch (protocol) {
	case ISDN_P_LAPD_NT:
		rq.protocol = ISDN_P_NT_S0;
		if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
			rq.protocol = ISDN_P_NT_E1;
		/* fall through */
	case ISDN_P_LAPD_TE:
		ch->recv = mISDN_queue_message;
		ch->peer = &dev->D.st->own;
		ch->st = dev->D.st;
		rq.adr.channel = 0;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
		if (err)
			break;
		rq.protocol = protocol;
		rq.adr = *adr;
		rq.ch = ch;
		err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
		if (!err) {
			if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
				break;
			add_layer2(rq.ch, dev->D.st);
			rq.ch->recv = mISDN_queue_message;
			rq.ch->peer = &dev->D.st->own;
			rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL);
		}
		break;
	default:
		err = -EPROTONOSUPPORT;
	}
	return err;
}

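/*
 * Tear down a channel: B-channels just close their peer, layer1
 * sockets are removed from the l1sock list and the D-channel is
 * closed, and LAPD channels close the layer2 entity and the TEI
 * manager channel.
 */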
void
delete_channel(struct mISDNchannel *ch)
{
	struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
	struct mISDNchannel *pch;

	if (!ch->st) {
		printk(KERN_WARNING "%s: no stack\n", __func__);
		return;
	}
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
		       dev_name(&ch->st->dev->dev), ch->protocol);
	if (ch->protocol >= ISDN_P_B_START) {
		if (ch->peer) {
			ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
			ch->peer = NULL;
		}
		return;
	}
	switch (ch->protocol) {
	case ISDN_P_NT_S0:
	case ISDN_P_TE_S0:
	case ISDN_P_NT_E1:
	case ISDN_P_TE_E1:
		write_lock_bh(&ch->st->l1sock.lock);
		sk_del_node_init(&msk->sk);
		write_unlock_bh(&ch->st->l1sock.lock);
		ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
		break;
	case ISDN_P_LAPD_TE:
		pch = get_channel4id(ch->st, ch->nr);
		if (pch) {
			mutex_lock(&ch->st->lmutex);
			list_del(&pch->list);
			mutex_unlock(&ch->st->lmutex);
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
			pch = ch->st->dev->teimgr;
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			       __func__);
		break;
	case ISDN_P_LAPD_NT:
		pch = ch->st->dev->teimgr;
		if (pch) {
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			       __func__);
		break;
	default:
		break;
	}
	return;
}

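/*
 * Shut down and free a stack: remove the TEI manager, tell the
 * mISDNStackd thread to abort and wait for it to exit, then release
 * the stack structure.
 */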
void
delete_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *st = dev->D.st;
	DECLARE_COMPLETION_ONSTACK(done);

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
		       dev_name(&st->dev->dev));
	if (dev->teimgr)
		delete_teimanager(dev->teimgr);
	if (st->thread) {
		if (st->notify) {
			printk(KERN_WARNING "%s: notifier in use\n",
			       __func__);
			complete(st->notify);
		}
		st->notify = &done;
		test_and_set_bit(mISDN_STACK_ABORT, &st->status);
		test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
		wake_up_interruptible(&st->workq);
		wait_for_completion(&done);
	}
	if (!list_empty(&st->layer2))
		printk(KERN_WARNING "%s: layer2 list not empty\n",
		       __func__);
	if (!hlist_empty(&st->l1sock.head))
		printk(KERN_WARNING "%s: layer1 list not empty\n",
		       __func__);
	kfree(st);
}

void
mISDN_initstack(u_int *dp)
{
	debug = dp;
}