stack.c

/*
 *
 * Author Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/slab.h>
#include <linux/mISDNif.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include "core.h"

static u_int *debug;

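/*
 * Append a message to the stack's message queue and, unless the stack is
 * marked stopped, flag pending work and wake up the stack thread.
 */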
static inline void
_queue_message(struct mISDNstack *st, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	if (*debug & DEBUG_QUEUE_FUNC)
		printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
		       __func__, hh->prim, hh->id, skb);
	skb_queue_tail(&st->msgq, skb);
	if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
		test_and_set_bit(mISDN_STACK_WORK, &st->status);
		wake_up_interruptible(&st->workq);
	}
}

static int
mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
{
	_queue_message(ch->st, skb);
	return 0;
}

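/*
 * Look up a layer2 channel on the stack by its channel number, holding the
 * layer2 list mutex during the walk.  Returns NULL if no channel matches.
 */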
static struct mISDNchannel *
get_channel4id(struct mISDNstack *st, u_int id)
{
	struct mISDNchannel *ch;

	mutex_lock(&st->lmutex);
	list_for_each_entry(ch, &st->layer2, list) {
		if (id == ch->nr)
			goto unlock;
	}
	ch = NULL;
unlock:
	mutex_unlock(&st->lmutex);
	return ch;
}

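/*
 * Deliver a copy of the skb to every bound socket on the list.  The copy is
 * allocated lazily and reused if the previous receiver rejected it; a
 * leftover copy is freed at the end.
 */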
static void
send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *cskb = NULL;

	read_lock(&sl->lock);
	sk_for_each(sk, &sl->head) {
		if (sk->sk_state != MISDN_BOUND)
			continue;
		if (!cskb)
			cskb = skb_copy(skb, GFP_ATOMIC);
		if (!cskb) {
			printk(KERN_WARNING "%s no skb\n", __func__);
			break;
		}
		if (!sock_queue_rcv_skb(sk, cskb))
			cskb = NULL;
	}
	read_unlock(&sl->lock);
	if (cskb)
		dev_kfree_skb(cskb);
}

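/*
 * Deliver a message to layer 2.  With the MISDN_ID_ANY address the skb is
 * duplicated for every layer2 channel (the last entry gets the original);
 * otherwise it goes to the channel whose address matches, falling back to
 * the TEI manager.  The skb is consumed on success and freed otherwise.
 */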
static void
send_layer2(struct mISDNstack *st, struct sk_buff *skb)
{
	struct sk_buff *cskb;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	struct mISDNchannel *ch;
	int ret;

	if (!st)
		return;
	mutex_lock(&st->lmutex);
	if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */
		list_for_each_entry(ch, &st->layer2, list) {
			if (list_is_last(&ch->list, &st->layer2)) {
				cskb = skb;
				skb = NULL;
			} else {
				cskb = skb_copy(skb, GFP_KERNEL);
			}
			if (cskb) {
				ret = ch->send(ch, cskb);
				if (ret) {
					if (*debug & DEBUG_SEND_ERR)
						printk(KERN_DEBUG
						       "%s ch%d prim(%x) addr(%x)"
						       " err %d\n",
						       __func__, ch->nr,
						       hh->prim, ch->addr, ret);
					dev_kfree_skb(cskb);
				}
			} else {
				printk(KERN_WARNING "%s ch%d addr %x no mem\n",
				       __func__, ch->nr, ch->addr);
				goto out;
			}
		}
	} else {
		list_for_each_entry(ch, &st->layer2, list) {
			if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
				ret = ch->send(ch, skb);
				if (!ret)
					skb = NULL;
				goto out;
			}
		}
		ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
		if (!ret)
			skb = NULL;
		else if (*debug & DEBUG_SEND_ERR)
			printk(KERN_DEBUG
			       "%s mgr prim(%x) err %d\n",
			       __func__, hh->prim, ret);
	}
out:
	mutex_unlock(&st->lmutex);
	if (skb)
		dev_kfree_skb(skb);
}

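/*
 * Dispatch one queued message according to the layer mask of its primitive:
 * layer 1 goes to the layer1 channel (with a copy to any bound L1 sockets),
 * layer 2 through send_layer2(), and the remaining layers are resolved by
 * channel id.  Returns -ESRCH when no receiver was found.
 */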
static inline int
send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	struct mISDNchannel *ch;
	int lm;

	lm = hh->prim & MISDN_LAYERMASK;
	if (*debug & DEBUG_QUEUE_FUNC)
		printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
		       __func__, hh->prim, hh->id, skb);
	if (lm == 0x1) {
		if (!hlist_empty(&st->l1sock.head)) {
			__net_timestamp(skb);
			send_socklist(&st->l1sock, skb);
		}
		return st->layer1->send(st->layer1, skb);
	} else if (lm == 0x2) {
		if (!hlist_empty(&st->l1sock.head))
			send_socklist(&st->l1sock, skb);
		send_layer2(st, skb);
		return 0;
	} else if (lm == 0x4) {
		ch = get_channel4id(st, hh->id);
		if (ch)
			return ch->send(ch, skb);
		else
			printk(KERN_WARNING
			       "%s: dev(%s) prim(%x) id(%x) no channel\n",
			       __func__, dev_name(&st->dev->dev), hh->prim,
			       hh->id);
	} else if (lm == 0x8) {
		WARN_ON(lm == 0x8);
		ch = get_channel4id(st, hh->id);
		if (ch)
			return ch->send(ch, skb);
		else
			printk(KERN_WARNING
			       "%s: dev(%s) prim(%x) id(%x) no channel\n",
			       __func__, dev_name(&st->dev->dev), hh->prim,
			       hh->id);
	} else {
		/* broadcast not handled yet */
		printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
		       __func__, dev_name(&st->dev->dev), hh->prim);
	}
	return -ESRCH;
}

static void
do_clear_stack(struct mISDNstack *st)
{
}

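/*
 * Stack worker thread.  Drains the message queue whenever mISDN_STACK_WORK
 * is set, handles the STOPPED/CLEARING/RESTART state bits, completes the
 * notify completion when one is pending, and sleeps until an action bit is
 * raised again.  On mISDN_STACK_ABORT it purges the queue and exits.
 */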
static int
mISDNStackd(void *data)
{
	struct mISDNstack *st = data;
#ifdef MISDN_MSG_STATS
	cputime_t utime, stime;
#endif
	int err = 0;

	sigfillset(&current->blocked);
	if (*debug & DEBUG_MSG_THREAD)
		printk(KERN_DEBUG "mISDNStackd %s started\n",
		       dev_name(&st->dev->dev));

	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}

	for (;;) {
		struct sk_buff *skb;

		if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
			test_and_clear_bit(mISDN_STACK_WORK, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
		} else
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
		while (test_bit(mISDN_STACK_WORK, &st->status)) {
			skb = skb_dequeue(&st->msgq);
			if (!skb) {
				test_and_clear_bit(mISDN_STACK_WORK,
						   &st->status);
				/* test if a race happens */
				skb = skb_dequeue(&st->msgq);
				if (!skb)
					continue;
				test_and_set_bit(mISDN_STACK_WORK,
						 &st->status);
			}
#ifdef MISDN_MSG_STATS
			st->msg_cnt++;
#endif
			err = send_msg_to_layer(st, skb);
			if (unlikely(err)) {
				if (*debug & DEBUG_SEND_ERR)
					printk(KERN_DEBUG
					       "%s: %s prim(%x) id(%x) "
					       "send call(%d)\n",
					       __func__, dev_name(&st->dev->dev),
					       mISDN_HEAD_PRIM(skb),
					       mISDN_HEAD_ID(skb), err);
				dev_kfree_skb(skb);
				continue;
			}
			if (unlikely(test_bit(mISDN_STACK_STOPPED,
					      &st->status))) {
				test_and_clear_bit(mISDN_STACK_WORK,
						   &st->status);
				test_and_clear_bit(mISDN_STACK_RUNNING,
						   &st->status);
				break;
			}
		}
		if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
			test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
			do_clear_stack(st);
			test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
			test_and_set_bit(mISDN_STACK_RESTART, &st->status);
		}
		if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
			test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
			if (!skb_queue_empty(&st->msgq))
				test_and_set_bit(mISDN_STACK_WORK,
						 &st->status);
		}
		if (test_bit(mISDN_STACK_ABORT, &st->status))
			break;
		if (st->notify != NULL) {
			complete(st->notify);
			st->notify = NULL;
		}
#ifdef MISDN_MSG_STATS
		st->sleep_cnt++;
#endif
		test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
		wait_event_interruptible(st->workq, (st->status &
						     mISDN_STACK_ACTION_MASK));
		if (*debug & DEBUG_MSG_THREAD)
			printk(KERN_DEBUG "%s: %s wake status %08lx\n",
			       __func__, dev_name(&st->dev->dev), st->status);
		test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
		test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
		if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
#ifdef MISDN_MSG_STATS
			st->stopped_cnt++;
#endif
		}
	}
#ifdef MISDN_MSG_STATS
	printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
	       "msg %d sleep %d stopped\n",
	       dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
	       st->stopped_cnt);
	task_cputime(st->thread, &utime, &stime);
	printk(KERN_DEBUG
	       "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
	       dev_name(&st->dev->dev), utime, stime);
	printk(KERN_DEBUG
	       "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
	       dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
	printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
	       dev_name(&st->dev->dev));
#endif
	test_and_set_bit(mISDN_STACK_KILLED, &st->status);
	test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
	test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
	test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
	skb_queue_purge(&st->msgq);
	st->thread = NULL;
	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}
	return 0;
}

static int
l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
{
	if (!ch->st)
		return -ENODEV;
	__net_timestamp(skb);
	_queue_message(ch->st, skb);
	return 0;
}

void
set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
{
	ch->addr = sapi | (tei << 8);
}

void
__add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	list_add_tail(&ch->list, &st->layer2);
}

void
add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	mutex_lock(&st->lmutex);
	__add_layer2(ch, st);
	mutex_unlock(&st->lmutex);
}

static int
st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	if (!ch->st || !ch->st->layer1)
		return -EINVAL;
	return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
}

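/*
 * Allocate and initialise a stack for a device: set up the layer2 list, the
 * L1 socket list, the message queue and the TEI manager, wire the device's
 * D-channel into the stack, then start the mISDNStackd thread and wait for
 * it to signal the completion.
 */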
int
create_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *newst;
	int err;
	DECLARE_COMPLETION_ONSTACK(done);

	newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
	if (!newst) {
		printk(KERN_ERR "kmalloc mISDN_stack failed\n");
		return -ENOMEM;
	}
	newst->dev = dev;
	INIT_LIST_HEAD(&newst->layer2);
	INIT_HLIST_HEAD(&newst->l1sock.head);
	rwlock_init(&newst->l1sock.lock);
	init_waitqueue_head(&newst->workq);
	skb_queue_head_init(&newst->msgq);
	mutex_init(&newst->lmutex);
	dev->D.st = newst;
	err = create_teimanager(dev);
	if (err) {
		printk(KERN_ERR "kmalloc teimanager failed\n");
		kfree(newst);
		return err;
	}
	dev->teimgr->peer = &newst->own;
	dev->teimgr->recv = mISDN_queue_message;
	dev->teimgr->st = newst;
	newst->layer1 = &dev->D;
	dev->D.recv = l1_receive;
	dev->D.peer = &newst->own;
	newst->own.st = newst;
	newst->own.ctrl = st_own_ctrl;
	newst->own.send = mISDN_queue_message;
	newst->own.recv = mISDN_queue_message;
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
		       dev_name(&newst->dev->dev));
	newst->notify = &done;
	newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
				    dev_name(&newst->dev->dev));
	if (IS_ERR(newst->thread)) {
		err = PTR_ERR(newst->thread);
		printk(KERN_ERR
		       "mISDN:cannot create kernel thread for %s (%d)\n",
		       dev_name(&newst->dev->dev), err);
		delete_teimanager(dev->teimgr);
		kfree(newst);
	} else
		wait_for_completion(&done);
	return err;
}

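/*
 * Bind a socket channel to layer 1 of a device: open the D-channel with the
 * requested protocol and add the socket to the stack's L1 socket list so it
 * receives copies of L1/L2 traffic.
 */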
int
connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
	       u_int protocol, struct sockaddr_mISDN *adr)
{
	struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
	struct channel_req rq;
	int err;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev_name(&dev->dev), protocol, adr->dev,
		       adr->channel, adr->sapi, adr->tei);
	switch (protocol) {
	case ISDN_P_NT_S0:
	case ISDN_P_NT_E1:
	case ISDN_P_TE_S0:
	case ISDN_P_TE_E1:
		ch->recv = mISDN_queue_message;
		ch->peer = &dev->D.st->own;
		ch->st = dev->D.st;
		rq.protocol = protocol;
		rq.adr.channel = adr->channel;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err,
		       dev->id);
		if (err)
			return err;
		write_lock_bh(&dev->D.st->l1sock.lock);
		sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
		write_unlock_bh(&dev->D.st->l1sock.lock);
		break;
	default:
		return -ENOPROTOOPT;
	}
	return 0;
}

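/*
 * Build a B-channel path.  If the hardware supports the requested B-protocol
 * directly, the socket channel and the hardware channel are cross-linked;
 * otherwise a Bprotocol module is inserted between them and the raw channel
 * is opened underneath it.
 */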
int
connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
	       u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq, rq2;
	int pmask, err;
	struct Bprotocol *bp;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev_name(&dev->dev), protocol,
		       adr->dev, adr->channel, adr->sapi,
		       adr->tei);
	ch->st = dev->D.st;
	pmask = 1 << (protocol & ISDN_P_B_MASK);
	if (pmask & dev->Bprotocols) {
		rq.protocol = protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err)
			return err;
		ch->recv = rq.ch->send;
		ch->peer = rq.ch;
		rq.ch->recv = ch->send;
		rq.ch->peer = ch;
		rq.ch->st = dev->D.st;
	} else {
		bp = get_Bprotocol4mask(pmask);
		if (!bp)
			return -ENOPROTOOPT;
		rq2.protocol = protocol;
		rq2.adr = *adr;
		rq2.ch = ch;
		err = bp->create(&rq2);
		if (err)
			return err;
		ch->recv = rq2.ch->send;
		ch->peer = rq2.ch;
		rq2.ch->st = dev->D.st;
		rq.protocol = rq2.protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err) {
			rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
			return err;
		}
		rq2.ch->recv = rq.ch->send;
		rq2.ch->peer = rq.ch;
		rq.ch->recv = rq2.ch->send;
		rq.ch->peer = rq2.ch;
		rq.ch->st = dev->D.st;
	}
	ch->protocol = protocol;
	ch->nr = rq.ch->nr;
	return 0;
}

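/*
 * Open a layer 2 (LAPD) entity: first open the matching D-channel protocol
 * on the device, then ask the TEI manager for the L2 channel and, except in
 * the LAPD NT case where no channel is returned, put it on the stack's
 * layer2 list.
 */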
int
create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
		u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq;
	int err;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev_name(&dev->dev), protocol,
		       adr->dev, adr->channel, adr->sapi,
		       adr->tei);
	rq.protocol = ISDN_P_TE_S0;
	if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
		rq.protocol = ISDN_P_TE_E1;
	switch (protocol) {
	case ISDN_P_LAPD_NT:
		rq.protocol = ISDN_P_NT_S0;
		if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
			rq.protocol = ISDN_P_NT_E1;
		/* fall through */
	case ISDN_P_LAPD_TE:
		ch->recv = mISDN_queue_message;
		ch->peer = &dev->D.st->own;
		ch->st = dev->D.st;
		rq.adr.channel = 0;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
		if (err)
			break;
		rq.protocol = protocol;
		rq.adr = *adr;
		rq.ch = ch;
		err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
		if (!err) {
			if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
				break;
			add_layer2(rq.ch, dev->D.st);
			rq.ch->recv = mISDN_queue_message;
			rq.ch->peer = &dev->D.st->own;
			rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
		}
		break;
	default:
		err = -EPROTONOSUPPORT;
	}
	return err;
}

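/*
 * Tear down a channel.  B-channels only close their peer; D-channel sockets
 * are removed from the L1 socket list and the D-channel is closed; LAPD TE
 * channels are unlinked from the layer2 list and closed along with the TEI
 * manager, while LAPD NT only closes the TEI manager.
 */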
void
delete_channel(struct mISDNchannel *ch)
{
	struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
	struct mISDNchannel *pch;

	if (!ch->st) {
		printk(KERN_WARNING "%s: no stack\n", __func__);
		return;
	}
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
		       dev_name(&ch->st->dev->dev), ch->protocol);
	if (ch->protocol >= ISDN_P_B_START) {
		if (ch->peer) {
			ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
			ch->peer = NULL;
		}
		return;
	}
	switch (ch->protocol) {
	case ISDN_P_NT_S0:
	case ISDN_P_TE_S0:
	case ISDN_P_NT_E1:
	case ISDN_P_TE_E1:
		write_lock_bh(&ch->st->l1sock.lock);
		sk_del_node_init(&msk->sk);
		write_unlock_bh(&ch->st->l1sock.lock);
		ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
		break;
	case ISDN_P_LAPD_TE:
		pch = get_channel4id(ch->st, ch->nr);
		if (pch) {
			mutex_lock(&ch->st->lmutex);
			list_del(&pch->list);
			mutex_unlock(&ch->st->lmutex);
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
			pch = ch->st->dev->teimgr;
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			       __func__);
		break;
	case ISDN_P_LAPD_NT:
		pch = ch->st->dev->teimgr;
		if (pch) {
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			       __func__);
		break;
	default:
		break;
	}
	return;
}

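/*
 * Stop and free a stack: remove the TEI manager, ask the stack thread to
 * abort and wait for it to finish, warn if any layer1/layer2 users are
 * still registered, then free the stack structure.
 */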
void
delete_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *st = dev->D.st;
	DECLARE_COMPLETION_ONSTACK(done);

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
		       dev_name(&st->dev->dev));
	if (dev->teimgr)
		delete_teimanager(dev->teimgr);
	if (st->thread) {
		if (st->notify) {
			printk(KERN_WARNING "%s: notifier in use\n",
			       __func__);
			complete(st->notify);
		}
		st->notify = &done;
		test_and_set_bit(mISDN_STACK_ABORT, &st->status);
		test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
		wake_up_interruptible(&st->workq);
		wait_for_completion(&done);
	}
	if (!list_empty(&st->layer2))
		printk(KERN_WARNING "%s: layer2 list not empty\n",
		       __func__);
	if (!hlist_empty(&st->l1sock.head))
		printk(KERN_WARNING "%s: layer1 list not empty\n",
		       __func__);
	kfree(st);
}

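/*
 * Hand the core's debug level pointer to this file; the functions above
 * dereference it to decide whether to emit debug printks.
 */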
void
mISDN_initstack(u_int *dp)
{
	debug = dp;
}