ctcm_mpc.c 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162
  1. /*
  2. * Copyright IBM Corp. 2004, 2007
  3. * Authors: Belinda Thompson (belindat@us.ibm.com)
  4. * Andy Richter (richtera@us.ibm.com)
  5. * Peter Tiedemann (ptiedem@de.ibm.com)
  6. */
  7. /*
  8. This module exports functions to be used by CCS:
  9. EXPORT_SYMBOL(ctc_mpc_alloc_channel);
  10. EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
  11. EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
  12. EXPORT_SYMBOL(ctc_mpc_flow_control);
  13. */
  14. #undef DEBUG
  15. #undef DEBUGDATA
  16. #undef DEBUGCCW
  17. #define KMSG_COMPONENT "ctcm"
  18. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  19. #include <linux/module.h>
  20. #include <linux/init.h>
  21. #include <linux/kernel.h>
  22. #include <linux/slab.h>
  23. #include <linux/errno.h>
  24. #include <linux/types.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/timer.h>
  27. #include <linux/sched.h>
  28. #include <linux/signal.h>
  29. #include <linux/string.h>
  30. #include <linux/proc_fs.h>
  31. #include <linux/ip.h>
  32. #include <linux/if_arp.h>
  33. #include <linux/tcp.h>
  34. #include <linux/skbuff.h>
  35. #include <linux/ctype.h>
  36. #include <linux/netdevice.h>
  37. #include <net/dst.h>
  38. #include <linux/io.h> /* instead of <asm/io.h> ok ? */
  39. #include <asm/ccwdev.h>
  40. #include <asm/ccwgroup.h>
  41. #include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */
  42. #include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */
  43. #include <linux/wait.h>
  44. #include <linux/moduleparam.h>
  45. #include <asm/idals.h>
  46. #include "ctcm_main.h"
  47. #include "ctcm_mpc.h"
  48. #include "ctcm_fsms.h"
/*
 * Template XID2 (exchange identification) block: constant default
 * values every channel's XID starts from; channel-specific fields
 * (sender id, DLC type, TG number, ...) are presumably overwritten at
 * the usage sites — confirm where init_xid is copied.
 */
static const struct xid2 init_xid = {
	.xid2_type_id	=	XID_FM2,
	.xid2_len	=	0x45,
	.xid2_adj_id	=	0,
	.xid2_rlen	=	0x31,
	.xid2_resv1	=	0,
	.xid2_flag1	=	0,
	.xid2_fmtt	=	0,
	.xid2_flag4	=	0x80,
	.xid2_resv2	=	0,
	.xid2_tgnum	=	0,
	.xid2_sender_id	=	0,
	.xid2_flag2	=	0,
	.xid2_option	=	XID2_0,
	.xid2_resv3	=	"\x00",
	.xid2_resv4	=	0,
	.xid2_dlc_type	=	XID2_READ_SIDE,
	.xid2_resv5	=	0,
	.xid2_mpc_flag	=	0,
	.xid2_resv6	=	0,
	.xid2_buf_len	=	(MPC_BUFSIZE_DEFAULT - 35),
};
/*
 * Normal transport header used for XID frames: flagged as XID both in
 * the channel flag and the block flag.
 */
static const struct th_header thnorm = {
	.th_seg		=	0x00,
	.th_ch_flag	=	TH_IS_XID,
	.th_blk_flag	=	TH_DATA_IS_XID,
	.th_is_xid	=	0x01,
	.th_seq_num	=	0x00000000,
};
/*
 * Dummy transport header: like thnorm but with the channel flag
 * cleared (th_ch_flag == 0x00); the data is still marked as XID.
 */
static const struct th_header thdummy = {
	.th_seg		=	0x00,
	.th_ch_flag	=	0x00,
	.th_blk_flag	=	TH_DATA_IS_XID,
	.th_is_xid	=	0x01,
	.th_seq_num	=	0x00000000,
};
  85. /*
  86. * Definition of one MPC group
  87. */
  88. /*
  89. * Compatibility macros for busy handling
  90. * of network devices.
  91. */
  92. static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb);
  93. /*
  94. * MPC Group state machine actions (static prototypes)
  95. */
  96. static void mpc_action_nop(fsm_instance *fsm, int event, void *arg);
  97. static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg);
  98. static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg);
  99. static void mpc_action_timeout(fsm_instance *fi, int event, void *arg);
  100. static int mpc_validate_xid(struct mpcg_info *mpcginfo);
  101. static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg);
  102. static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg);
  103. static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg);
  104. static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg);
  105. static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg);
  106. static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg);
  107. #ifdef DEBUGDATA
/*-------------------------------------------------------------------*
 * Dump buffer format                                                 *
 *                                                                    *
 *--------------------------------------------------------------------*/
/*
 * Hex/ASCII debug dump of @len bytes at @buf: 16 bytes per output
 * line (address, offset, hex image, printable image); runs of lines
 * whose hex image repeats the previous line are collapsed into a
 * single "Duplicate as above to <addr>" marker.
 */
void ctcmpc_dumpit(char *buf, int len)
{
	__u32 ct, sw, rm, dup;
	char *ptr, *rptr;
	char tbuf[82], tdup[82];
	char addr[22];
	char boff[12];
	char bhex[82], duphex[82];
	char basc[40];

	sw = 0;			/* byte position within the current line */
	rptr = ptr = buf;
	rm = 16;		/* bytes still missing in the current line */
	duphex[0] = 0x00;	/* hex image of the previously emitted line */
	dup = 0;		/* number of suppressed duplicate lines */

	for (ct = 0; ct < len; ct++, ptr++, rptr++) {
		if (sw == 0) {
			/* start of a new 16-byte line: capture its address
			 * and offset, reset the hex/ascii images */
			sprintf(addr, "%16.16llx", (__u64)rptr);
			sprintf(boff, "%4.4X", (__u32)ct);
			bhex[0] = '\0';
			basc[0] = '\0';
		}
		/* visually group the hex image every 4 bytes */
		if ((sw == 4) || (sw == 12))
			strcat(bhex, " ");
		if (sw == 8)
			strcat(bhex, " ");
		sprintf(tbuf, "%2.2llX", (__u64)*ptr);
		tbuf[2] = '\0';
		strcat(bhex, tbuf);
		/* printable characters appear verbatim in the ascii
		 * column, everything else as '.' */
		if ((0 != isprint(*ptr)) && (*ptr >= 0x20))
			basc[sw] = *ptr;
		else
			basc[sw] = '.';
		basc[sw+1] = '\0';
		sw++;
		rm--;
		if (sw != 16)
			continue;
		/* line complete: emit it unless it repeats the previous
		 * line, in which case just count it as a duplicate */
		if ((strcmp(duphex, bhex)) != 0) {
			if (dup != 0) {
				sprintf(tdup,
					"Duplicate as above to %s", addr);
				ctcm_pr_debug(" --- %s ---\n",
						tdup);
			}
			ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
					addr, boff, bhex, basc);
			dup = 0;
			strcpy(duphex, bhex);
		} else
			dup++;

		sw = 0;
		rm = 16;
	} /* endfor */

	/* flush a trailing partial line, blank-padding the hex and
	 * ascii columns to full width */
	if (sw != 0) {
		for ( ; rm > 0; rm--, sw++) {
			if ((sw == 4) || (sw == 12))
				strcat(bhex, " ");
			if (sw == 8)
				strcat(bhex, " ");
			strcat(bhex, " ");
			strcat(basc, " ");
		}
		if (dup != 0) {
			sprintf(tdup, "Duplicate as above to %s", addr);
			ctcm_pr_debug(" --- %s ---\n", tdup);
		}
		ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
				addr, boff, bhex, basc);
	} else {
		/* buffer ended exactly on a line boundary: only pending
		 * duplicate bookkeeping may remain to be reported */
		if (dup >= 1) {
			sprintf(tdup, "Duplicate as above to %s", addr);
			ctcm_pr_debug(" --- %s ---\n", tdup);
		}
		if (dup != 0) {
			ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
					addr, boff, bhex, basc);
		}
	}
	return;
} /* end of ctcmpc_dumpit */
  192. #endif
  193. #ifdef DEBUGDATA
/*
 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
 *
 * skb	The sk_buff to dump.
 * offset	Offset relative to skb->data, where to start the dump.
 */
void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
{
	__u8 *p = skb->data;
	struct th_header *header;
	struct pdu *pheader;
	int bl = skb->len;
	int i;

	if (p == NULL)
		return;

	p += offset;
	header = (struct th_header *)p;

	ctcm_pr_debug("dump:\n");
	ctcm_pr_debug("skb len=%d \n", skb->len);
	if (skb->len > 2) {
		switch (header->th_ch_flag) {
		case TH_HAS_PDU:
			/* PDU frame: dump the PDU header below */
			break;
		case 0x00:
		case TH_IS_XID:
			if ((header->th_blk_flag == TH_DATA_IS_XID) &&
			   (header->th_is_xid == 0x01))
				goto dumpth;
			/* fall through - next case jumps to dumpth too */
		case TH_SWEEP_REQ:
			goto dumpth;
		case TH_SWEEP_RESP:
			goto dumpth;
		default:
			break;
		}

		/* not a TH-only frame: interpret as PDU */
		pheader = (struct pdu *)p;
		ctcm_pr_debug("pdu->offset: %d hex: %04x\n",
			       pheader->pdu_offset, pheader->pdu_offset);
		ctcm_pr_debug("pdu->flag : %02x\n", pheader->pdu_flag);
		ctcm_pr_debug("pdu->proto : %02x\n", pheader->pdu_proto);
		ctcm_pr_debug("pdu->seq : %02x\n", pheader->pdu_seq);
		goto dumpdata;

dumpth:
		ctcm_pr_debug("th->seg : %02x\n", header->th_seg);
		ctcm_pr_debug("th->ch : %02x\n", header->th_ch_flag);
		ctcm_pr_debug("th->blk_flag: %02x\n", header->th_blk_flag);
		/* NOTE(review): the DATA/XID labels look swapped here
		 * (th_is_xid set prints "DATA") - confirm intent */
		ctcm_pr_debug("th->type : %s\n",
			       (header->th_is_xid) ? "DATA" : "XID");
		ctcm_pr_debug("th->seqnum : %04x\n", header->th_seq_num);
	}
dumpdata:
	/* dump at most the first 32 bytes of payload */
	if (bl > 32)
		bl = 32;
	ctcm_pr_debug("data: ");
	for (i = 0; i < bl; i++)
		ctcm_pr_debug("%02x%s", *p++, (i % 16) ? " " : "\n");
	ctcm_pr_debug("\n");
}
  252. #endif
  253. static struct net_device *ctcmpc_get_dev(int port_num)
  254. {
  255. char device[20];
  256. struct net_device *dev;
  257. struct ctcm_priv *priv;
  258. sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
  259. dev = __dev_get_by_name(&init_net, device);
  260. if (dev == NULL) {
  261. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  262. "%s: Device not found by name: %s",
  263. CTCM_FUNTAIL, device);
  264. return NULL;
  265. }
  266. priv = dev->ml_priv;
  267. if (priv == NULL) {
  268. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  269. "%s(%s): dev->ml_priv is NULL",
  270. CTCM_FUNTAIL, device);
  271. return NULL;
  272. }
  273. if (priv->mpcg == NULL) {
  274. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  275. "%s(%s): priv->mpcg is NULL",
  276. CTCM_FUNTAIL, device);
  277. return NULL;
  278. }
  279. return dev;
  280. }
/*
 * ctc_mpc_alloc_channel
 *	(exported interface)
 *
 * Device Initialization :
 *	ACTPATH driven IO operations
 *
 * Registers @callback as the group's channel-allocation callback and
 * (re)starts the device/XID setup depending on the current group FSM
 * state.  Returns 0 on success, 1 if no device exists for @port_num.
 */
int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
{
	struct net_device *dev;
	struct mpc_group *grp;
	struct ctcm_priv *priv;

	dev = ctcmpc_get_dev(port_num);
	if (dev == NULL)
		return 1;
	priv = dev->ml_priv;
	grp = priv->mpcg;

	grp->allochanfunc = callback;
	grp->port_num = port_num;
	grp->port_persist = 1;

	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
			"%s(%s): state=%s",
			CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_INOP:
		/* Group is in the process of terminating */
		grp->alloc_called = 1;
		break;
	case MPCG_STATE_RESET:
		/* MPC Group will transition to state		  */
		/* MPCG_STATE_XID2INITW iff the minimum number	  */
		/* of 1 read and 1 write channel have successfully*/
		/* activated					  */
		/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
		if (callback)
			grp->send_qllc_disc = 1;
		/* fall through - a RESET group continues with the */
		/* same (re)initialisation as XID0IOWAIT	   */
	case MPCG_STATE_XID0IOWAIT:
		fsm_deltimer(&grp->timer);
		grp->outstanding_xid2 = 0;
		grp->outstanding_xid7 = 0;
		grp->outstanding_xid7_p2 = 0;
		grp->saved_xid2 = NULL;
		if (callback)
			ctcm_open(dev);
		fsm_event(priv->fsm, DEV_EVENT_START, dev);
		break;
	case MPCG_STATE_READY:
		/* XID exchanges completed after PORT was activated */
		/* Link station already active			    */
		/* Maybe timing issue...retry callback		    */
		grp->allocchan_callback_retries++;
		if (grp->allocchan_callback_retries < 4) {
			if (grp->allochanfunc)
				grp->allochanfunc(grp->port_num,
						  grp->group_max_buflen);
		} else {
			/* there are problems...bail out	    */
			/* there may be a state mismatch so restart */
			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
			grp->allocchan_callback_retries = 0;
		}
		break;
	}

	return 0;
}
EXPORT_SYMBOL(ctc_mpc_alloc_channel);
/*
 * ctc_mpc_establish_connectivity
 *	(exported interface)
 *
 * Registers @callback to be invoked with (port_num, rc, max_buflen)
 * when XID negotiation finishes, and kicks off the active (x-side)
 * XID0 exchange if the group is ready for it.  A callback with rc -1
 * reports immediate rejection; in MPCG_STATE_READY the callback is
 * retried (link already up).
 */
void ctc_mpc_establish_connectivity(int port_num,
				void (*callback)(int, int, int))
{
	struct net_device *dev;
	struct mpc_group *grp;
	struct ctcm_priv *priv;
	struct channel *rch, *wch;

	dev = ctcmpc_get_dev(port_num);
	if (dev == NULL)
		return;
	priv = dev->ml_priv;
	grp = priv->mpcg;
	rch = priv->channel[CTCM_READ];
	wch = priv->channel[CTCM_WRITE];

	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
			"%s(%s): state=%s",
			CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));

	grp->estconnfunc = callback;
	grp->port_num = port_num;

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_READY:
		/* XID exchanges completed after PORT was activated */
		/* Link station already active			    */
		/* Maybe timing issue...retry callback		    */
		fsm_deltimer(&grp->timer);
		grp->estconn_callback_retries++;
		if (grp->estconn_callback_retries < 4) {
			if (grp->estconnfunc) {
				grp->estconnfunc(grp->port_num, 0,
						grp->group_max_buflen);
				grp->estconnfunc = NULL;
			}
		} else {
			/* there are problems...bail out	    */
			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
			grp->estconn_callback_retries = 0;
		}
		break;
	case MPCG_STATE_INOP:
	case MPCG_STATE_RESET:
		/* MPC Group is not ready to start XID - min num of */
		/* 1 read and 1 write channel have not been acquired*/
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): REJECTED - inactive channels",
					CTCM_FUNTAIL, dev->name);
		if (grp->estconnfunc) {
			grp->estconnfunc(grp->port_num, -1, 0);
			grp->estconnfunc = NULL;
		}
		break;
	case MPCG_STATE_XID2INITW:
		/* alloc channel was called but no XID exchange    */
		/* has occurred. initiate xside XID exchange	   */
		/* make sure yside XID0 processing has not started */
		if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
			(fsm_getstate(wch->fsm) > CH_XID0_PENDING)) {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): ABORT - PASSIVE XID",
					CTCM_FUNTAIL, dev->name);
			break;
		}
		grp->send_qllc_disc = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT);
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
						MPCG_EVENT_TIMER, dev);
		grp->outstanding_xid7 = 0;
		grp->outstanding_xid7_p2 = 0;
		grp->saved_xid2 = NULL;
		/* both the read and the write channel must be in
		 * the group and ready for XID0, otherwise fail the
		 * request back to the caller via the callback */
		if ((rch->in_mpcgroup) &&
				(fsm_getstate(rch->fsm) == CH_XID0_PENDING))
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch);
		else {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): RX-%s not ready for ACTIVE XID0",
					CTCM_FUNTAIL, dev->name, rch->id);
			if (grp->estconnfunc) {
				grp->estconnfunc(grp->port_num, -1, 0);
				grp->estconnfunc = NULL;
			}
			fsm_deltimer(&grp->timer);
			goto done;
		}
		if ((wch->in_mpcgroup) &&
				(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch);
		else {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): WX-%s not ready for ACTIVE XID0",
					CTCM_FUNTAIL, dev->name, wch->id);
			if (grp->estconnfunc) {
				grp->estconnfunc(grp->port_num, -1, 0);
				grp->estconnfunc = NULL;
			}
			fsm_deltimer(&grp->timer);
			goto done;
		}
		break;
	case MPCG_STATE_XID0IOWAIT:
		/* already in active XID negotiations */
	default:
		break;
	}

done:
	CTCM_PR_DEBUG("Exit %s()\n", __func__);
	return;
}
EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
/*
 * ctc_mpc_dealloc_ch
 *	(exported interface)
 *
 * Tears down the MPC group for @port_num: cancels pending timers,
 * drops both registered callbacks, clears the persist flag, drives
 * the group FSM to INOP and closes the device.
 */
void ctc_mpc_dealloc_ch(int port_num)
{
	struct net_device *dev;
	struct ctcm_priv *priv;
	struct mpc_group *grp;

	dev = ctcmpc_get_dev(port_num);
	if (dev == NULL)
		return;
	priv = dev->ml_priv;
	grp = priv->mpcg;

	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
			"%s: %s: refcount = %d\n",
			CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev));

	fsm_deltimer(&priv->restart_timer);
	grp->channels_terminating = 0;
	fsm_deltimer(&grp->timer);
	grp->allochanfunc = NULL;
	grp->estconnfunc = NULL;
	grp->port_persist = 0;
	grp->send_qllc_disc = 0;
	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);

	ctcm_close(dev);
	return;
}
EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
/*
 * ctc_mpc_flow_control
 *	(exported interface)
 *
 * flowc == 1: enter MPCG_STATE_FLOWC (flow control on); presumably
 * pauses upward delivery - the FLOWC state is checked elsewhere.
 * flowc == 0: leave FLOWC, back to READY, and kick the read channel's
 * tasklet so queued data gets processed.  grp->flow_off_called covers
 * the race where a flow-off arrives while the group is still READY.
 */
void ctc_mpc_flow_control(int port_num, int flowc)
{
	struct ctcm_priv *priv;
	struct mpc_group *grp;
	struct net_device *dev;
	struct channel *rch;
	int mpcg_state;

	dev = ctcmpc_get_dev(port_num);
	if (dev == NULL)
		return;
	priv = dev->ml_priv;
	grp = priv->mpcg;

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
			"%s: %s: flowc = %d",
				CTCM_FUNTAIL, dev->name, flowc);

	rch = priv->channel[CTCM_READ];

	mpcg_state = fsm_getstate(grp->fsm);
	switch (flowc) {
	case 1:
		if (mpcg_state == MPCG_STATE_FLOWC)
			break;
		if (mpcg_state == MPCG_STATE_READY) {
			if (grp->flow_off_called == 1)
				grp->flow_off_called = 0;
			else
				fsm_newstate(grp->fsm, MPCG_STATE_FLOWC);
			break;
		}
		break;
	case 0:
		if (mpcg_state == MPCG_STATE_FLOWC) {
			fsm_newstate(grp->fsm, MPCG_STATE_READY);
			/* ensure any data that has accumulated */
			/* on the io_queue will now be sent	*/
			tasklet_schedule(&rch->ch_tasklet);
		}
		/* possible race condition			*/
		if (mpcg_state == MPCG_STATE_READY) {
			grp->flow_off_called = 1;
			break;
		}
		break;
	}
}
EXPORT_SYMBOL(ctc_mpc_flow_control);
  537. static int mpc_send_qllc_discontact(struct net_device *);
/*
 * helper function of ctcmpc_unpack_skb
 *
 * Accounts one received sweep response.  When neither sweep requests
 * nor responses remain outstanding the sweep is complete: stop the
 * write channel's sweep timer, reset both channels' TH sequence
 * numbers and unblock the device.  Consumes (kfrees) @mpcginfo.
 */
static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
{
	struct channel *rch = mpcginfo->ch;
	struct net_device *dev = rch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct channel *ch = priv->channel[CTCM_WRITE];

	CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
	CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);

	grp->sweep_rsp_pend_num--;

	if ((grp->sweep_req_pend_num == 0) &&
			(grp->sweep_rsp_pend_num == 0)) {
		/* sweep finished on all channels */
		fsm_deltimer(&ch->sweep_timer);
		grp->in_sweep = 0;
		rch->th_seq_num = 0x00;
		ch->th_seq_num = 0x00;
		ctcm_clear_busy_do(dev);
	}

	kfree(mpcginfo);

	return;
}
  562. /*
  563. * helper function of mpc_rcvd_sweep_req
  564. * which is a helper of ctcmpc_unpack_skb
  565. */
  566. static void ctcmpc_send_sweep_resp(struct channel *rch)
  567. {
  568. struct net_device *dev = rch->netdev;
  569. struct ctcm_priv *priv = dev->ml_priv;
  570. struct mpc_group *grp = priv->mpcg;
  571. struct th_sweep *header;
  572. struct sk_buff *sweep_skb;
  573. struct channel *ch = priv->channel[CTCM_WRITE];
  574. CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
  575. sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
  576. if (sweep_skb == NULL) {
  577. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  578. "%s(%s): sweep_skb allocation ERROR\n",
  579. CTCM_FUNTAIL, rch->id);
  580. goto done;
  581. }
  582. header = kmalloc(sizeof(struct th_sweep), gfp_type());
  583. if (!header) {
  584. dev_kfree_skb_any(sweep_skb);
  585. goto done;
  586. }
  587. header->th.th_seg = 0x00 ;
  588. header->th.th_ch_flag = TH_SWEEP_RESP;
  589. header->th.th_blk_flag = 0x00;
  590. header->th.th_is_xid = 0x00;
  591. header->th.th_seq_num = 0x00;
  592. header->sw.th_last_seq = ch->th_seq_num;
  593. memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
  594. kfree(header);
  595. dev->trans_start = jiffies;
  596. skb_queue_tail(&ch->sweep_queue, sweep_skb);
  597. fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
  598. return;
  599. done:
  600. grp->in_sweep = 0;
  601. ctcm_clear_busy_do(dev);
  602. fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
  603. return;
  604. }
/*
 * helper function of ctcmpc_unpack_skb
 *
 * Handles a received sweep request: on the first request of a sweep
 * cycle, mark the group as sweeping, block the device and arm the
 * per-read-channel request/response counters; then account this
 * request and answer it on the write channel.  Consumes (kfrees)
 * @mpcginfo.
 */
static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
{
	struct channel *rch = mpcginfo->ch;
	struct net_device *dev = rch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct channel *ch = priv->channel[CTCM_WRITE];

	if (do_debug)
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
			" %s(): ch=0x%p id=%s\n", __func__, ch, ch->id);

	if (grp->in_sweep == 0) {
		/* first request of this sweep cycle: one request and
		 * one response expected per active read channel */
		grp->in_sweep = 1;
		ctcm_test_and_set_busy(dev);
		grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
		grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
	}

	CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);

	grp->sweep_req_pend_num--;
	ctcmpc_send_sweep_resp(ch);
	kfree(mpcginfo);
	return;
}
/*
 * MPC Group Station FSM definitions
 */

/* Printable names for the MPCG_EVENT_* codes, indexed by event number
 * (used for FSM trace/debug output). */
static const char *mpcg_event_names[] = {
	[MPCG_EVENT_INOP]	= "INOP Condition",
	[MPCG_EVENT_DISCONC]	= "Discontact Received",
	[MPCG_EVENT_XID0DO]	= "Channel Active - Start XID",
	[MPCG_EVENT_XID2]	= "XID2 Received",
	[MPCG_EVENT_XID2DONE]	= "XID0 Complete",
	[MPCG_EVENT_XID7DONE]	= "XID7 Complete",
	[MPCG_EVENT_TIMER]	= "XID Setup Timer",
	[MPCG_EVENT_DOIO]	= "XID DoIO",
};
/* Printable names for the MPCG_STATE_* codes, indexed by state number
 * (used for FSM trace/debug output). */
static const char *mpcg_state_names[] = {
	[MPCG_STATE_RESET]	= "Reset",
	[MPCG_STATE_INOP]	= "INOP",
	[MPCG_STATE_XID2INITW]	= "Passive XID- XID0 Pending Start",
	[MPCG_STATE_XID2INITX]	= "Passive XID- XID0 Pending Complete",
	[MPCG_STATE_XID7INITW]	= "Passive XID- XID7 Pending P1 Start",
	[MPCG_STATE_XID7INITX]	= "Passive XID- XID7 Pending P2 Complete",
	[MPCG_STATE_XID0IOWAIT]	= "Active XID- XID0 Pending Start",
	[MPCG_STATE_XID0IOWAIX]	= "Active XID- XID0 Pending Complete",
	[MPCG_STATE_XID7INITI]	= "Active XID- XID7 Pending Start",
	[MPCG_STATE_XID7INITZ]	= "Active XID- XID7 Pending Complete ",
	[MPCG_STATE_XID7INITF]	= "XID - XID7 Complete ",
	[MPCG_STATE_FLOWC]	= "FLOW CONTROL ON",
	[MPCG_STATE_READY]	= "READY",
};
/*
 * The MPC Group Station FSM
 * (transition table: {state, event} -> action; event and state names
 * are listed in mpcg_event_names / mpcg_state_names above)
 */
  662. static const fsm_node mpcg_fsm[] = {
  663. { MPCG_STATE_RESET, MPCG_EVENT_INOP, mpc_action_go_inop },
  664. { MPCG_STATE_INOP, MPCG_EVENT_INOP, mpc_action_nop },
  665. { MPCG_STATE_FLOWC, MPCG_EVENT_INOP, mpc_action_go_inop },
  666. { MPCG_STATE_READY, MPCG_EVENT_DISCONC, mpc_action_discontact },
  667. { MPCG_STATE_READY, MPCG_EVENT_INOP, mpc_action_go_inop },
  668. { MPCG_STATE_XID2INITW, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
  669. { MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
  670. { MPCG_STATE_XID2INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
  671. { MPCG_STATE_XID2INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
  672. { MPCG_STATE_XID2INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
  673. { MPCG_STATE_XID2INITX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
  674. { MPCG_STATE_XID2INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
  675. { MPCG_STATE_XID2INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
  676. { MPCG_STATE_XID2INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
  677. { MPCG_STATE_XID2INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
  678. { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
  679. { MPCG_STATE_XID7INITW, MPCG_EVENT_DISCONC, mpc_action_discontact },
  680. { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
  681. { MPCG_STATE_XID7INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
  682. { MPCG_STATE_XID7INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
  683. { MPCG_STATE_XID7INITW, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
  684. { MPCG_STATE_XID7INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
  685. { MPCG_STATE_XID7INITX, MPCG_EVENT_DISCONC, mpc_action_discontact },
  686. { MPCG_STATE_XID7INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
  687. { MPCG_STATE_XID7INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
  688. { MPCG_STATE_XID7INITX, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
  689. { MPCG_STATE_XID7INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
  690. { MPCG_STATE_XID7INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
  691. { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
  692. { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC, mpc_action_discontact },
  693. { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
  694. { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP, mpc_action_go_inop },
  695. { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER, mpc_action_timeout },
  696. { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO, mpc_action_xside_xid },
  697. { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
  698. { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC, mpc_action_discontact },
  699. { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
  700. { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP, mpc_action_go_inop },
  701. { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER, mpc_action_timeout },
  702. { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO, mpc_action_xside_xid },
  703. { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
  704. { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
  705. { MPCG_STATE_XID7INITI, MPCG_EVENT_DISCONC, mpc_action_discontact },
  706. { MPCG_STATE_XID7INITI, MPCG_EVENT_INOP, mpc_action_go_inop },
  707. { MPCG_STATE_XID7INITI, MPCG_EVENT_TIMER, mpc_action_timeout },
  708. { MPCG_STATE_XID7INITI, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
  709. { MPCG_STATE_XID7INITI, MPCG_EVENT_DOIO, mpc_action_xside_xid },
  710. { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
  711. { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
  712. { MPCG_STATE_XID7INITZ, MPCG_EVENT_DISCONC, mpc_action_discontact },
  713. { MPCG_STATE_XID7INITZ, MPCG_EVENT_INOP, mpc_action_go_inop },
  714. { MPCG_STATE_XID7INITZ, MPCG_EVENT_TIMER, mpc_action_timeout },
  715. { MPCG_STATE_XID7INITZ, MPCG_EVENT_DOIO, mpc_action_xside_xid },
  716. { MPCG_STATE_XID7INITF, MPCG_EVENT_INOP, mpc_action_go_inop },
  717. { MPCG_STATE_XID7INITF, MPCG_EVENT_XID7DONE, mpc_action_go_ready },
  718. };
/* number of state/event transition entries in the mpcg_fsm table above */
static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
/*
 * MPC Group Station FSM action
 * CTCM_PROTO_MPC only
 *
 * Invoked on MPCG_EVENT_XID7DONE in state XID7INITF (see mpcg_fsm table).
 * Decides whether XID negotiation completed cleanly: on a NOGOOD saved
 * XID (xid2_flag2 == 0x40) the group is driven INOP; otherwise the
 * "group ready" tasklet (mpc_tasklet2) is scheduled.
 *
 * fsm		fsm instance of the group (unused here; group fsm is
 *		reached through the device's private data)
 * event	the event that triggered this action
 * arg		net_device pointer, cast from void *
 */
static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	if (grp == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): No MPC group",
			CTCM_FUNTAIL, dev->name);
		return;
	}

	/* negotiation finished - stop the group timeout timer */
	fsm_deltimer(&grp->timer);

	if (grp->saved_xid2->xid2_flag2 == 0x40) {
		/* peer flagged the XID NOGOOD: report failure upward,
		 * then drive the whole group inoperative */
		priv->xid->xid2_flag2 = 0x00;
		if (grp->estconnfunc) {
			/* rc 1 tells the registered callback the
			 * connection establishment failed */
			grp->estconnfunc(grp->port_num, 1,
					grp->group_max_buflen);
			grp->estconnfunc = NULL;
		} else if (grp->allochanfunc)
			grp->send_qllc_disc = 1;

		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): fails",
			CTCM_FUNTAIL, dev->name);
		return;
	}

	/* success path: remember to restart on failure, reset sequence
	 * tracking and let mpc_tasklet2 finish the transition to READY */
	grp->port_persist = 1;
	grp->out_of_sequence = 0;
	grp->estconn_called = 0;

	tasklet_hi_schedule(&grp->mpc_tasklet2);

	return;
}
/*
 * helper of ctcm_init_netdevice
 * CTCM_PROTO_MPC only
 *
 * Tasklet body (mpc_tasklet2) that completes the group's transition to
 * MPCG_STATE_READY: restarts the read channel, idles the write channel
 * (purging anything still collected), clears the device busy flag and
 * notifies the registered upper-layer callback.
 *
 * adev		net_device pointer, cast to unsigned long for the tasklet API
 */
void mpc_group_ready(unsigned long adev)
{
	struct net_device *dev = (struct net_device *)adev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct channel *ch = NULL;

	if (grp == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): No MPC group",
			CTCM_FUNTAIL, dev->name);
		return;
	}

	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
		"%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n",
		CTCM_FUNTAIL, dev->name, grp->group_max_buflen);

	fsm_newstate(grp->fsm, MPCG_STATE_READY);

	/* Put up a read on the channel */
	ch = priv->channel[CTCM_READ];
	ch->pdu_seq = 0;	/* restart PDU sequence numbering */
	CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
		__func__, ch->pdu_seq);

	ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
	/* Put the write channel in idle state */
	ch = priv->channel[CTCM_WRITE];
	if (ch->collect_len > 0) {
		/* drop any frames queued before the group was ready */
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
	}
	ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch);
	ctcm_clear_busy(dev);

	if (grp->estconnfunc) {
		/* rc 0: connection established successfully */
		grp->estconnfunc(grp->port_num, 0,
				grp->group_max_buflen);
		grp->estconnfunc = NULL;
	} else if (grp->allochanfunc)
		grp->allochanfunc(grp->port_num, grp->group_max_buflen);

	grp->send_qllc_disc = 1;
	grp->changed_side = 0;

	return;
}
/*
 * Increment the MPC Group Active Channel Counts
 * helper of dev_action (called from channel fsm)
 *
 * Adds a channel to, or removes it from, the MPC group's bookkeeping.
 * On MPC_CHANNEL_ADD a per-channel XID skb is (re)built from the group
 * template and the channel is put into CH_XID0_PENDING; once both a
 * read and a write channel are active the group enters XID2INITW.
 * On MPC_CHANNEL_REMOVE the counts are decremented and, if only one
 * direction remains active, the group is driven INOP.
 *
 * ch		the channel being added/removed
 * direction	CTCM_READ or CTCM_WRITE (index into active_channels[])
 * action	MPC_CHANNEL_ADD or MPC_CHANNEL_REMOVE
 */
void mpc_channel_action(struct channel *ch, int direction, int action)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	if (grp == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): No MPC group",
			CTCM_FUNTAIL, dev->name);
		return;
	}

	CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id);

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
		"%s: %i / Grp:%s total_channels=%i, active_channels: "
		"read=%i, write=%i\n", __func__, action,
		fsm_getstate_str(grp->fsm), grp->num_channel_paths,
		grp->active_channels[CTCM_READ],
		grp->active_channels[CTCM_WRITE]);

	if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
		grp->num_channel_paths++;
		grp->active_channels[direction]++;
		grp->outstanding_xid2++;
		ch->in_mpcgroup = 1;

		/* rebuild the channel's XID skb from scratch */
		if (ch->xid_skb != NULL)
			dev_kfree_skb_any(ch->xid_skb);

		ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
					GFP_ATOMIC | GFP_DMA);
		if (ch->xid_skb == NULL) {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): Couldn't alloc ch xid_skb\n",
				CTCM_FUNTAIL, dev->name);
			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
			return;
		}
		ch->xid_skb_data = ch->xid_skb->data;
		ch->xid_th = (struct th_header *)ch->xid_skb->data;
		/* lay out TH header + XID2 + id area inside the skb and
		 * record pointers to each part */
		skb_put(ch->xid_skb, TH_HEADER_LENGTH);
		ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb);
		skb_put(ch->xid_skb, XID2_LENGTH);
		ch->xid_id = skb_tail_pointer(ch->xid_skb);
		/* rewind the skb so the group template can be copied in */
		ch->xid_skb->data = ch->xid_skb_data;
		skb_reset_tail_pointer(ch->xid_skb);
		ch->xid_skb->len = 0;

		memcpy(skb_put(ch->xid_skb, grp->xid_skb->len),
				grp->xid_skb->data,
				grp->xid_skb->len);

		ch->xid->xid2_dlc_type =
			((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
				? XID2_READ_SIDE : XID2_WRITE_SIDE);

		if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
			ch->xid->xid2_buf_len = 0x00;

		/* leave the skb "empty" again; the recorded pointers
		 * (xid_th/xid/xid_id) are what the channel program uses */
		ch->xid_skb->data = ch->xid_skb_data;
		skb_reset_tail_pointer(ch->xid_skb);
		ch->xid_skb->len = 0;

		fsm_newstate(ch->fsm, CH_XID0_PENDING);

		if ((grp->active_channels[CTCM_READ] > 0) &&
		    (grp->active_channels[CTCM_WRITE] > 0) &&
			(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
			fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
			CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
				"%s: %s: MPC GROUP CHANNELS ACTIVE\n",
				__func__, dev->name);
		}
	} else if ((action == MPC_CHANNEL_REMOVE) &&
			(ch->in_mpcgroup == 1)) {
		ch->in_mpcgroup = 0;
		grp->num_channel_paths--;
		grp->active_channels[direction]--;

		if (ch->xid_skb != NULL)
			dev_kfree_skb_any(ch->xid_skb);
		ch->xid_skb = NULL;

		if (grp->channels_terminating)
			goto done;

		/* one direction fully gone while the other still has
		 * channels: the group can no longer operate */
		if (((grp->active_channels[CTCM_READ] == 0) &&
					(grp->active_channels[CTCM_WRITE] > 0))
			|| ((grp->active_channels[CTCM_WRITE] == 0) &&
					(grp->active_channels[CTCM_READ] > 0)))
			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
	}
done:
	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
		"exit %s: %i / Grp:%s total_channels=%i, active_channels: "
		"read=%i, write=%i\n", __func__, action,
		fsm_getstate_str(grp->fsm), grp->num_channel_paths,
		grp->active_channels[CTCM_READ],
		grp->active_channels[CTCM_WRITE]);

	CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
}
  894. /**
  895. * Unpack a just received skb and hand it over to
  896. * upper layers.
  897. * special MPC version of unpack_skb.
  898. *
  899. * ch The channel where this skb has been received.
  900. * pskb The received skb.
  901. */
  902. static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
  903. {
  904. struct net_device *dev = ch->netdev;
  905. struct ctcm_priv *priv = dev->ml_priv;
  906. struct mpc_group *grp = priv->mpcg;
  907. struct pdu *curr_pdu;
  908. struct mpcg_info *mpcginfo;
  909. struct th_header *header = NULL;
  910. struct th_sweep *sweep = NULL;
  911. int pdu_last_seen = 0;
  912. __u32 new_len;
  913. struct sk_buff *skb;
  914. int skblen;
  915. int sendrc = 0;
  916. CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n",
  917. __func__, dev->name, smp_processor_id(), ch->id);
  918. header = (struct th_header *)pskb->data;
  919. if ((header->th_seg == 0) &&
  920. (header->th_ch_flag == 0) &&
  921. (header->th_blk_flag == 0) &&
  922. (header->th_seq_num == 0))
  923. /* nothing for us */ goto done;
  924. CTCM_PR_DBGDATA("%s: th_header\n", __func__);
  925. CTCM_D3_DUMP((char *)header, TH_HEADER_LENGTH);
  926. CTCM_PR_DBGDATA("%s: pskb len: %04x \n", __func__, pskb->len);
  927. pskb->dev = dev;
  928. pskb->ip_summed = CHECKSUM_UNNECESSARY;
  929. skb_pull(pskb, TH_HEADER_LENGTH);
  930. if (likely(header->th_ch_flag == TH_HAS_PDU)) {
  931. CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__);
  932. if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) ||
  933. ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) &&
  934. (header->th_seq_num != ch->th_seq_num + 1) &&
  935. (ch->th_seq_num != 0))) {
  936. /* This is NOT the next segment *
  937. * we are not the correct race winner *
  938. * go away and let someone else win *
  939. * BUT..this only applies if xid negot *
  940. * is done *
  941. */
  942. grp->out_of_sequence += 1;
  943. __skb_push(pskb, TH_HEADER_LENGTH);
  944. skb_queue_tail(&ch->io_queue, pskb);
  945. CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x "
  946. "got:%08x\n", __func__,
  947. ch->th_seq_num + 1, header->th_seq_num);
  948. return;
  949. }
  950. grp->out_of_sequence = 0;
  951. ch->th_seq_num = header->th_seq_num;
  952. CTCM_PR_DBGDATA("ctcmpc: %s() FromVTAM_th_seq=%08x\n",
  953. __func__, ch->th_seq_num);
  954. if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY))
  955. goto done;
  956. while ((pskb->len > 0) && !pdu_last_seen) {
  957. curr_pdu = (struct pdu *)pskb->data;
  958. CTCM_PR_DBGDATA("%s: pdu_header\n", __func__);
  959. CTCM_D3_DUMP((char *)pskb->data, PDU_HEADER_LENGTH);
  960. CTCM_PR_DBGDATA("%s: pskb len: %04x \n",
  961. __func__, pskb->len);
  962. skb_pull(pskb, PDU_HEADER_LENGTH);
  963. if (curr_pdu->pdu_flag & PDU_LAST)
  964. pdu_last_seen = 1;
  965. if (curr_pdu->pdu_flag & PDU_CNTL)
  966. pskb->protocol = htons(ETH_P_SNAP);
  967. else
  968. pskb->protocol = htons(ETH_P_SNA_DIX);
  969. if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) {
  970. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  971. "%s(%s): Dropping packet with "
  972. "illegal siize %d",
  973. CTCM_FUNTAIL, dev->name, pskb->len);
  974. priv->stats.rx_dropped++;
  975. priv->stats.rx_length_errors++;
  976. goto done;
  977. }
  978. skb_reset_mac_header(pskb);
  979. new_len = curr_pdu->pdu_offset;
  980. CTCM_PR_DBGDATA("%s: new_len: %04x \n",
  981. __func__, new_len);
  982. if ((new_len == 0) || (new_len > pskb->len)) {
  983. /* should never happen */
  984. /* pskb len must be hosed...bail out */
  985. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  986. "%s(%s): non valid pdu_offset: %04x",
  987. /* "data may be lost", */
  988. CTCM_FUNTAIL, dev->name, new_len);
  989. goto done;
  990. }
  991. skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC);
  992. if (!skb) {
  993. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  994. "%s(%s): MEMORY allocation error",
  995. CTCM_FUNTAIL, dev->name);
  996. priv->stats.rx_dropped++;
  997. fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
  998. goto done;
  999. }
  1000. memcpy(skb_put(skb, new_len), pskb->data, new_len);
  1001. skb_reset_mac_header(skb);
  1002. skb->dev = pskb->dev;
  1003. skb->protocol = pskb->protocol;
  1004. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1005. *((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
  1006. ch->pdu_seq++;
  1007. if (do_debug_data) {
  1008. ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n",
  1009. __func__, ch->pdu_seq);
  1010. ctcm_pr_debug("%s: skb:%0lx "
  1011. "skb len: %d \n", __func__,
  1012. (unsigned long)skb, skb->len);
  1013. ctcm_pr_debug("%s: up to 32 bytes "
  1014. "of pdu_data sent\n", __func__);
  1015. ctcmpc_dump32((char *)skb->data, skb->len);
  1016. }
  1017. skblen = skb->len;
  1018. sendrc = netif_rx(skb);
  1019. priv->stats.rx_packets++;
  1020. priv->stats.rx_bytes += skblen;
  1021. skb_pull(pskb, new_len); /* point to next PDU */
  1022. }
  1023. } else {
  1024. mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type());
  1025. if (mpcginfo == NULL)
  1026. goto done;
  1027. mpcginfo->ch = ch;
  1028. mpcginfo->th = header;
  1029. mpcginfo->skb = pskb;
  1030. CTCM_PR_DEBUG("%s: Not PDU - may be control pkt\n",
  1031. __func__);
  1032. /* it's a sweep? */
  1033. sweep = (struct th_sweep *)pskb->data;
  1034. mpcginfo->sweep = sweep;
  1035. if (header->th_ch_flag == TH_SWEEP_REQ)
  1036. mpc_rcvd_sweep_req(mpcginfo);
  1037. else if (header->th_ch_flag == TH_SWEEP_RESP)
  1038. mpc_rcvd_sweep_resp(mpcginfo);
  1039. else if (header->th_blk_flag == TH_DATA_IS_XID) {
  1040. struct xid2 *thisxid = (struct xid2 *)pskb->data;
  1041. skb_pull(pskb, XID2_LENGTH);
  1042. mpcginfo->xid = thisxid;
  1043. fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo);
  1044. } else if (header->th_blk_flag == TH_DISCONTACT)
  1045. fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo);
  1046. else if (header->th_seq_num != 0) {
  1047. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1048. "%s(%s): control pkt expected\n",
  1049. CTCM_FUNTAIL, dev->name);
  1050. priv->stats.rx_dropped++;
  1051. /* mpcginfo only used for non-data transfers */
  1052. kfree(mpcginfo);
  1053. if (do_debug_data)
  1054. ctcmpc_dump_skb(pskb, -8);
  1055. }
  1056. }
  1057. done:
  1058. dev_kfree_skb_any(pskb);
  1059. if (sendrc == NET_RX_DROP) {
  1060. dev_warn(&dev->dev,
  1061. "The network backlog for %s is exceeded, "
  1062. "package dropped\n", __func__);
  1063. fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
  1064. }
  1065. CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
  1066. __func__, dev->name, ch, ch->id);
  1067. }
  1068. /**
  1069. * tasklet helper for mpc's skb unpacking.
  1070. *
  1071. * ch The channel to work on.
  1072. * Allow flow control back pressure to occur here.
  1073. * Throttling back channel can result in excessive
  1074. * channel inactivity and system deact of channel
  1075. */
  1076. void ctcmpc_bh(unsigned long thischan)
  1077. {
  1078. struct channel *ch = (struct channel *)thischan;
  1079. struct sk_buff *skb;
  1080. struct net_device *dev = ch->netdev;
  1081. struct ctcm_priv *priv = dev->ml_priv;
  1082. struct mpc_group *grp = priv->mpcg;
  1083. CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n",
  1084. dev->name, smp_processor_id(), __func__, ch->id);
  1085. /* caller has requested driver to throttle back */
  1086. while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) &&
  1087. (skb = skb_dequeue(&ch->io_queue))) {
  1088. ctcmpc_unpack_skb(ch, skb);
  1089. if (grp->out_of_sequence > 20) {
  1090. /* assume data loss has occurred if */
  1091. /* missing seq_num for extended */
  1092. /* period of time */
  1093. grp->out_of_sequence = 0;
  1094. fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
  1095. break;
  1096. }
  1097. if (skb == skb_peek(&ch->io_queue))
  1098. break;
  1099. }
  1100. CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
  1101. __func__, dev->name, ch, ch->id);
  1102. return;
  1103. }
/*
 * MPC Group Initializations
 *
 * Allocates and initializes the mpc_group for a device: the group FSM,
 * the template XID skb (TH header + XID2 + "VTAM" id) shared by all
 * channels of the group, and the skb used to hold received XIDs.
 * Returns the new group (also stored in priv->mpcg) or NULL on any
 * allocation failure, with all partially acquired resources released.
 */
struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
{
	struct mpc_group *grp;

	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
			"Enter %s(%p)", CTCM_FUNTAIL, priv);

	grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL);
	if (grp == NULL)
		return NULL;

	grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names,
			MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm,
			mpcg_fsm_len, GFP_KERNEL);
	if (grp->fsm == NULL) {
		kfree(grp);
		return NULL;
	}

	fsm_newstate(grp->fsm, MPCG_STATE_RESET);
	fsm_settimer(grp->fsm, &grp->timer);

	grp->xid_skb =
		 __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
	if (grp->xid_skb == NULL) {
		kfree_fsm(grp->fsm);
		kfree(grp);
		return NULL;
	}
	/* base xid for all channels in group */
	grp->xid_skb_data = grp->xid_skb->data;
	grp->xid_th = (struct th_header *)grp->xid_skb->data;
	memcpy(skb_put(grp->xid_skb, TH_HEADER_LENGTH),
			&thnorm, TH_HEADER_LENGTH);

	grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb);
	memcpy(skb_put(grp->xid_skb, XID2_LENGTH), &init_xid, XID2_LENGTH);
	/* jiffies-derived ids make each side's XID (almost certainly)
	 * unique for the x/y-side role election in mpc_validate_xid */
	grp->xid->xid2_adj_id = jiffies | 0xfff00000;
	grp->xid->xid2_sender_id = jiffies;

	grp->xid_id = skb_tail_pointer(grp->xid_skb);
	memcpy(skb_put(grp->xid_skb, 4), "VTAM", 4);

	grp->rcvd_xid_skb =
		__dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
	if (grp->rcvd_xid_skb == NULL) {
		kfree_fsm(grp->fsm);
		dev_kfree_skb(grp->xid_skb);
		kfree(grp);
		return NULL;
	}
	grp->rcvd_xid_data = grp->rcvd_xid_skb->data;
	grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
	memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH),
			&thnorm, TH_HEADER_LENGTH);
	grp->saved_xid2 = NULL;
	priv->xid = grp->xid;
	priv->mpcg = grp;
	return grp;
}
  1159. /*
  1160. * The MPC Group Station FSM
  1161. */
  1162. /*
  1163. * MPC Group Station FSM actions
  1164. * CTCM_PROTO_MPC only
  1165. */
/**
 * NOP action for statemachines
 *
 * Deliberately empty: used as the action for state/event pairs that
 * must be accepted by the FSM but require no processing.
 */
static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
 * invoked when the device transitions to dev_stopped
 * MPC will stop each individual channel if a single XID failure
 * occurs, or will intitiate all channels be stopped if a GROUP
 * level failure occurs.
 *
 * Tears the group down to MPCG_STATE_RESET, resets all XID bookkeeping
 * and either schedules a restart (DEV_EVENT_RESTART via restart_timer)
 * when port_persist/alloc_called is set, or a final DEV_EVENT_STOP.
 *
 * fi		group fsm instance (group is reached via dev's priv data)
 * event	the event that triggered this action
 * arg		net_device pointer, cast from void *
 */
static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
{
	struct net_device  *dev = arg;
	struct ctcm_priv    *priv;
	struct mpc_group *grp;
	struct channel *wch;

	CTCM_PR_DEBUG("Enter %s: %s\n",	__func__, dev->name);

	priv  = dev->ml_priv;
	grp =  priv->mpcg;
	grp->flow_off_called = 0;
	fsm_deltimer(&grp->timer);
	/* re-entry guard: teardown may be triggered from several paths */
	if (grp->channels_terminating)
		return;
	grp->channels_terminating = 1;
	grp->saved_state = fsm_getstate(grp->fsm);
	fsm_newstate(grp->fsm, MPCG_STATE_INOP);
	if (grp->saved_state > MPCG_STATE_XID7INITF)
		/* group had already passed XID negotiation */
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
			"%s(%s): MPC GROUP INOPERATIVE",
				CTCM_FUNTAIL, dev->name);
	if ((grp->saved_state != MPCG_STATE_RESET) ||
		/* dealloc_channel has been called */
		(grp->port_persist == 0))
		fsm_deltimer(&priv->restart_timer);

	wch = priv->channel[CTCM_WRITE];

	switch (grp->saved_state) {
	/* states still in XID negotiation: nothing extra to do */
	case MPCG_STATE_RESET:
	case MPCG_STATE_INOP:
	case MPCG_STATE_XID2INITW:
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	case MPCG_STATE_XID7INITF:
		break;
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
	default:
		/* group was up: send a discontact via the write channel */
		tasklet_hi_schedule(&wch->ch_disc_tasklet);
	}

	/* reset all XID negotiation state for a possible restart */
	grp->xid2_tgnum = 0;
	grp->group_max_buflen = 0;  /*min of all received */
	grp->outstanding_xid2 = 0;
	grp->outstanding_xid7 = 0;
	grp->outstanding_xid7_p2 = 0;
	grp->saved_xid2 = NULL;
	grp->xidnogood = 0;
	grp->changed_side = 0;

	/* rewind the received-XID skb to just a fresh TH header */
	grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
	skb_reset_tail_pointer(grp->rcvd_xid_skb);
	grp->rcvd_xid_skb->len = 0;
	grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
	memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH), &thnorm,
	       TH_HEADER_LENGTH);

	if (grp->send_qllc_disc == 1) {
		grp->send_qllc_disc = 0;
		mpc_send_qllc_discontact(dev);
	}

	/* DO NOT issue DEV_EVENT_STOP directly out of this code */
	/* This can result in INOP of VTAM PU due to halting of  */
	/* outstanding IO which causes a sense to be returned	 */
	/* Only about 3 senses are allowed and then IOS/VTAM will*/
	/* become unreachable without manual intervention	 */
	if ((grp->port_persist == 1) || (grp->alloc_called)) {
		grp->alloc_called = 0;
		fsm_deltimer(&priv->restart_timer);
		fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev);
		fsm_newstate(grp->fsm, MPCG_STATE_RESET);
		if (grp->saved_state > MPCG_STATE_XID7INITF)
			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
				"%s(%s): MPC GROUP RECOVERY SCHEDULED",
					CTCM_FUNTAIL, dev->name);
	} else {
		fsm_deltimer(&priv->restart_timer);
		fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev);
		fsm_newstate(grp->fsm, MPCG_STATE_RESET);
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
			"%s(%s): NO MPC GROUP RECOVERY ATTEMPTED",
						CTCM_FUNTAIL, dev->name);
	}
}
/**
 * Handle mpc group action timeout.
 * MPC Group Station FSM action
 * CTCM_PROTO_MPC only
 *
 * fi		An instance of an mpc_group fsm.
 * event	The event, just happened.
 * arg		Generic pointer, casted from net_device * upon call.
 */
static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv;
	struct mpc_group *grp;
	struct channel *wch;
	struct channel *rch;

	priv = dev->ml_priv;
	grp = priv->mpcg;
	wch = priv->channel[CTCM_WRITE];
	rch = priv->channel[CTCM_READ];

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		/* Unless there is outstanding IO on the  */
		/* channel just return and wait for ATTN  */
		/* interrupt to begin XID negotiations	  */
		if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
		   (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
			break;
		/* fall through - IO outstanding: treat timeout as failure */
	default:
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
	}

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
		"%s: dev=%s exit",
		CTCM_FUNTAIL, dev->name);
	return;
}
  1298. /*
  1299. * MPC Group Station FSM action
  1300. * CTCM_PROTO_MPC only
  1301. */
  1302. void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
  1303. {
  1304. struct mpcg_info *mpcginfo = arg;
  1305. struct channel *ch = mpcginfo->ch;
  1306. struct net_device *dev;
  1307. struct ctcm_priv *priv;
  1308. struct mpc_group *grp;
  1309. if (ch) {
  1310. dev = ch->netdev;
  1311. if (dev) {
  1312. priv = dev->ml_priv;
  1313. if (priv) {
  1314. CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
  1315. "%s: %s: %s\n",
  1316. CTCM_FUNTAIL, dev->name, ch->id);
  1317. grp = priv->mpcg;
  1318. grp->send_qllc_disc = 1;
  1319. fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
  1320. }
  1321. }
  1322. }
  1323. return;
  1324. }
  1325. /*
  1326. * MPC Group Station - not part of FSM
  1327. * CTCM_PROTO_MPC only
  1328. * called from add_channel in ctcm_main.c
  1329. */
  1330. void mpc_action_send_discontact(unsigned long thischan)
  1331. {
  1332. int rc;
  1333. struct channel *ch = (struct channel *)thischan;
  1334. unsigned long saveflags = 0;
  1335. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  1336. rc = ccw_device_start(ch->cdev, &ch->ccw[15],
  1337. (unsigned long)ch, 0xff, 0);
  1338. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  1339. if (rc != 0) {
  1340. ctcm_ccw_check_rc(ch, rc, (char *)__func__);
  1341. }
  1342. return;
  1343. }
  1344. /*
  1345. * helper function of mpc FSM
  1346. * CTCM_PROTO_MPC only
  1347. * mpc_action_rcvd_xid7
  1348. */
  1349. static int mpc_validate_xid(struct mpcg_info *mpcginfo)
  1350. {
  1351. struct channel *ch = mpcginfo->ch;
  1352. struct net_device *dev = ch->netdev;
  1353. struct ctcm_priv *priv = dev->ml_priv;
  1354. struct mpc_group *grp = priv->mpcg;
  1355. struct xid2 *xid = mpcginfo->xid;
  1356. int rc = 0;
  1357. __u64 our_id = 0;
  1358. __u64 their_id = 0;
  1359. int len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
  1360. CTCM_PR_DEBUG("Enter %s: xid=%p\n", __func__, xid);
  1361. if (xid == NULL) {
  1362. rc = 1;
  1363. /* XID REJECTED: xid == NULL */
  1364. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1365. "%s(%s): xid = NULL",
  1366. CTCM_FUNTAIL, ch->id);
  1367. goto done;
  1368. }
  1369. CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
  1370. /*the received direction should be the opposite of ours */
  1371. if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
  1372. XID2_READ_SIDE) != xid->xid2_dlc_type) {
  1373. rc = 2;
  1374. /* XID REJECTED: r/w channel pairing mismatch */
  1375. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1376. "%s(%s): r/w channel pairing mismatch",
  1377. CTCM_FUNTAIL, ch->id);
  1378. goto done;
  1379. }
  1380. if (xid->xid2_dlc_type == XID2_READ_SIDE) {
  1381. CTCM_PR_DEBUG("%s: grpmaxbuf:%d xid2buflen:%d\n", __func__,
  1382. grp->group_max_buflen, xid->xid2_buf_len);
  1383. if (grp->group_max_buflen == 0 || grp->group_max_buflen >
  1384. xid->xid2_buf_len - len)
  1385. grp->group_max_buflen = xid->xid2_buf_len - len;
  1386. }
  1387. if (grp->saved_xid2 == NULL) {
  1388. grp->saved_xid2 =
  1389. (struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb);
  1390. memcpy(skb_put(grp->rcvd_xid_skb,
  1391. XID2_LENGTH), xid, XID2_LENGTH);
  1392. grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
  1393. skb_reset_tail_pointer(grp->rcvd_xid_skb);
  1394. grp->rcvd_xid_skb->len = 0;
  1395. /* convert two 32 bit numbers into 1 64 bit for id compare */
  1396. our_id = (__u64)priv->xid->xid2_adj_id;
  1397. our_id = our_id << 32;
  1398. our_id = our_id + priv->xid->xid2_sender_id;
  1399. their_id = (__u64)xid->xid2_adj_id;
  1400. their_id = their_id << 32;
  1401. their_id = their_id + xid->xid2_sender_id;
  1402. /* lower id assume the xside role */
  1403. if (our_id < their_id) {
  1404. grp->roll = XSIDE;
  1405. CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
  1406. "%s(%s): WE HAVE LOW ID - TAKE XSIDE",
  1407. CTCM_FUNTAIL, ch->id);
  1408. } else {
  1409. grp->roll = YSIDE;
  1410. CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
  1411. "%s(%s): WE HAVE HIGH ID - TAKE YSIDE",
  1412. CTCM_FUNTAIL, ch->id);
  1413. }
  1414. } else {
  1415. if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) {
  1416. rc = 3;
  1417. /* XID REJECTED: xid flag byte4 mismatch */
  1418. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1419. "%s(%s): xid flag byte4 mismatch",
  1420. CTCM_FUNTAIL, ch->id);
  1421. }
  1422. if (xid->xid2_flag2 == 0x40) {
  1423. rc = 4;
  1424. /* XID REJECTED - xid NOGOOD */
  1425. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1426. "%s(%s): xid NOGOOD",
  1427. CTCM_FUNTAIL, ch->id);
  1428. }
  1429. if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) {
  1430. rc = 5;
  1431. /* XID REJECTED - Adjacent Station ID Mismatch */
  1432. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1433. "%s(%s): Adjacent Station ID Mismatch",
  1434. CTCM_FUNTAIL, ch->id);
  1435. }
  1436. if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) {
  1437. rc = 6;
  1438. /* XID REJECTED - Sender Address Mismatch */
  1439. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1440. "%s(%s): Sender Address Mismatch",
  1441. CTCM_FUNTAIL, ch->id);
  1442. }
  1443. }
  1444. done:
  1445. if (rc) {
  1446. dev_warn(&dev->dev,
  1447. "The XID used in the MPC protocol is not valid, "
  1448. "rc = %d\n", rc);
  1449. priv->xid->xid2_flag2 = 0x40;
  1450. grp->saved_xid2->xid2_flag2 = 0x40;
  1451. }
  1452. return rc;
  1453. }
  1454. /*
  1455. * MPC Group Station FSM action
  1456. * CTCM_PROTO_MPC only
  1457. */
  1458. static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
  1459. {
  1460. struct channel *ch = arg;
  1461. int rc = 0;
  1462. int gotlock = 0;
  1463. unsigned long saveflags = 0; /* avoids compiler warning with
  1464. spin_unlock_irqrestore */
  1465. CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
  1466. __func__, smp_processor_id(), ch, ch->id);
  1467. if (ctcm_checkalloc_buffer(ch))
  1468. goto done;
  1469. /*
  1470. * skb data-buffer referencing:
  1471. */
  1472. ch->trans_skb->data = ch->trans_skb_data;
  1473. skb_reset_tail_pointer(ch->trans_skb);
  1474. ch->trans_skb->len = 0;
  1475. /* result of the previous 3 statements is NOT always
  1476. * already set after ctcm_checkalloc_buffer
  1477. * because of possible reuse of the trans_skb
  1478. */
  1479. memset(ch->trans_skb->data, 0, 16);
  1480. ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
  1481. /* check is main purpose here: */
  1482. skb_put(ch->trans_skb, TH_HEADER_LENGTH);
  1483. ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb);
  1484. /* check is main purpose here: */
  1485. skb_put(ch->trans_skb, XID2_LENGTH);
  1486. ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb);
  1487. /* cleanup back to startpoint */
  1488. ch->trans_skb->data = ch->trans_skb_data;
  1489. skb_reset_tail_pointer(ch->trans_skb);
  1490. ch->trans_skb->len = 0;
  1491. /* non-checking rewrite of above skb data-buffer referencing: */
  1492. /*
  1493. memset(ch->trans_skb->data, 0, 16);
  1494. ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
  1495. ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH);
  1496. ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH;
  1497. */
  1498. ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1499. ch->ccw[8].count = 0;
  1500. ch->ccw[8].cda = 0x00;
  1501. if (!(ch->xid_th && ch->xid && ch->xid_id))
  1502. CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
  1503. "%s(%s): xid_th=%p, xid=%p, xid_id=%p",
  1504. CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id);
  1505. if (side == XSIDE) {
  1506. /* mpc_action_xside_xid */
  1507. if (ch->xid_th == NULL)
  1508. goto done;
  1509. ch->ccw[9].cmd_code = CCW_CMD_WRITE;
  1510. ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1511. ch->ccw[9].count = TH_HEADER_LENGTH;
  1512. ch->ccw[9].cda = virt_to_phys(ch->xid_th);
  1513. if (ch->xid == NULL)
  1514. goto done;
  1515. ch->ccw[10].cmd_code = CCW_CMD_WRITE;
  1516. ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1517. ch->ccw[10].count = XID2_LENGTH;
  1518. ch->ccw[10].cda = virt_to_phys(ch->xid);
  1519. ch->ccw[11].cmd_code = CCW_CMD_READ;
  1520. ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1521. ch->ccw[11].count = TH_HEADER_LENGTH;
  1522. ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);
  1523. ch->ccw[12].cmd_code = CCW_CMD_READ;
  1524. ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1525. ch->ccw[12].count = XID2_LENGTH;
  1526. ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);
  1527. ch->ccw[13].cmd_code = CCW_CMD_READ;
  1528. ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);
  1529. } else { /* side == YSIDE : mpc_action_yside_xid */
  1530. ch->ccw[9].cmd_code = CCW_CMD_READ;
  1531. ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1532. ch->ccw[9].count = TH_HEADER_LENGTH;
  1533. ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);
  1534. ch->ccw[10].cmd_code = CCW_CMD_READ;
  1535. ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1536. ch->ccw[10].count = XID2_LENGTH;
  1537. ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);
  1538. if (ch->xid_th == NULL)
  1539. goto done;
  1540. ch->ccw[11].cmd_code = CCW_CMD_WRITE;
  1541. ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1542. ch->ccw[11].count = TH_HEADER_LENGTH;
  1543. ch->ccw[11].cda = virt_to_phys(ch->xid_th);
  1544. if (ch->xid == NULL)
  1545. goto done;
  1546. ch->ccw[12].cmd_code = CCW_CMD_WRITE;
  1547. ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1548. ch->ccw[12].count = XID2_LENGTH;
  1549. ch->ccw[12].cda = virt_to_phys(ch->xid);
  1550. if (ch->xid_id == NULL)
  1551. goto done;
  1552. ch->ccw[13].cmd_code = CCW_CMD_WRITE;
  1553. ch->ccw[13].cda = virt_to_phys(ch->xid_id);
  1554. }
  1555. ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
  1556. ch->ccw[13].count = 4;
  1557. ch->ccw[14].cmd_code = CCW_CMD_NOOP;
  1558. ch->ccw[14].flags = CCW_FLAG_SLI;
  1559. ch->ccw[14].count = 0;
  1560. ch->ccw[14].cda = 0;
  1561. CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7);
  1562. CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH);
  1563. CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH);
  1564. CTCM_D3_DUMP((char *)ch->xid_id, 4);
  1565. if (!in_irq()) {
  1566. /* Such conditional locking is a known problem for
  1567. * sparse because its static undeterministic.
  1568. * Warnings should be ignored here. */
  1569. spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
  1570. gotlock = 1;
  1571. }
  1572. fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch);
  1573. rc = ccw_device_start(ch->cdev, &ch->ccw[8],
  1574. (unsigned long)ch, 0xff, 0);
  1575. if (gotlock) /* see remark above about conditional locking */
  1576. spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
  1577. if (rc != 0) {
  1578. ctcm_ccw_check_rc(ch, rc,
  1579. (side == XSIDE) ? "x-side XID" : "y-side XID");
  1580. }
  1581. done:
  1582. CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n",
  1583. __func__, ch, ch->id);
  1584. return;
  1585. }
  1586. /*
  1587. * MPC Group Station FSM action
  1588. * CTCM_PROTO_MPC only
  1589. */
static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg)
{
	/* arg is the struct channel *; delegate with the X (active) side. */
	mpc_action_side_xid(fsm, arg, XSIDE);
}
  1594. /*
  1595. * MPC Group Station FSM action
  1596. * CTCM_PROTO_MPC only
  1597. */
static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg)
{
	/* arg is the struct channel *; delegate with the Y (passive) side. */
	mpc_action_side_xid(fsm, arg, YSIDE);
}
  1602. /*
  1603. * MPC Group Station FSM action
  1604. * CTCM_PROTO_MPC only
  1605. */
  1606. static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
  1607. {
  1608. struct channel *ch = arg;
  1609. struct net_device *dev = ch->netdev;
  1610. struct ctcm_priv *priv = dev->ml_priv;
  1611. struct mpc_group *grp = priv->mpcg;
  1612. CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
  1613. __func__, smp_processor_id(), ch, ch->id);
  1614. if (ch->xid == NULL) {
  1615. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1616. "%s(%s): ch->xid == NULL",
  1617. CTCM_FUNTAIL, dev->name);
  1618. return;
  1619. }
  1620. fsm_newstate(ch->fsm, CH_XID0_INPROGRESS);
  1621. ch->xid->xid2_option = XID2_0;
  1622. switch (fsm_getstate(grp->fsm)) {
  1623. case MPCG_STATE_XID2INITW:
  1624. case MPCG_STATE_XID2INITX:
  1625. ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
  1626. break;
  1627. case MPCG_STATE_XID0IOWAIT:
  1628. case MPCG_STATE_XID0IOWAIX:
  1629. ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
  1630. break;
  1631. }
  1632. fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
  1633. return;
  1634. }
  1635. /*
  1636. * MPC Group Station FSM action
  1637. * CTCM_PROTO_MPC only
  1638. */
/*
 * Drive the XID7 exchange for both channels (read and write) of the
 * device. Depending on which side we roll as (XSIDE/YSIDE), which phase
 * we are in (outstanding_xid7_p2 > 0 means phase 1 is still pending) and
 * the channel FSM state, the channel either sends a normal TH header via
 * WRITE_CTL or a dummy TH header via SENSE_CMD. Channels that prepared
 * something to send are handed to the group FSM (MPCG_EVENT_DOIO).
 */
static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = NULL;
	int direction;
	int send = 0;

	if (priv)
		grp = priv->mpcg;
	if (grp == NULL)
		return;

	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
		struct channel *ch = priv->channel[direction];
		struct xid2 *thisxid = ch->xid;
		/* rewind the xid skb to an empty buffer before rebuilding */
		ch->xid_skb->data = ch->xid_skb_data;
		skb_reset_tail_pointer(ch->xid_skb);
		ch->xid_skb->len = 0;
		thisxid->xid2_option = XID2_7;
		send = 0;

		/* xid7 phase 1 */
		if (grp->outstanding_xid7_p2 > 0) {
			if (grp->roll == YSIDE) {
				/* Y-side answers with a dummy TH header. */
				if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) {
					fsm_newstate(ch->fsm, CH_XID7_PENDING2);
					ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
					memcpy(skb_put(ch->xid_skb,
							TH_HEADER_LENGTH),
					       &thdummy, TH_HEADER_LENGTH);
					send = 1;
				}
			} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) {
				/* X-side initiates with a normal TH header. */
					fsm_newstate(ch->fsm, CH_XID7_PENDING2);
					ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
					memcpy(skb_put(ch->xid_skb,
						       TH_HEADER_LENGTH),
					       &thnorm, TH_HEADER_LENGTH);
					send = 1;
			}
		} else {
			/* xid7 phase 2 */
			if (grp->roll == YSIDE) {
				/* Y-side now sends the normal header. */
				if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) {
					fsm_newstate(ch->fsm, CH_XID7_PENDING4);
					memcpy(skb_put(ch->xid_skb,
							TH_HEADER_LENGTH),
					       &thnorm, TH_HEADER_LENGTH);
					ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
					send = 1;
				}
			} else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) {
				/* X-side completes with the dummy header. */
				fsm_newstate(ch->fsm, CH_XID7_PENDING4);
				ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
				memcpy(skb_put(ch->xid_skb, TH_HEADER_LENGTH),
						&thdummy, TH_HEADER_LENGTH);
				send = 1;
			}
		}

		if (send)
			fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
	}

	return;
}
  1701. /*
  1702. * MPC Group Station FSM action
  1703. * CTCM_PROTO_MPC only
  1704. */
/*
 * Process a received XID0 on a channel: advance the XID bookkeeping
 * counters, step the group FSM according to the current group state,
 * then validate the received XID. Ownership of mpcginfo is taken here;
 * it is freed before returning.
 */
static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
{
	struct mpcg_info *mpcginfo = arg;
	struct channel *ch = mpcginfo->ch;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n",
			__func__, ch->id, grp->outstanding_xid2,
			grp->outstanding_xid7, grp->outstanding_xid7_p2);

	if (fsm_getstate(ch->fsm) < CH_XID7_PENDING)
		fsm_newstate(ch->fsm, CH_XID7_PENDING);

	/* one XID2 answered; an XID7 (and its phase-2 follow-up) is now due */
	grp->outstanding_xid2--;
	grp->outstanding_xid7++;
	grp->outstanding_xid7_p2++;

	/* must change state before validating xid to */
	/* properly handle interim interrupts received*/
	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX);
		mpc_validate_xid(mpcginfo);
		break;
	case MPCG_STATE_XID0IOWAIT:
		fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX);
		mpc_validate_xid(mpcginfo);
		break;
	case MPCG_STATE_XID2INITX:
		/* last outstanding XID2 completes the passive-side phase */
		if (grp->outstanding_xid2 == 0) {
			fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW);
			mpc_validate_xid(mpcginfo);
			fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
		}
		break;
	case MPCG_STATE_XID0IOWAIX:
		/* last outstanding XID2 completes the active-side phase */
		if (grp->outstanding_xid2 == 0) {
			fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI);
			mpc_validate_xid(mpcginfo);
			fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
		}
		break;
	}
	kfree(mpcginfo);

	CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
		__func__, ch->id, grp->outstanding_xid2,
		grp->outstanding_xid7, grp->outstanding_xid7_p2);
	CTCM_PR_DEBUG("ctcmpc:%s() %s grpstate: %s chanstate: %s \n",
		__func__, ch->id,
		fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm));
	return;
}
  1755. /*
  1756. * MPC Group Station FSM action
  1757. * CTCM_PROTO_MPC only
  1758. */
/*
 * Process a received XID7 on a channel: reset the channel's xid skb,
 * step the group FSM, and validate the received XID. When the last
 * outstanding XID7 of a phase arrives, either roll the phase-2 count
 * over into outstanding_xid7 or finish (XID7INITF + XID7DONE event).
 * Ownership of mpcginfo is taken here; it is freed before returning.
 */
static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
{
	struct mpcg_info *mpcginfo = arg;
	struct channel *ch = mpcginfo->ch;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ch, ch->id);
	CTCM_PR_DEBUG("%s: outstanding_xid7: %i, outstanding_xid7_p2: %i\n",
		__func__, grp->outstanding_xid7, grp->outstanding_xid7_p2);

	grp->outstanding_xid7--;
	/* rewind the xid skb to an empty buffer for the next exchange */
	ch->xid_skb->data = ch->xid_skb_data;
	skb_reset_tail_pointer(ch->xid_skb);
	ch->xid_skb->len = 0;

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID7INITI:
		fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ);
		mpc_validate_xid(mpcginfo);
		break;
	case MPCG_STATE_XID7INITW:
		fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX);
		mpc_validate_xid(mpcginfo);
		break;
	case MPCG_STATE_XID7INITZ:
	case MPCG_STATE_XID7INITX:
		if (grp->outstanding_xid7 == 0) {
			if (grp->outstanding_xid7_p2 > 0) {
				/* phase 2 still pending: carry its count over */
				grp->outstanding_xid7 =
					grp->outstanding_xid7_p2;
				grp->outstanding_xid7_p2 = 0;
			} else
				fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF);

			mpc_validate_xid(mpcginfo);
			fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
			break;
		}
		mpc_validate_xid(mpcginfo);
		break;
	}
	kfree(mpcginfo);
	return;
}
  1802. /*
  1803. * mpc_action helper of an MPC Group Station FSM action
  1804. * CTCM_PROTO_MPC only
  1805. */
  1806. static int mpc_send_qllc_discontact(struct net_device *dev)
  1807. {
  1808. __u32 new_len = 0;
  1809. struct sk_buff *skb;
  1810. struct qllc *qllcptr;
  1811. struct ctcm_priv *priv = dev->ml_priv;
  1812. struct mpc_group *grp = priv->mpcg;
  1813. CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
  1814. __func__, mpcg_state_names[grp->saved_state]);
  1815. switch (grp->saved_state) {
  1816. /*
  1817. * establish conn callback function is
  1818. * preferred method to report failure
  1819. */
  1820. case MPCG_STATE_XID0IOWAIT:
  1821. case MPCG_STATE_XID0IOWAIX:
  1822. case MPCG_STATE_XID7INITI:
  1823. case MPCG_STATE_XID7INITZ:
  1824. case MPCG_STATE_XID2INITW:
  1825. case MPCG_STATE_XID2INITX:
  1826. case MPCG_STATE_XID7INITW:
  1827. case MPCG_STATE_XID7INITX:
  1828. if (grp->estconnfunc) {
  1829. grp->estconnfunc(grp->port_num, -1, 0);
  1830. grp->estconnfunc = NULL;
  1831. break;
  1832. }
  1833. case MPCG_STATE_FLOWC:
  1834. case MPCG_STATE_READY:
  1835. grp->send_qllc_disc = 2;
  1836. new_len = sizeof(struct qllc);
  1837. qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA);
  1838. if (qllcptr == NULL) {
  1839. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1840. "%s(%s): qllcptr allocation error",
  1841. CTCM_FUNTAIL, dev->name);
  1842. return -ENOMEM;
  1843. }
  1844. qllcptr->qllc_address = 0xcc;
  1845. qllcptr->qllc_commands = 0x03;
  1846. skb = __dev_alloc_skb(new_len, GFP_ATOMIC);
  1847. if (skb == NULL) {
  1848. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1849. "%s(%s): skb allocation error",
  1850. CTCM_FUNTAIL, dev->name);
  1851. priv->stats.rx_dropped++;
  1852. kfree(qllcptr);
  1853. return -ENOMEM;
  1854. }
  1855. memcpy(skb_put(skb, new_len), qllcptr, new_len);
  1856. kfree(qllcptr);
  1857. if (skb_headroom(skb) < 4) {
  1858. CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
  1859. "%s(%s): skb_headroom error",
  1860. CTCM_FUNTAIL, dev->name);
  1861. dev_kfree_skb_any(skb);
  1862. return -ENOMEM;
  1863. }
  1864. *((__u32 *)skb_push(skb, 4)) =
  1865. priv->channel[CTCM_READ]->pdu_seq;
  1866. priv->channel[CTCM_READ]->pdu_seq++;
  1867. CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
  1868. __func__, priv->channel[CTCM_READ]->pdu_seq);
  1869. /* receipt of CC03 resets anticipated sequence number on
  1870. receiving side */
  1871. priv->channel[CTCM_READ]->pdu_seq = 0x00;
  1872. skb_reset_mac_header(skb);
  1873. skb->dev = dev;
  1874. skb->protocol = htons(ETH_P_SNAP);
  1875. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1876. CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4));
  1877. netif_rx(skb);
  1878. break;
  1879. default:
  1880. break;
  1881. }
  1882. return 0;
  1883. }
  1884. /* --- This is the END my friend --- */