/*
 *
  Copyright (c) Eicon Networks, 2002.
 *
  This source file is supplied for the use with
  Eicon Networks range of DIVA Server Adapters.
 *
  Eicon File Revision : 2.1
 *
  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.
 *
  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  See the GNU General Public License for more details.
 *
  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "platform.h"
#include "di_defs.h"
#include "pc.h"
#include "pr_pc.h"
#include "divasync.h"
#define MIPS_SCOM
#include "pkmaint.h" /* pc_main.h, packed in os-dependent fashion */
#include "di.h"
#include "mi_pc.h"
#include "io.h"
extern ADAPTER *adapter[MAX_ADAPTER];
extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
void request(PISDN_ADAPTER, ENTITY *);
static void pcm_req(PISDN_ADAPTER, ENTITY *);
/* --------------------------------------------------------------------------
   local functions
   -------------------------------------------------------------------------- */
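/*
  ReqFunc(N) expands to a per-adapter trampoline RequestN() that forwards an
  entity request to IoAdapters[N]->DIRequest if that adapter is registered.
  The Requests[] table below is indexed by adapter number and hands these
  trampolines out to IDI clients.
*/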
#define ReqFunc(N) \
	static void Request##N(ENTITY *e) \
	{ if (IoAdapters[N]) (*IoAdapters[N]->DIRequest)(IoAdapters[N], e); }
ReqFunc(0)
ReqFunc(1)
ReqFunc(2)
ReqFunc(3)
ReqFunc(4)
ReqFunc(5)
ReqFunc(6)
ReqFunc(7)
ReqFunc(8)
ReqFunc(9)
ReqFunc(10)
ReqFunc(11)
ReqFunc(12)
ReqFunc(13)
ReqFunc(14)
ReqFunc(15)
IDI_CALL Requests[MAX_ADAPTER] =
{ &Request0, &Request1, &Request2, &Request3,
  &Request4, &Request5, &Request6, &Request7,
  &Request8, &Request9, &Request10, &Request11,
  &Request12, &Request13, &Request14, &Request15
};
/*****************************************************************************/
/*
  This array should list all new services that this version of XDI
  is able to provide to its clients.
*/
static byte extended_xdi_features[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ + 1] = {
	(DIVA_XDI_EXTENDED_FEATURES_VALID |
	 DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR |
	 DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS |
#if defined(DIVA_IDI_RX_DMA)
	 DIVA_XDI_EXTENDED_FEATURE_CMA |
	 DIVA_XDI_EXTENDED_FEATURE_RX_DMA |
	 DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA |
#endif
	 DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC),
	0
};
/*****************************************************************************/
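/*
  Dump the adapter's XLOG trace ring buffer to the driver debug log after a
  firmware trap.  xlogDesc describes the buffer (base, entry count and read
  offset) as recovered from the card.
*/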
void
dump_xlog_buffer(PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc)
{
	dword logLen;
	word *Xlog = xlogDesc->buf;
	word logCnt = xlogDesc->cnt;
	word logOut = xlogDesc->out / sizeof(*Xlog);
	DBG_FTL(("%s: ************* XLOG recovery (%d) *************",
		 &IoAdapter->Name[0], (int)logCnt))
	DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
	for (; logCnt > 0; --logCnt)
	{
		if (!GET_WORD(&Xlog[logOut]))
		{
			if (--logCnt == 0)
				break;
			logOut = 0;
		}
		if (GET_WORD(&Xlog[logOut]) <= (logOut * sizeof(*Xlog)))
		{
			if (logCnt > 2)
			{
				DBG_FTL(("Possibly corrupted XLOG: %d entries left",
					 (int)logCnt))
			}
			break;
		}
		logLen = (dword)(GET_WORD(&Xlog[logOut]) - (logOut * sizeof(*Xlog)));
		DBG_FTL_MXLOG(((char *)&Xlog[logOut + 1], (dword)(logLen - 2)))
		logOut = (GET_WORD(&Xlog[logOut]) + 1) / sizeof(*Xlog);
	}
	DBG_FTL(("%s: ***************** end of XLOG *****************",
		 &IoAdapter->Name[0]))
}
/*****************************************************************************/
#if defined(XDI_USE_XLOG)
static char *(ExceptionCauseTable[]) =
{
	"Interrupt",
	"TLB mod /IBOUND",
	"TLB load /DBOUND",
	"TLB store",
	"Address error load",
	"Address error store",
	"Instruction load bus error",
	"Data load/store bus error",
	"Syscall",
	"Breakpoint",
  133. "Reverd instruction",
  134. "Coprocessor unusable",
  135. "Overflow",
  136. "TRAP",
  137. "VCEI",
  138. "Floating Point Exception",
  139. "CP2",
  140. "Reserved 17",
  141. "Reserved 18",
  142. "Reserved 19",
  143. "Reserved 20",
  144. "Reserved 21",
  145. "Reserved 22",
  146. "WATCH",
  147. "Reserved 24",
  148. "Reserved 25",
  149. "Reserved 26",
  150. "Reserved 27",
  151. "Reserved 28",
  152. "Reserved 29",
  153. "Reserved 30",
  154. "VCED"
  155. };
  156. #endif
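/*
  Dump the CPU exception frame of a trapped adapter: the MIPS cause/status
  registers and the general purpose register set, as laid out in MP_XCPTC.
*/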
void
dump_trap_frame(PISDN_ADAPTER IoAdapter, byte __iomem *exceptionFrame)
{
	MP_XCPTC __iomem *xcept = (MP_XCPTC __iomem *)exceptionFrame;
	dword __iomem *regs;
	regs = &xcept->regs[0];
	DBG_FTL(("%s: ***************** CPU TRAPPED *****************",
		 &IoAdapter->Name[0]))
	DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
	DBG_FTL(("Cause: %s",
		 ExceptionCauseTable[(READ_DWORD(&xcept->cr) & 0x0000007c) >> 2]))
	DBG_FTL(("sr 0x%08x cr 0x%08x epc 0x%08x vaddr 0x%08x",
		 READ_DWORD(&xcept->sr), READ_DWORD(&xcept->cr),
		 READ_DWORD(&xcept->epc), READ_DWORD(&xcept->vaddr)))
	DBG_FTL(("zero 0x%08x at 0x%08x v0 0x%08x v1 0x%08x",
		 READ_DWORD(&regs[0]), READ_DWORD(&regs[1]),
		 READ_DWORD(&regs[2]), READ_DWORD(&regs[3])))
	DBG_FTL(("a0 0x%08x a1 0x%08x a2 0x%08x a3 0x%08x",
		 READ_DWORD(&regs[4]), READ_DWORD(&regs[5]),
		 READ_DWORD(&regs[6]), READ_DWORD(&regs[7])))
	DBG_FTL(("t0 0x%08x t1 0x%08x t2 0x%08x t3 0x%08x",
		 READ_DWORD(&regs[8]), READ_DWORD(&regs[9]),
		 READ_DWORD(&regs[10]), READ_DWORD(&regs[11])))
	DBG_FTL(("t4 0x%08x t5 0x%08x t6 0x%08x t7 0x%08x",
		 READ_DWORD(&regs[12]), READ_DWORD(&regs[13]),
		 READ_DWORD(&regs[14]), READ_DWORD(&regs[15])))
	DBG_FTL(("s0 0x%08x s1 0x%08x s2 0x%08x s3 0x%08x",
		 READ_DWORD(&regs[16]), READ_DWORD(&regs[17]),
		 READ_DWORD(&regs[18]), READ_DWORD(&regs[19])))
	DBG_FTL(("s4 0x%08x s5 0x%08x s6 0x%08x s7 0x%08x",
		 READ_DWORD(&regs[20]), READ_DWORD(&regs[21]),
		 READ_DWORD(&regs[22]), READ_DWORD(&regs[23])))
	DBG_FTL(("t8 0x%08x t9 0x%08x k0 0x%08x k1 0x%08x",
		 READ_DWORD(&regs[24]), READ_DWORD(&regs[25]),
		 READ_DWORD(&regs[26]), READ_DWORD(&regs[27])))
	DBG_FTL(("gp 0x%08x sp 0x%08x s8 0x%08x ra 0x%08x",
		 READ_DWORD(&regs[28]), READ_DWORD(&regs[29]),
		 READ_DWORD(&regs[30]), READ_DWORD(&regs[31])))
	DBG_FTL(("md 0x%08x|%08x resvd 0x%08x class 0x%08x",
		 READ_DWORD(&xcept->mdhi), READ_DWORD(&xcept->mdlo),
		 READ_DWORD(&xcept->reseverd), READ_DWORD(&xcept->xclass)))
}
/* --------------------------------------------------------------------------
   Real XDI Request function
   -------------------------------------------------------------------------- */
void request(PISDN_ADAPTER IoAdapter, ENTITY *e)
{
	byte i;
	diva_os_spin_lock_magic_t irql;
	/*
	 * if the Req field in the entity structure is 0,
	 * we treat this request as a special function call
	 */
	if (!e->Req)
	{
		IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e;
		switch (e->Rc)
		{
#if defined(DIVA_IDI_RX_DMA)
		case IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION: {
			diva_xdi_dma_descriptor_operation_t *pI = \
				&syncReq->xdi_dma_descriptor_operation.info;
			if (!IoAdapter->dma_map) {
				pI->operation = -1;
				pI->descriptor_number = -1;
				return;
			}
			diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "dma_op");
			if (pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC) {
				pI->descriptor_number = diva_alloc_dma_map_entry(\
					(struct _diva_dma_map_entry *)IoAdapter->dma_map);
				if (pI->descriptor_number >= 0) {
					dword dma_magic;
					void *local_addr;
					diva_get_dma_map_entry(\
						(struct _diva_dma_map_entry *)IoAdapter->dma_map,
						pI->descriptor_number,
						&local_addr, &dma_magic);
					pI->descriptor_address = local_addr;
					pI->descriptor_magic = dma_magic;
					pI->operation = 0;
				} else {
					pI->operation = -1;
				}
			} else if ((pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE) &&
				   (pI->descriptor_number >= 0)) {
				diva_free_dma_map_entry((struct _diva_dma_map_entry *)IoAdapter->dma_map,
							pI->descriptor_number);
				pI->descriptor_number = -1;
				pI->operation = 0;
			} else {
				pI->descriptor_number = -1;
				pI->operation = -1;
			}
			diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "dma_op");
		} return;
#endif
		case IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER: {
			diva_xdi_get_logical_adapter_number_s_t *pI = \
				&syncReq->xdi_logical_adapter_number.info;
			pI->logical_adapter_number = IoAdapter->ANum;
			pI->controller = IoAdapter->ControllerNumber;
			pI->total_controllers = IoAdapter->Properties.Adapters;
		} return;
		case IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS: {
			diva_xdi_get_capi_parameters_t prms, *pI = &syncReq->xdi_capi_prms.info;
			memset(&prms, 0x00, sizeof(prms));
			prms.structure_length = min_t(size_t, sizeof(prms), pI->structure_length);
			memset(pI, 0x00, pI->structure_length);
			prms.flag_dynamic_l1_down = (IoAdapter->capi_cfg.cfg_1 & \
						     DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? 1 : 0;
			prms.group_optimization_enabled = (IoAdapter->capi_cfg.cfg_1 & \
							   DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) ? 1 : 0;
			memcpy(pI, &prms, prms.structure_length);
		} return;
		case IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR:
			syncReq->xdi_sdram_bar.info.bar = IoAdapter->sdram_bar;
			return;
		case IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES: {
			dword i;
			diva_xdi_get_extended_xdi_features_t *pI =\
				&syncReq->xdi_extended_features.info;
			pI->buffer_length_in_bytes &= ~0x80000000;
			if (pI->buffer_length_in_bytes && pI->features) {
				memset(pI->features, 0x00, pI->buffer_length_in_bytes);
			}
			for (i = 0; ((pI->features) && (i < pI->buffer_length_in_bytes) &&
				     (i < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ)); i++) {
				pI->features[i] = extended_xdi_features[i];
			}
			if ((pI->buffer_length_in_bytes < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ) ||
			    (!pI->features)) {
				pI->buffer_length_in_bytes =\
					(0x80000000 | DIVA_XDI_EXTENDED_FEATURES_MAX_SZ);
			}
		} return;
		case IDI_SYNC_REQ_XDI_GET_STREAM:
			if (IoAdapter) {
				diva_xdi_provide_istream_info(&IoAdapter->a,
							      &syncReq->xdi_stream_info.info);
			} else {
				syncReq->xdi_stream_info.info.provided_service = 0;
			}
			return;
		case IDI_SYNC_REQ_GET_NAME:
			if (IoAdapter)
			{
				strcpy(&syncReq->GetName.name[0], IoAdapter->Name);
				DBG_TRC(("xdi: Adapter %d / Name '%s'",
					 IoAdapter->ANum, IoAdapter->Name))
				return;
			}
			syncReq->GetName.name[0] = '\0';
			break;
		case IDI_SYNC_REQ_GET_SERIAL:
			if (IoAdapter)
			{
				syncReq->GetSerial.serial = IoAdapter->serialNo;
				DBG_TRC(("xdi: Adapter %d / SerialNo %ld",
					 IoAdapter->ANum, IoAdapter->serialNo))
				return;
			}
			syncReq->GetSerial.serial = 0;
			break;
		case IDI_SYNC_REQ_GET_CARDTYPE:
			if (IoAdapter)
			{
				syncReq->GetCardType.cardtype = IoAdapter->cardType;
				DBG_TRC(("xdi: Adapter %d / CardType %ld",
					 IoAdapter->ANum, IoAdapter->cardType))
				return;
			}
			syncReq->GetCardType.cardtype = 0;
			break;
		case IDI_SYNC_REQ_GET_XLOG:
			if (IoAdapter)
			{
				pcm_req(IoAdapter, e);
				return;
			}
			e->Ind = 0;
			break;
		case IDI_SYNC_REQ_GET_DBG_XLOG:
			if (IoAdapter)
			{
				pcm_req(IoAdapter, e);
				return;
			}
			e->Ind = 0;
			break;
		case IDI_SYNC_REQ_GET_FEATURES:
			if (IoAdapter)
			{
				syncReq->GetFeatures.features =
					(unsigned short)IoAdapter->features;
				return;
			}
			syncReq->GetFeatures.features = 0;
			break;
		case IDI_SYNC_REQ_PORTDRV_HOOK:
			if (IoAdapter)
			{
				DBG_TRC(("Xdi:IDI_SYNC_REQ_PORTDRV_HOOK - ignored"))
				return;
			}
			break;
		}
		if (IoAdapter)
		{
			return;
		}
	}
	DBG_TRC(("xdi: Id 0x%x / Req 0x%x / Rc 0x%x", e->Id, e->Req, e->Rc))
	if (!IoAdapter)
	{
		DBG_FTL(("xdi: uninitialized Adapter used - ignore request"))
		return;
	}
	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
	/*
	 * assign an entity
	 */
	if (!(e->Id & 0x1f))
	{
		if (IoAdapter->e_count >= IoAdapter->e_max)
		{
			DBG_FTL(("xdi: all Ids in use (max=%d) --> Req ignored",
				 IoAdapter->e_max))
			diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
			return;
		}
		/*
		 * find a new free id
		 */
		for (i = 1; IoAdapter->e_tbl[i].e; ++i);
		IoAdapter->e_tbl[i].e = e;
		IoAdapter->e_count++;
		e->No = (byte)i;
		e->More = 0;
		e->RCurrent = 0xff;
	}
	else
	{
		i = e->No;
	}
	/*
	 * if the entity is still busy, ignore the request call
	 */
	if (e->More & XBUSY)
	{
		DBG_FTL(("xdi: Id 0x%x busy --> Req 0x%x ignored", e->Id, e->Req))
		if (!IoAdapter->trapped && IoAdapter->trapFnc)
		{
			IoAdapter->trapFnc(IoAdapter);
			/*
			  First trap, also notify user if supported
			*/
			if (IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
				(*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
			}
		}
		diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
		return;
	}
	/*
	 * initialize transmit status variables
	 */
	e->More |= XBUSY;
	e->More &= ~XMOREF;
	e->XCurrent = 0;
	e->XOffset = 0;
	/*
	 * queue this entity in the adapter request queue
	 */
	IoAdapter->e_tbl[i].next = 0;
	if (IoAdapter->head)
	{
		IoAdapter->e_tbl[IoAdapter->tail].next = i;
		IoAdapter->tail = i;
	}
	else
	{
		IoAdapter->head = i;
		IoAdapter->tail = i;
	}
	/*
	 * queue the DPC to process the request
	 */
	diva_os_schedule_soft_isr(&IoAdapter->req_soft_isr);
	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
}
/* ---------------------------------------------------------------------
   Main DPC routine
   --------------------------------------------------------------------- */
void DIDpcRoutine(struct _diva_os_soft_isr *psoft_isr, void *Context) {
	PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)Context;
	ADAPTER *a = &IoAdapter->a;
	diva_os_atomic_t *pin_dpc = &IoAdapter->in_dpc;
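	/*
	  in_dpc serializes DPC execution: only the caller that raises the
	  counter from 0 to 1 runs the service loop below, while concurrent
	  invocations merely increment it and are drained by the do/while.
	*/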
	if (diva_os_atomic_increment(pin_dpc) == 1) {
		do {
			if (IoAdapter->tst_irq(a))
			{
				if (!IoAdapter->Unavailable)
					IoAdapter->dpc(a);
				IoAdapter->clr_irq(a);
			}
			IoAdapter->out(a);
		} while (diva_os_atomic_decrement(pin_dpc) > 0);
		/* ----------------------------------------------------------------
		   Look for XLOG request (cards with indirect addressing)
		   ---------------------------------------------------------------- */
		if (IoAdapter->pcm_pending) {
			struct pc_maint *pcm;
			diva_os_spin_lock_magic_t OldIrql;
			diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
						&OldIrql,
						"data_dpc");
			pcm = (struct pc_maint *)IoAdapter->pcm_data;
			switch (IoAdapter->pcm_pending) {
			case 1: /* ask card for XLOG */
				a->ram_out(a, &IoAdapter->pcm->rc, 0);
				a->ram_out(a, &IoAdapter->pcm->req, pcm->req);
				IoAdapter->pcm_pending = 2;
				break;
			case 2: /* Try to get XLOG from the card */
				if ((int)(a->ram_in(a, &IoAdapter->pcm->rc))) {
					a->ram_in_buffer(a, IoAdapter->pcm, pcm, sizeof(*pcm));
					IoAdapter->pcm_pending = 3;
				}
				break;
			case 3: /* let XDI recover the XLOG */
				break;
			}
			diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
						&OldIrql,
						"data_dpc");
		}
		/* ---------------------------------------------------------------- */
	}
}
/* --------------------------------------------------------------------------
   XLOG interface
   -------------------------------------------------------------------------- */
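/*
  pcm_req() issues a maintenance (XLOG) request to the card and polls for the
  answer.  On I/O mapped adapters (CARD_MAE) the port accesses are not atomic,
  so the transfer is handed to the DPC via pcm_pending/pcm_data; memory mapped
  adapters are accessed directly through the ram_* callbacks.
*/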
static void
pcm_req(PISDN_ADAPTER IoAdapter, ENTITY *e)
{
	diva_os_spin_lock_magic_t OldIrql;
	int i, rc;
	ADAPTER *a = &IoAdapter->a;
	struct pc_maint *pcm = (struct pc_maint *)&e->Ind;
	/*
	 * special handling of I/O based card interface
	 * the memory access isn't an atomic operation !
	 */
	if (IoAdapter->Properties.Card == CARD_MAE)
	{
		diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
					&OldIrql,
					"data_pcm_1");
		IoAdapter->pcm_data = (void *)pcm;
		IoAdapter->pcm_pending = 1;
		diva_os_schedule_soft_isr(&IoAdapter->req_soft_isr);
		diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
					&OldIrql,
					"data_pcm_1");
		for (rc = 0, i = (IoAdapter->trapped ? 3000 : 250); !rc && (i > 0); --i)
		{
			diva_os_sleep(1);
			if (IoAdapter->pcm_pending == 3) {
				diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
							&OldIrql,
							"data_pcm_3");
				IoAdapter->pcm_pending = 0;
				IoAdapter->pcm_data = NULL;
				diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
							&OldIrql,
							"data_pcm_3");
				return;
			}
			diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
						&OldIrql,
						"data_pcm_2");
			diva_os_schedule_soft_isr(&IoAdapter->req_soft_isr);
			diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
						&OldIrql,
						"data_pcm_2");
		}
		diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
					&OldIrql,
					"data_pcm_4");
		IoAdapter->pcm_pending = 0;
		IoAdapter->pcm_data = NULL;
		diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
					&OldIrql,
					"data_pcm_4");
		goto Trapped;
	}
	/*
	 * memory based shared ram is accessible from different
	 * processors without disturbing concurrent processes.
	 */
	a->ram_out(a, &IoAdapter->pcm->rc, 0);
	a->ram_out(a, &IoAdapter->pcm->req, pcm->req);
	for (i = (IoAdapter->trapped ? 3000 : 250); --i > 0;)
	{
		diva_os_sleep(1);
		rc = (int)(a->ram_in(a, &IoAdapter->pcm->rc));
		if (rc)
		{
			a->ram_in_buffer(a, IoAdapter->pcm, pcm, sizeof(*pcm));
			return;
		}
	}
Trapped:
	if (IoAdapter->trapFnc)
	{
		int trapped = IoAdapter->trapped;
		IoAdapter->trapFnc(IoAdapter);
		/*
		  First trap, also notify user if supported
		*/
		if (!trapped && IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
			(*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
		}
	}
}
/*------------------------------------------------------------------*/
/* ram access functions for memory mapped cards */
/*------------------------------------------------------------------*/
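/*
  For the mem_* accessors 'addr' is an offset into the adapter's shared RAM
  window; each call attaches the mapping via DIVA_OS_MEM_ATTACH_RAM, performs
  the access relative to the returned base and detaches it again.
*/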
byte mem_in(ADAPTER *a, void *addr)
{
	byte val;
	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
	val = READ_BYTE(Base + (unsigned long)addr);
	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
	return (val);
}
word mem_inw(ADAPTER *a, void *addr)
{
	word val;
	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
	val = READ_WORD((Base + (unsigned long)addr));
	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
	return (val);
}
void mem_in_dw(ADAPTER *a, void *addr, dword *data, int dwords)
{
	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
	while (dwords--) {
		*data++ = READ_DWORD((Base + (unsigned long)addr));
		addr += 4;
	}
	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
}
void mem_in_buffer(ADAPTER *a, void *addr, void *buffer, word length)
{
	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
	memcpy_fromio(buffer, (Base + (unsigned long)addr), length);
	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
}
void mem_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
{
	PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
	IoAdapter->RBuffer.length = mem_inw(a, &RBuffer->length);
	mem_in_buffer(a, RBuffer->P, IoAdapter->RBuffer.P,
		      IoAdapter->RBuffer.length);
	e->RBuffer = (DBUFFER *)&IoAdapter->RBuffer;
}
void mem_out(ADAPTER *a, void *addr, byte data)
{
	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
	WRITE_BYTE(Base + (unsigned long)addr, data);
	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
}
void mem_outw(ADAPTER *a, void *addr, word data)
{
	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
	WRITE_WORD((Base + (unsigned long)addr), data);
	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
}
void mem_out_dw(ADAPTER *a, void *addr, const dword *data, int dwords)
{
	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
	while (dwords--) {
		WRITE_DWORD((Base + (unsigned long)addr), *data);
		addr += 4;
		data++;
	}
	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
}
void mem_out_buffer(ADAPTER *a, void *addr, void *buffer, word length)
{
	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
	memcpy_toio((Base + (unsigned long)addr), buffer, length);
	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
}
void mem_inc(ADAPTER *a, void *addr)
{
	volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
	byte x = READ_BYTE(Base + (unsigned long)addr);
	WRITE_BYTE(Base + (unsigned long)addr, x + 1);
	DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
}
/*------------------------------------------------------------------*/
/* ram access functions for io-mapped cards */
/*------------------------------------------------------------------*/
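/*
  The io_* accessors drive the adapter through two registers: the target
  address is written to the address register at Port + 4, then the data is
  transferred byte- or word-wise through the data port at the base address.
*/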
byte io_in(ADAPTER *a, void *adr)
{
	byte val;
	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
	outppw(Port + 4, (word)(unsigned long)adr);
	val = inpp(Port);
	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
	return (val);
}
word io_inw(ADAPTER *a, void *adr)
{
	word val;
	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
	outppw(Port + 4, (word)(unsigned long)adr);
	val = inppw(Port);
	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
	return (val);
}
void io_in_buffer(ADAPTER *a, void *adr, void *buffer, word len)
{
	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
	byte *P = (byte *)buffer;
	if ((long)adr & 1) {
		outppw(Port + 4, (word)(unsigned long)adr);
		*P = inpp(Port);
		P++;
		adr = ((byte *) adr) + 1;
		len--;
		if (!len) {
			DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
			return;
		}
	}
	outppw(Port + 4, (word)(unsigned long)adr);
	inppw_buffer(Port, P, len + 1);
	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
}
void io_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
{
	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
	outppw(Port + 4, (word)(unsigned long)RBuffer);
	((PISDN_ADAPTER)a->io)->RBuffer.length = inppw(Port);
	inppw_buffer(Port, ((PISDN_ADAPTER)a->io)->RBuffer.P, ((PISDN_ADAPTER)a->io)->RBuffer.length + 1);
	e->RBuffer = (DBUFFER *) &(((PISDN_ADAPTER)a->io)->RBuffer);
	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
}
void io_out(ADAPTER *a, void *adr, byte data)
{
	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
	outppw(Port + 4, (word)(unsigned long)adr);
	outpp(Port, data);
	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
}
void io_outw(ADAPTER *a, void *adr, word data)
{
	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
	outppw(Port + 4, (word)(unsigned long)adr);
	outppw(Port, data);
	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
}
void io_out_buffer(ADAPTER *a, void *adr, void *buffer, word len)
{
	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
	byte *P = (byte *)buffer;
	if ((long)adr & 1) {
		outppw(Port + 4, (word)(unsigned long)adr);
		outpp(Port, *P);
		P++;
		adr = ((byte *) adr) + 1;
		len--;
		if (!len) {
			DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
			return;
		}
	}
	outppw(Port + 4, (word)(unsigned long)adr);
	outppw_buffer(Port, P, len + 1);
	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
}
void io_inc(ADAPTER *a, void *adr)
{
	byte x;
	byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
	outppw(Port + 4, (word)(unsigned long)adr);
	x = inpp(Port);
	outppw(Port + 4, (word)(unsigned long)adr);
	outpp(Port, x + 1);
	DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
}
/*------------------------------------------------------------------*/
/* OS specific functions related to queuing of entities */
/*------------------------------------------------------------------*/
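/*
  Entity bookkeeping: e_tbl[] maps an entity number to its ENTITY pointer,
  head/tail implement a singly linked request queue through e_tbl[].next, and
  assign chains entities waiting for an assign reference; updates are
  protected by data_spin_lock.
*/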
void free_entity(ADAPTER *a, byte e_no)
{
	PISDN_ADAPTER IoAdapter;
	diva_os_spin_lock_magic_t irql;
	IoAdapter = (PISDN_ADAPTER) a->io;
	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_free");
	IoAdapter->e_tbl[e_no].e = NULL;
	IoAdapter->e_count--;
	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_free");
}
void assign_queue(ADAPTER *a, byte e_no, word ref)
{
	PISDN_ADAPTER IoAdapter;
	diva_os_spin_lock_magic_t irql;
	IoAdapter = (PISDN_ADAPTER) a->io;
	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_assign");
	IoAdapter->e_tbl[e_no].assign_ref = ref;
	IoAdapter->e_tbl[e_no].next = (byte)IoAdapter->assign;
	IoAdapter->assign = e_no;
	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_assign");
}
byte get_assign(ADAPTER *a, word ref)
{
	PISDN_ADAPTER IoAdapter;
	diva_os_spin_lock_magic_t irql;
	byte e_no;
	IoAdapter = (PISDN_ADAPTER) a->io;
	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
				&irql,
				"data_assign_get");
	for (e_no = (byte)IoAdapter->assign;
	     e_no && IoAdapter->e_tbl[e_no].assign_ref != ref;
	     e_no = IoAdapter->e_tbl[e_no].next);
	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
				&irql,
				"data_assign_get");
	return e_no;
}
void req_queue(ADAPTER *a, byte e_no)
{
	PISDN_ADAPTER IoAdapter;
	diva_os_spin_lock_magic_t irql;
	IoAdapter = (PISDN_ADAPTER) a->io;
	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_q");
	IoAdapter->e_tbl[e_no].next = 0;
	if (IoAdapter->head) {
		IoAdapter->e_tbl[IoAdapter->tail].next = e_no;
		IoAdapter->tail = e_no;
	}
	else {
		IoAdapter->head = e_no;
		IoAdapter->tail = e_no;
	}
	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_q");
}
byte look_req(ADAPTER *a)
{
	PISDN_ADAPTER IoAdapter;
	IoAdapter = (PISDN_ADAPTER) a->io;
	return ((byte)IoAdapter->head);
}
void next_req(ADAPTER *a)
{
	PISDN_ADAPTER IoAdapter;
	diva_os_spin_lock_magic_t irql;
	IoAdapter = (PISDN_ADAPTER) a->io;
	diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_next");
	IoAdapter->head = IoAdapter->e_tbl[IoAdapter->head].next;
	if (!IoAdapter->head) IoAdapter->tail = 0;
	diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_next");
}
/*------------------------------------------------------------------*/
/* memory map functions */
/*------------------------------------------------------------------*/
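/*
  Helper accessors for entity data: entity_ptr() resolves an entity number,
  PTR_X/PTR_R/PTR_P return the transmit, receive and parameter pointers of an
  entity, and CALLBACK() invokes the entity's completion callback.
*/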
ENTITY *entity_ptr(ADAPTER *a, byte e_no)
{
	PISDN_ADAPTER IoAdapter;
	IoAdapter = (PISDN_ADAPTER)a->io;
	return (IoAdapter->e_tbl[e_no].e);
}
void *PTR_X(ADAPTER *a, ENTITY *e)
{
	return ((void *) e->X);
}
void *PTR_R(ADAPTER *a, ENTITY *e)
{
	return ((void *) e->R);
}
void *PTR_P(ADAPTER *a, ENTITY *e, void *P)
{
	return P;
}
void CALLBACK(ADAPTER *a, ENTITY *e)
{
	if (e && e->callback)
		e->callback(e);
}