/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004 LSI Logic Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * FILE		: megaraid_mm.c
 * Version	: v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include "megaraid_mm.h"

// Entry points for char node driver
static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);

// routines to convert to and from the old format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);

// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(unsigned long);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

#ifdef CONFIG_COMPAT
static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

static uint32_t drvr_ver	= 0x02200207;

static int adapters_count_g;
static struct list_head adapters_list_g;

static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
	.open		= mraid_mm_open,
	.unlocked_ioctl	= mraid_mm_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mraid_mm_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice megaraid_mm_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "megadev0",
	.fops	= &lsi_fops,
};
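
/*
 * Illustrative sketch (not part of the original source): how a management
 * application is expected to reach this module through the misc device
 * registered above. The node name "megadev0" comes from megaraid_mm_dev;
 * the mimd_t fields and the 0x82/MEGAIOC_QNADAP pair are taken from their
 * use in handle_drvrcmd() below. The exact ioctl command number used by
 * real MIMD applications is not visible in this file; it is only described
 * here by the constraint enforced in mraid_mm_ioctl().
 *
 *	int fd = open("/dev/megadev0", O_RDWR);	// needs CAP_SYS_ADMIN
 *	mimd_t mimd;
 *	uint32_t nadap = 0;
 *
 *	memset(&mimd, 0, sizeof(mimd));
 *	mimd.ui.fcs.opcode	= 0x82;			// driver command class
 *	mimd.ui.fcs.subopcode	= MEGAIOC_QNADAP;	// query adapter count
 *	mimd.data		= (void *)&nadap;	// field type per megaraid_mm.h
 *	ioctl(fd, cmd, &mimd);	// any cmd with _IOC_TYPE(cmd) == MEGAIOC_MAGIC
 */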

/**
 * mraid_mm_open - open routine for char node interface
 * @inode	: unused
 * @filep	: unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
	/*
	 * Only allow superuser to access private ioctl interface
	 */
	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);

	return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep	: file operations pointer (ignored)
 * @cmd	: ioctl command
 * @arg	: user ioctl packet
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t		*kioc;
	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
	int		rval;
	mraid_mmadp_t	*adp;
	uint8_t		old_ioctl;
	int		drvrcmd_rval;
	void __user	*argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD are issued through this interface.
	 * MIMD applications would still fire a different command.
	 */
	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl)
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timed out on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n"));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available
	 */
	kioc = mraid_mm_alloc_kioc(adp);

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timed out. If it was
	 * timed out, that means that resources are still with the low level
	 * driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}

static long
mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
			unsigned long arg)
{
	int err;

	/* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
	mutex_lock(&mraid_mm_mutex);
	err = mraid_mm_ioctl(filep, cmd, arg);
	mutex_unlock(&mraid_mm_mutex);

	return err;
}

/**
 * mraid_mm_get_adapter - Returns the corresponding adapter for the mimd packet
 * @umimd	: User space mimd_t ioctl packet
 * @rval	: returned success/error status
 *
 * The function return value is a pointer to the located @adapter.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
	mraid_mmadp_t	*adapter;
	mimd_t		mimd;
	uint32_t	adapno;
	int		iterator;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
		*rval = -EFAULT;
		return NULL;
	}

	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g) {
		*rval = -ENODEV;
		return NULL;
	}

	adapter = NULL;
	iterator = 0;

	list_for_each_entry(adapter, &adapters_list_g, list) {
		if (iterator++ == adapno) break;
	}

	if (!adapter) {
		*rval = -ENODEV;
		return NULL;
	}

	return adapter;
}

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg		: packet sent by the user app
 * @old_ioctl	: mimd if 1; uioc otherwise
 * @rval	: pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
	mimd_t		__user *umimd;
	mimd_t		kmimd;
	uint8_t		opcode;
	uint8_t		subopcode;

	if (old_ioctl)
		goto old_packet;
	else
		goto new_packet;

new_packet:
	return (-ENOTSUPP);

old_packet:
	*rval = 0;
	umimd = arg;

	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	/*
	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
	 * GET_NUMADP, then we can handle. Otherwise we should return 1 to
	 * indicate that we cannot handle this.
	 */
	if (opcode != 0x82)
		return 1;

	switch (subopcode) {

	case MEGAIOC_QDRVRVER:

		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	case MEGAIOC_QNADAP:

		*rval = adapters_count_g;

		if (copy_to_user(kmimd.data, &adapters_count_g,
				sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	default:
		/* cannot handle */
		return 1;
	}

	return 0;
}

/**
 * mimd_to_kioc - Converter from old to new ioctl format
 * @umimd	: user space old MIMD IOCTL
 * @adp		: adapter softstate
 * @kioc	: kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */
static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	mraid_passthru_t	*pthru32;
	uint32_t		adapno;
	uint8_t			opcode;
	uint8_t			subopcode;
	mimd_t			mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode		= mimd.ui.fcs.opcode;
	subopcode	= mimd.ui.fcs.subopcode;
	adapno		= GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno	= adapno;
	kioc->mb_type	= MBOX_LEGACY;
	kioc->app_type	= APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode	= GET_ADAP_INFO;
			kioc->data_dir	= UIOC_RD;
			kioc->xferlen	= sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= mimd.ui.fcs.length;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
						mimd.outlen : mimd.inlen;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox	= &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
	 * Just like in above case, the beginning for memblk is treated as
	 * a mailbox. The passthru will begin at next 1K boundary. And the
	 * data will start 1K after that.
	 */
	pthru32			= kioc->pthru32;
	kioc->user_pthru	= &umimd->pthru;
	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr	= kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		if (pthru32->dataxferlen > kioc->xferlen)
			return -EINVAL;
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
				pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp		: Adapter softstate
 * @kioc	: kioc that the buffer needs to be attached to
 * @xferlen	: required length for buffer
 *
 * First we search for a pool with the smallest buffer that is >= @xferlen. If
 * that pool has no free buffer, we will try for the next bigger size. If none
 * is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it to the pool.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t	*pool;
	int		right_pool = -1;
	unsigned long	flags;
	int		i;

	kioc->pool_index	= -1;
	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->free_buf		= 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get the exact size, we will try the next
	 * bigger buffer.
	 */
	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (xferlen > pool->buf_size)
			continue;

		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use		= 1;
			kioc->pool_index	= i;
			kioc->buf_vaddr		= pool->vaddr;
			kioc->buf_paddr		= pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Let us try
	 * to allocate one new buffer. NOTE: This is a blocking call.
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	kioc->pool_index	= right_pool;
	kioc->free_buf		= 1;

	kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
							&kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}
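
/*
 * Worked example (illustrative, not from the original source): with the
 * pools built by mraid_mm_setup_dma_pools() below, buf_size doubles from
 * MRAID_MM_INIT_BUFF_SIZE for each of the MAX_DMA_POOLS pools. Assuming an
 * initial size of 4 KB, a request with xferlen = 6000 bytes skips the 4 KB
 * pool, marks the 8 KB pool as the "right" pool, and uses its single
 * preallocated buffer if it is free; if that buffer is busy, a fresh buffer
 * is carved out of the same 8 KB pci_pool and free_buf is set so that
 * mraid_mm_dealloc_kioc() knows to return it to the pci_pool instead of
 * just clearing in_use. The 4 KB figure is only an assumption; the real
 * value of MRAID_MM_INIT_BUFF_SIZE lives in megaraid_mm.h.
 */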

/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp	: Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t			*kioc;
	struct list_head	*head;
	unsigned long		flags;

	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));

	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->pool_index	= -1;
	kioc->free_buf		= 0;
	kioc->user_data		= NULL;
	kioc->user_data_len	= 0;
	kioc->user_pthru	= NULL;
	kioc->timedout		= 0;

	return kioc;
}

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp		: Adapter softstate
 * @kioc	: uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
	mm_dmapool_t	*pool;
	unsigned long	flags;

	if (kioc->pool_index != -1) {
		pool = &adp->dma_pool_list[kioc->pool_index];

		/* This routine may be called in non-isr context also */
		spin_lock_irqsave(&pool->lock, flags);

		/*
		 * While attaching the dma buffer, if we didn't get the
		 * required buffer from the pool, we would have allocated
		 * it at the run time and set the free_buf flag. We must
		 * free that buffer. Otherwise, just mark that the buffer is
		 * not in use.
		 */
		if (kioc->free_buf == 1)
			pci_pool_free(pool->handle, kioc->buf_vaddr,
							kioc->buf_paddr);
		else
			pool->in_use = 0;

		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/* Return the kioc to the free pool */
	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
	list_add(&kioc->list, &adp->kioc_pool);
	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* increment the free kioc count */
	up(&adp->kioc_semaphore);

	return;
}

/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp		: The adapter handle
 * @kioc	: The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int			rval;
	struct timer_list	timer;
	struct timer_list	*tp = NULL;

	kioc->status	= -ENODATA;
	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		tp		= &timer;
		init_timer(tp);

		tp->function	= lld_timedout;
		tp->data	= (unsigned long)kioc;
		tp->expires	= jiffies + adp->timeout * HZ;

		add_timer(tp);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl either completed successfully or timed out.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	if (tp) {
		del_timer_sync(tp);
	}

	/*
	 * If the command had timed out, we mark the controller offline
	 * before returning.
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}

/**
 * ioctl_done - callback from the low level driver
 * @kioc	: completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
	uint32_t	adapno;
	int		iterator;
	mraid_mmadp_t*	adapter;

	/*
	 * When the kioc returns from driver, make sure it still doesn't
	 * have ENODATA in status. Otherwise, driver will hang on wait_event
	 * forever.
	 */
	if (kioc->status == -ENODATA) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: lld didn't change status!\n"));

		kioc->status = -EINVAL;
	}

	/*
	 * Check if this kioc was timed out before. If so, nobody is waiting
	 * on this kioc. We don't have to wake up anybody. Instead, we just
	 * have to free the kioc.
	 */
	if (kioc->timedout) {

		iterator	= 0;
		adapter		= NULL;
		adapno		= kioc->adapno;

		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
					"ioctl that was timedout before\n"));

		list_for_each_entry(adapter, &adapters_list_g, list) {
			if (iterator++ == adapno) break;
		}

		kioc->timedout = 0;

		if (adapter) {
			mraid_mm_dealloc_kioc(adapter, kioc);
		}
	}
	else {
		wake_up(&wait_q);
	}
}

/**
 * lld_timedout - callback from the expired timer
 * @ptr	: ioctl packet that timed out
 */
static void
lld_timedout(unsigned long ptr)
{
	uioc_t *kioc	= (uioc_t *)ptr;

	kioc->status	= -ETIME;
	kioc->timedout	= 1;

	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

	wake_up(&wait_q);
}
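
/*
 * Note added for readability (not in the original source): lld_ioctl(),
 * ioctl_done() and lld_timedout() cooperate through kioc->status and the
 * global wait_q. lld_ioctl() seeds kioc->status with -ENODATA, hands the
 * packet to the low level driver and sleeps in wait_event() until the
 * status changes. On completion the low level driver invokes kioc->done,
 * i.e. ioctl_done(), which either wakes the sleeper or, if the timer has
 * already fired (kioc->timedout set by lld_timedout()), frees the kioc
 * itself because nobody is waiting for it any more. lld_timedout() only
 * marks the packet (-ETIME, timedout = 1) and wakes the sleeper; the
 * controller is then taken offline via adp->quiescent in lld_ioctl().
 */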

/**
 * kioc_to_mimd - Converter from new back to old format
 * @kioc	: Kernel space IOCTL packet (successfully issued)
 * @mimd	: User space MIMD packet
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
	mimd_t			kmimd;
	uint8_t			opcode;
	uint8_t			subopcode;

	mbox64_t		*mbox64;
	mraid_passthru_t	__user *upthru32;
	mraid_passthru_t	*kpthru32;
	mcontroller_t		cinfo;
	mraid_hba_info_t	*hinfo;

	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	if (opcode == 0x82) {
		switch (subopcode) {

		case MEGAIOC_QADAPINFO:

			hinfo = (mraid_hba_info_t *)(unsigned long)
					kioc->buf_vaddr;

			hinfo_to_cinfo(hinfo, &cinfo);

			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
				return (-EFAULT);

			return 0;

		default:
			return (-EINVAL);
		}

		return 0;
	}

	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

	if (kioc->user_pthru) {

		upthru32 = kioc->user_pthru;
		kpthru32 = kioc->pthru32;

		if (copy_to_user(&upthru32->scsistatus,
					&kpthru32->scsistatus,
					sizeof(uint8_t))) {
			return (-EFAULT);
		}
	}

	if (kioc->user_data) {
		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
					kioc->user_data_len)) {
			return (-EFAULT);
		}
	}

	if (copy_to_user(&mimd->mbox[17],
			&mbox64->mbox32.status, sizeof(uint8_t))) {
		return (-EFAULT);
	}

	return 0;
}

/**
 * hinfo_to_cinfo - Convert new format hba info into old format
 * @hinfo	: New format, more comprehensive adapter info
 * @cinfo	: Old format adapter info to support mimd_t apps
 */
static void
hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
{
	if (!hinfo || !cinfo)
		return;

	cinfo->base		= hinfo->baseport;
	cinfo->irq		= hinfo->irq;
	cinfo->numldrv		= hinfo->num_ldrv;
	cinfo->pcibus		= hinfo->pci_bus;
	cinfo->pcidev		= hinfo->pci_slot;
	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
	cinfo->pciid		= hinfo->pci_device_id;
	cinfo->pcivendor	= hinfo->pci_vendor_id;
	cinfo->pcislot		= hinfo->pci_slot;
	cinfo->uid		= hinfo->unique_id;
}

/**
 * mraid_mm_register_adp - Registration routine for low level drivers
 * @lld_adp	: Adapter object
 */
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
{
	mraid_mmadp_t	*adapter;
	mbox64_t	*mbox_list;
	uioc_t		*kioc;
	uint32_t	rval;
	int		i;

	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
		return (-EINVAL);

	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);

	if (!adapter)
		return -ENOMEM;

	adapter->unique_id	= lld_adp->unique_id;
	adapter->drvr_type	= lld_adp->drvr_type;
	adapter->drvr_data	= lld_adp->drvr_data;
	adapter->pdev		= lld_adp->pdev;
	adapter->issue_uioc	= lld_adp->issue_uioc;
	adapter->timeout	= lld_adp->timeout;
	adapter->max_kioc	= lld_adp->max_kioc;
	adapter->quiescent	= 1;

	/*
	 * Allocate single blocks of memory for all required kiocs,
	 * mailboxes and passthru structures.
	 */
	adapter->kioc_list	= kmalloc(sizeof(uioc_t) * lld_adp->max_kioc,
						GFP_KERNEL);
	adapter->mbox_list	= kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
						GFP_KERNEL);
	adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
						adapter->pdev,
						sizeof(mraid_passthru_t),
						16, 0);

	if (!adapter->kioc_list || !adapter->mbox_list ||
			!adapter->pthru_dma_pool) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: out of memory, %s %d\n", __func__,
			__LINE__));

		rval = (-ENOMEM);

		goto memalloc_error;
	}

	/*
	 * Slice kioc_list and make a kioc_pool with the individual kiocs
	 */
	INIT_LIST_HEAD(&adapter->kioc_pool);
	spin_lock_init(&adapter->kioc_pool_lock);
	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);

	mbox_list	= (mbox64_t *)adapter->mbox_list;

	for (i = 0; i < lld_adp->max_kioc; i++) {

		kioc		= adapter->kioc_list + i;
		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
		kioc->pthru32	= pci_pool_alloc(adapter->pthru_dma_pool,
						GFP_KERNEL, &kioc->pthru32_h);

		if (!kioc->pthru32) {

			con_log(CL_ANN, (KERN_WARNING
				"megaraid cmm: out of memory, %s %d\n",
					__func__, __LINE__));

			rval = (-ENOMEM);

			goto pthru_dma_pool_error;
		}

		list_add_tail(&kioc->list, &adapter->kioc_pool);
	}

	// Setup the dma pools for data buffers
	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
		goto dma_pool_error;
	}

	list_add_tail(&adapter->list, &adapters_list_g);

	adapters_count_g++;

	return 0;

dma_pool_error:
	/* Do nothing */

pthru_dma_pool_error:

	for (i = 0; i < lld_adp->max_kioc; i++) {
		kioc = adapter->kioc_list + i;
		if (kioc->pthru32) {
			pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
		}
	}

memalloc_error:

	kfree(adapter->kioc_list);
	kfree(adapter->mbox_list);

	if (adapter->pthru_dma_pool)
		pci_pool_destroy(adapter->pthru_dma_pool);

	kfree(adapter);

	return rval;
}
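
/*
 * Illustrative sketch (not part of the original source): the shape of a
 * registration call as implied by the fields copied above. In the kernel
 * tree the caller is the megaraid_mbox low level driver; the "my_*" values
 * below are placeholders, not real identifiers.
 *
 *	mraid_mmadp_t adp;
 *
 *	memset(&adp, 0, sizeof(adp));
 *	adp.unique_id	= my_unique_id;
 *	adp.drvr_type	= DRVRTYPE_MBOX;	// only type accepted here
 *	adp.drvr_data	= my_driver_handle;	// handed back to issue_uioc
 *	adp.pdev	= pdev;			// used to create the DMA pools
 *	adp.issue_uioc	= my_issue_uioc;	// called with IOCTL_ISSUE
 *	adp.timeout	= my_timeout_secs;	// 0 disables the ioctl timer
 *	adp.max_kioc	= my_max_kioc;		// size of the kioc free pool
 *
 *	if (mraid_mm_register_adp(&adp) != 0)
 *		goto fail;
 */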

/**
 * mraid_mm_adapter_app_handle - return the application handle for this adapter
 * @unique_id	: adapter unique identifier
 *
 * For the given driver data, locate the adapter in our global list and
 * return the corresponding handle, which is also used by applications to
 * uniquely identify an adapter.
 *
 * Return adapter handle if found in the list.
 * Return 0 if adapter could not be located, should never happen though.
 */
uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;
	int		index = 0;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			return MKADAP(index);
		}

		index++;
	}

	return 0;
}

/**
 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
 * @adp	: Adapter softstate
 *
 * We maintain a pool of dma buffers for each adapter. Each pool has one
 * buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k
 * buffers. We have just one 4k buffer in the 4k pool, one 8k buffer in the
 * 8k pool etc. We don't want to waste too much memory by allocating more
 * buffers per pool.
 */
static int
mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
{
	mm_dmapool_t	*pool;
	int		bufsize;
	int		i;

	/*
	 * Create MAX_DMA_POOLS number of pools
	 */
	bufsize = MRAID_MM_INIT_BUFF_SIZE;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		pool->buf_size = bufsize;
		spin_lock_init(&pool->lock);

		pool->handle = pci_pool_create("megaraid mm data buffer",
						adp->pdev, bufsize, 16, 0);

		if (!pool->handle) {
			goto dma_pool_setup_error;
		}

		pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
							&pool->paddr);

		if (!pool->vaddr)
			goto dma_pool_setup_error;

		bufsize = bufsize * 2;
	}

	return 0;

dma_pool_setup_error:

	mraid_mm_teardown_dma_pools(adp);
	return (-ENOMEM);
}
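
/*
 * Note added for readability (not in the original source): because bufsize
 * doubles on every iteration, the per-adapter cost of these preallocated
 * data buffers is MRAID_MM_INIT_BUFF_SIZE * (2^MAX_DMA_POOLS - 1) bytes in
 * total, e.g. 4k + 8k + 16k + 32k = 60 KB if the initial size is 4 KB and
 * MAX_DMA_POOLS is 4. Both constants are defined in megaraid_mm.h; the
 * figures here are only an example.
 */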

/**
 * mraid_mm_unregister_adp - Unregister routine for low level drivers
 * @unique_id	: UID of the adapter
 *
 * Assumes no outstanding ioctls to llds.
 */
int
mraid_mm_unregister_adp(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			adapters_count_g--;

			list_del_init(&adapter->list);

			mraid_mm_free_adp_resources(adapter);

			kfree(adapter);

			con_log(CL_ANN, (
				"megaraid cmm: Unregistered one adapter:%#x\n",
				unique_id));

			return 0;
		}
	}

	return (-ENODEV);
}

/**
 * mraid_mm_free_adp_resources - Free adapter softstate
 * @adp	: Adapter softstate
 */
static void
mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
{
	uioc_t	*kioc;
	int	i;

	mraid_mm_teardown_dma_pools(adp);

	for (i = 0; i < adp->max_kioc; i++) {

		kioc = adp->kioc_list + i;

		pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
	}

	kfree(adp->kioc_list);
	kfree(adp->mbox_list);

	pci_pool_destroy(adp->pthru_dma_pool);

	return;
}

/**
 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
 * @adp	: Adapter softstate
 */
static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
{
	int		i;
	mm_dmapool_t	*pool;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (pool->handle) {

			if (pool->vaddr)
				pci_pool_free(pool->handle, pool->vaddr,
							pool->paddr);

			pci_pool_destroy(pool->handle);
			pool->handle = NULL;
		}
	}

	return;
}

/**
 * mraid_mm_init - Module entry point
 */
static int __init
mraid_mm_init(void)
{
	int err;

	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));

	err = misc_register(&megaraid_mm_dev);
	if (err < 0) {
		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
		return err;
	}

	init_waitqueue_head(&wait_q);

	INIT_LIST_HEAD(&adapters_list_g);

	return 0;
}

#ifdef CONFIG_COMPAT
/**
 * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine
 * @filep	: file operations pointer (ignored)
 * @cmd	: ioctl command
 * @arg	: user ioctl packet
 */
static long
mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
		      unsigned long arg)
{
	int err;

	err = mraid_mm_ioctl(filep, cmd, arg);

	return err;
}
#endif

/**
 * mraid_mm_exit - Module exit point
 */
static void __exit
mraid_mm_exit(void)
{
	con_log(CL_DLEVEL1, ("exiting common mod\n"));

	misc_deregister(&megaraid_mm_dev);
}

module_init(mraid_mm_init);
module_exit(mraid_mm_exit);

/* vi: set ts=8 sw=8 tw=78: */