superpipe.c

/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <misc/cxl.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"
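
/*
 * Driver-wide shared state. Within this file it supplies the error
 * notification page handed out by get_err_page() and the mutex that
 * serializes that page's allocation and teardown (cxlflash_free_errpage()).
 */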
struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:    Source structure from which to translate/copy.
 * @resize:     Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
                                   struct dk_cxlflash_resize *resize)
{
        resize->hdr = release->hdr;
        resize->context_id = release->context_id;
        resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:     Source structure from which to translate/copy.
 * @release:    Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
                                struct dk_cxlflash_release *release)
{
        release->hdr = detach->hdr;
        release->context_id = detach->context_id;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
        mutex_lock(&global.mutex);
        if (global.err_page) {
                __free_page(global.err_page);
                global.err_page = NULL;
        }
        mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:        Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int i, found;

        cxlflash_mark_contexts_error(cfg);

        while (true) {
                found = false;

                for (i = 0; i < MAX_CONTEXT; i++)
                        if (cfg->ctx_tbl[i]) {
                                found = true;
                                break;
                        }

                if (!found && list_empty(&cfg->ctx_err_recovery))
                        return;

                dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
                        __func__);
                wake_up_all(&cfg->reset_waitq);
                ssleep(1);
        }
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:        Internal structure associated with the host.
 * @rctxid:     Desired context by id.
 * @file:       Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
                                           struct file *file)
{
        struct ctx_info *ctxi;

        list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
                if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
                        return ctxi;

        return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:        Internal structure associated with the host.
 * @rctxid:     Desired context (raw, un-decoded format).
 * @arg:        LUN information or file associated with request.
 * @ctx_ctrl:   Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multithreaded. The tgid remains constant for the process and only
 * changes when the process forks. For all intents and purposes, think
 * of tgid as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
                             void *arg, enum ctx_ctrl ctx_ctrl)
{
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct lun_access *lun_access = NULL;
        struct file *file = NULL;
        struct llun_info *lli = arg;
        u64 ctxid = DECODE_CTXID(rctxid);
        int rc;
        pid_t pid = current->tgid, ctxpid = 0;

        if (ctx_ctrl & CTX_CTRL_FILE) {
                lli = NULL;
                file = (struct file *)arg;
        }

        if (ctx_ctrl & CTX_CTRL_CLONE)
                pid = current->parent->tgid;

        if (likely(ctxid < MAX_CONTEXT)) {
                while (true) {
                        mutex_lock(&cfg->ctx_tbl_list_mutex);
                        ctxi = cfg->ctx_tbl[ctxid];
                        if (ctxi)
                                if ((file && (ctxi->file != file)) ||
                                    (!file && (ctxi->ctxid != rctxid)))
                                        ctxi = NULL;

                        if ((ctx_ctrl & CTX_CTRL_ERR) ||
                            (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
                                ctxi = find_error_context(cfg, rctxid, file);
                        if (!ctxi) {
                                mutex_unlock(&cfg->ctx_tbl_list_mutex);
                                goto out;
                        }

                        /*
                         * Need to acquire ownership of the context while still
                         * under the table/list lock to serialize with a remove
                         * thread. Use the 'try' to avoid stalling the
                         * table/list lock for a single context.
                         *
                         * Note that the lock order is:
                         *
                         *      cfg->ctx_tbl_list_mutex -> ctxi->mutex
                         *
                         * Therefore release ctx_tbl_list_mutex before retrying.
                         */
                        rc = mutex_trylock(&ctxi->mutex);
                        mutex_unlock(&cfg->ctx_tbl_list_mutex);
                        if (rc)
                                break; /* got the context's lock! */
                }

                if (ctxi->unavail)
                        goto denied;

                ctxpid = ctxi->pid;
                if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
                        if (pid != ctxpid)
                                goto denied;

                if (lli) {
                        list_for_each_entry(lun_access, &ctxi->luns, list)
                                if (lun_access->lli == lli)
                                        goto out;
                        goto denied;
                }
        }

out:
        dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
                "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
                ctx_ctrl);

        return ctxi;

denied:
        mutex_unlock(&ctxi->mutex);
        ctxi = NULL;
        goto out;
}

/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:       Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
        mutex_unlock(&ctxi->mutex);
}
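
/*
 * A minimal sketch of the usage pattern for the pair above; the ioctl
 * paths in this file follow it:
 *
 *      ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
 *      if (unlikely(!ctxi))
 *              return -EINVAL;
 *      ... operate on the context with ctxi->mutex held ...
 *      put_context(ctxi);
 */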

/**
 * afu_attach() - attach a context to the AFU
 * @cfg:        Internal structure associated with the host.
 * @ctxi:       Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
        int rc = 0;
        u64 val;

        /* Unlock cap and restrict user to read/write cmds in translated mode */
        readq_be(&ctrl_map->mbox_r);
        val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
        writeq_be(val, &ctrl_map->ctx_cap);
        val = readq_be(&ctrl_map->ctx_cap);
        if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
                dev_err(dev, "%s: ctx may be closed val=%016llX\n",
                        __func__, val);
                rc = -EAGAIN;
                goto out;
        }

        /* Set up MMIO registers pointing to the RHT */
        writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
        val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
        writeq_be(val, &ctrl_map->rht_cnt_id);
out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:       SCSI device associated with LUN.
 * @lli:        LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
 * in scsi_execute(), the EEH handler will attempt to recover. As part of the
 * recovery, the handler drains all currently running ioctls, waiting until they
 * have completed before proceeding with a reset. As this routine is used on the
 * ioctl path, this can create a condition where the EEH handler becomes stuck,
 * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
 * This will allow the EEH handler to proceed with a recovery while this thread
 * is still running. Once the scsi_execute() returns, reacquire the ioctl read
 * semaphore and check the adapter state in case it changed while inside of
 * scsi_execute(). The state check will wait if the adapter is still being
 * recovered or return a failure if the recovery failed. In the event that the
 * adapter reset failed, simply return the failure as the ioctl would be unable
 * to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct glun_info *gli = lli->parent;
        u8 *cmd_buf = NULL;
        u8 *scsi_cmd = NULL;
        u8 *sense_buf = NULL;
        int rc = 0;
        int result = 0;
        int retry_cnt = 0;
        u32 to = CMD_TIMEOUT * HZ;

retry:
        cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
        scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
        sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
        if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
                rc = -ENOMEM;
                goto out;
        }

        scsi_cmd[0] = SERVICE_ACTION_IN_16;     /* read cap(16) */
        scsi_cmd[1] = SAI_READ_CAPACITY_16;     /* service action */
        put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

        dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
                retry_cnt ? "re" : "", scsi_cmd[0]);

        /* Drop the ioctl read semaphore across lengthy call */
        up_read(&cfg->ioctl_rwsem);
        result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
                              CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
        down_read(&cfg->ioctl_rwsem);
        rc = check_state(cfg);
        if (rc) {
                dev_err(dev, "%s: Failed state! result=0x%08X\n",
                        __func__, result);
                rc = -ENODEV;
                goto out;
        }

        if (driver_byte(result) == DRIVER_SENSE) {
                result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
                if (result & SAM_STAT_CHECK_CONDITION) {
                        struct scsi_sense_hdr sshdr;

                        scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
                                             &sshdr);
                        switch (sshdr.sense_key) {
                        case NO_SENSE:
                        case RECOVERED_ERROR:
                                /* fall through */
                        case NOT_READY:
                                result &= ~SAM_STAT_CHECK_CONDITION;
                                break;
                        case UNIT_ATTENTION:
                                switch (sshdr.asc) {
                                case 0x29: /* Power on Reset or Device Reset */
                                        /* fall through */
                                case 0x2A: /* Device capacity changed */
                                case 0x3F: /* Report LUNs changed */
                                        /* Retry the command once more */
                                        if (retry_cnt++ < 1) {
                                                kfree(cmd_buf);
                                                kfree(scsi_cmd);
                                                kfree(sense_buf);
                                                goto retry;
                                        }
                                }
                                break;
                        default:
                                break;
                        }
                }
        }

        if (result) {
                dev_err(dev, "%s: command failed, result=0x%x\n",
                        __func__, result);
                rc = -EIO;
                goto out;
        }

        /*
         * Read cap was successful, grab values from the buffer;
         * note that we don't need to worry about unaligned access
         * as the buffer is allocated on an aligned boundary. Per the
         * READ CAPACITY(16) parameter data, bytes 0-7 hold the last
         * LBA and bytes 8-11 hold the block length.
         */
        mutex_lock(&gli->mutex);
        gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
        gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
        mutex_unlock(&gli->mutex);

out:
        kfree(cmd_buf);
        kfree(scsi_cmd);
        kfree(sense_buf);

        dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
                __func__, gli->max_lba, gli->blk_len, rc);
        return rc;
}

/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:       Context owning the resource handle.
 * @rhndl:      Resource handle associated with entry.
 * @lli:        LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
                                struct llun_info *lli)
{
        struct sisl_rht_entry *rhte = NULL;

        if (unlikely(!ctxi->rht_start)) {
                pr_debug("%s: Context does not have allocated RHT!\n",
                         __func__);
                goto out;
        }

        if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
                pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
                goto out;
        }

        if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
                pr_debug("%s: Bad resource handle LUN! (%d)\n",
                         __func__, rhndl);
                goto out;
        }

        rhte = &ctxi->rht_start[rhndl];
        if (unlikely(rhte->nmask == 0)) {
                pr_debug("%s: Unopened resource handle! (%d)\n",
                         __func__, rhndl);
                rhte = NULL;
                goto out;
        }

out:
        return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:       Context owning the resource handle.
 * @lli:        LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
                                     struct llun_info *lli)
{
        struct sisl_rht_entry *rhte = NULL;
        int i;

        /* Find a free RHT entry */
        for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
                if (ctxi->rht_start[i].nmask == 0) {
                        rhte = &ctxi->rht_start[i];
                        ctxi->rht_out++;
                        break;
                }

        if (likely(rhte))
                ctxi->rht_lun[i] = lli;

        pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
        return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:       Context owning the resource handle.
 * @rhte:       RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
                  struct sisl_rht_entry *rhte)
{
        u32 rsrc_handle = rhte - ctxi->rht_start;

        rhte->nmask = 0;
        rhte->fp = 0;
        ctxi->rht_out--;
        ctxi->rht_lun[rsrc_handle] = NULL;
        ctxi->rht_needs_ws[rsrc_handle] = false;
}

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:       RHTE to populate.
 * @lun_id:     LUN ID of LUN associated with RHTE.
 * @perm:       Desired permissions for RHTE.
 * @port_sel:   Port selection mask.
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
                        u32 port_sel)
{
        /*
         * Populate the Format 1 RHT entry for direct access (physical
         * LUN) using the synchronization sequence defined in the
         * SISLite specification.
         */
        struct sisl_rht_entry_f1 dummy = { 0 };
        struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

        memset(rhte_f1, 0, sizeof(*rhte_f1));
        rhte_f1->fp = SISL_RHT_FP(1U, 0);
        dma_wmb(); /* Make setting of format bit visible */

        rhte_f1->lun_id = lun_id;
        dma_wmb(); /* Make setting of LUN id visible */

        /*
         * Use a dummy RHT Format 1 entry to build the second dword
         * of the entry that must be populated in a single write when
         * enabled (valid bit set to TRUE).
         */
        dummy.valid = 0x80;
        dummy.fp = SISL_RHT_FP(1U, perm);
        dummy.port_sel = port_sel;
        rhte_f1->dw = dummy.dw;

        dma_wmb(); /* Make remaining RHT entry fields visible */
}
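
/*
 * Note that the release path below (_cxlflash_disk_release(), MODE_PHYSICAL
 * case) unwinds this sequence: it clears the valid bit first, then the LUN
 * id, then the remaining dword, with the same dma_wmb() barriers in between.
 */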

/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:        LUN to attach.
 * @mode:       Desired mode of the LUN.
 * @locked:     Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
        int rc = 0;

        if (!locked)
                mutex_lock(&gli->mutex);

        if (gli->mode == MODE_NONE)
                gli->mode = mode;
        else if (gli->mode != mode) {
                pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
                         __func__, gli->mode, mode);
                rc = -EINVAL;
                goto out;
        }

        gli->users++;
        WARN_ON(gli->users <= 0);
out:
        pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
                 __func__, rc, gli->mode, gli->users);
        if (!locked)
                mutex_unlock(&gli->mutex);
        return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:        LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
        mutex_lock(&gli->mutex);
        WARN_ON(gli->mode == MODE_NONE);
        if (--gli->users == 0) {
                gli->mode = MODE_NONE;
                cxlflash_ba_terminate(&gli->blka.ba_lun);
        }
        pr_debug("%s: gli->users=%u\n", __func__, gli->users);
        WARN_ON(gli->users < 0);
        mutex_unlock(&gli->mutex);
}
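
/*
 * A sketch of the pairing contract for the two services above: every
 * successful cxlflash_lun_attach() is balanced by exactly one
 * cxlflash_lun_detach(), e.g.:
 *
 *      rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
 *      if (unlikely(rc))
 *              return rc;
 *      ...
 *      cxlflash_lun_detach(gli);       // on teardown or error unwind
 */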

/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:       SCSI device associated with LUN.
 * @ctxi:       Context owning resources.
 * @release:    Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
                           struct ctx_info *ctxi,
                           struct dk_cxlflash_release *release)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct afu *afu = cfg->afu;
        bool put_ctx = false;
        struct dk_cxlflash_resize size;
        res_hndl_t rhndl = release->rsrc_handle;
        int rc = 0;
        u64 ctxid = DECODE_CTXID(release->context_id),
            rctxid = release->context_id;
        struct sisl_rht_entry *rhte;
        struct sisl_rht_entry_f1 *rhte_f1;

        dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
                __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context! (%llu)\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
                dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
                        __func__, rhndl);
                rc = -EINVAL;
                goto out;
        }

        /*
         * Resize to 0 for virtual LUNs by setting the size to 0. This
         * will clear the LXT_START and LXT_CNT fields in the RHT entry
         * and properly sync with the AFU.
         *
         * Afterwards we clear the remaining fields.
         */
        switch (gli->mode) {
        case MODE_VIRTUAL:
                marshal_rele_to_resize(release, &size);
                size.req_size = 0;
                rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
                if (rc) {
                        dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
                        goto out;
                }

                break;
        case MODE_PHYSICAL:
                /*
                 * Clear the Format 1 RHT entry for direct access
                 * (physical LUN) using the synchronization sequence
                 * defined in the SISLite specification.
                 */
                rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

                rhte_f1->valid = 0;
                dma_wmb(); /* Make revocation of RHT entry visible */

                rhte_f1->lun_id = 0;
                dma_wmb(); /* Make clearing of LUN id visible */

                rhte_f1->dw = 0;
                dma_wmb(); /* Make RHT entry bottom-half clearing visible */

                if (!ctxi->err_recovery_active)
                        cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
                break;
        default:
                WARN(1, "Unsupported LUN mode!");
                goto out;
        }

        rhte_checkin(ctxi, rhte);
        cxlflash_lun_detach(gli);

out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * cxlflash_disk_release() - releases the specified resource entry
 * @sdev:       SCSI device associated with LUN.
 * @release:    Release ioctl data structure.
 *
 * Ioctl entry point that resolves the context on the caller's behalf.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_disk_release(struct scsi_device *sdev,
                          struct dk_cxlflash_release *release)
{
        return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg:        Internal structure associated with the host.
 * @ctxi:       Context to release.
 *
 * Note that the rht_lun member of the context was cut from a single
 * allocation when the context was created and therefore does not need
 * to be explicitly freed. Also note that we conditionally check for the
 * existence of the context control map before clearing the RHT registers
 * and context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was removed
 * [so we don't have to worry about clearing] and context is waiting for
 * a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
                            struct ctx_info *ctxi)
{
        struct afu *afu = cfg->afu;

        WARN_ON(!list_empty(&ctxi->luns));

        /* Clear RHT registers and drop all capabilities for this context */
        if (afu->afu_map && ctxi->ctrl_map) {
                writeq_be(0, &ctxi->ctrl_map->rht_start);
                writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
                writeq_be(0, &ctxi->ctrl_map->ctx_cap);
        }

        /* Free memory associated with context */
        free_page((ulong)ctxi->rht_start);
        kfree(ctxi->rht_needs_ws);
        kfree(ctxi->rht_lun);
        kfree(ctxi);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:        Internal structure associated with the host.
 * @ctx:        Previously obtained CXL context reference.
 * @ctxid:      Previously obtained process element associated with CXL context.
 * @adap_fd:    Previously obtained adapter fd associated with CXL context.
 * @file:       Previously obtained file associated with CXL context.
 * @perms:      User-specified permissions.
 *
 * The context's mutex is locked when an allocated context is returned.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
                                       struct cxl_context *ctx, int ctxid,
                                       int adap_fd, struct file *file,
                                       u32 perms)
{
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct ctx_info *ctxi = NULL;
        struct llun_info **lli = NULL;
        u8 *ws = NULL;
        struct sisl_rht_entry *rhte;

        ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
        lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
        ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
        if (unlikely(!ctxi || !lli || !ws)) {
                dev_err(dev, "%s: Unable to allocate context!\n", __func__);
                goto err;
        }

        rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
        if (unlikely(!rhte)) {
                dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
                goto err;
        }

        ctxi->rht_lun = lli;
        ctxi->rht_needs_ws = ws;
        ctxi->rht_start = rhte;
        ctxi->rht_perms = perms;

        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
        ctxi->lfd = adap_fd;
        ctxi->pid = current->tgid; /* tgid = pid */
        ctxi->ctx = ctx;
        ctxi->file = file;
        mutex_init(&ctxi->mutex);
        INIT_LIST_HEAD(&ctxi->luns);
        INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */

        mutex_lock(&ctxi->mutex);
out:
        return ctxi;

err:
        kfree(ws);
        kfree(lli);
        kfree(ctxi);
        ctxi = NULL;
        goto out;
}

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:       SCSI device associated with LUN.
 * @ctxi:       Context owning resources.
 * @detach:     Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
                                 struct ctx_info *ctxi,
                                 struct dk_cxlflash_detach *detach)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct lun_access *lun_access, *t;
        struct dk_cxlflash_release rel;
        bool put_ctx = false;
        int i;
        int rc = 0;
        int lfd;
        u64 ctxid = DECODE_CTXID(detach->context_id),
            rctxid = detach->context_id;

        dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context! (%llu)\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        /* Cleanup outstanding resources tied to this LUN */
        if (ctxi->rht_out) {
                marshal_det_to_rele(detach, &rel);
                for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
                        if (ctxi->rht_lun[i] == lli) {
                                rel.rsrc_handle = i;
                                _cxlflash_disk_release(sdev, ctxi, &rel);
                        }

                        /* No need to loop further if we're done */
                        if (ctxi->rht_out == 0)
                                break;
                }
        }

        /* Take our LUN out of context, free the node */
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                if (lun_access->lli == lli) {
                        list_del(&lun_access->list);
                        kfree(lun_access);
                        lun_access = NULL;
                        break;
                }

        /* Tear down context following last LUN cleanup */
        if (list_empty(&ctxi->luns)) {
                ctxi->unavail = true;
                mutex_unlock(&ctxi->mutex);
                mutex_lock(&cfg->ctx_tbl_list_mutex);
                mutex_lock(&ctxi->mutex);

                /* Might not have been in error list so conditionally remove */
                if (!list_empty(&ctxi->list))
                        list_del(&ctxi->list);
                cfg->ctx_tbl[ctxid] = NULL;
                mutex_unlock(&cfg->ctx_tbl_list_mutex);
                mutex_unlock(&ctxi->mutex);

                lfd = ctxi->lfd;
                destroy_context(cfg, ctxi);
                ctxi = NULL;
                put_ctx = false;

                /*
                 * As a last step, clean up external resources when not
                 * already on an external cleanup thread, i.e.: close(adap_fd).
                 *
                 * NOTE: this will free up the context from the CXL services,
                 * allowing it to dole out the same context_id on a future
                 * (or even currently in-flight) disk_attach operation.
                 */
                if (lfd != -1)
                        sys_close(lfd);
        }

        /* Release the sdev reference that bound this LUN to the context */
        scsi_device_put(sdev);

out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:       SCSI device associated with LUN.
 * @detach:     Detach ioctl data structure.
 *
 * Ioctl entry point that resolves the context on the caller's behalf.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_detach(struct scsi_device *sdev,
                                struct dk_cxlflash_detach *detach)
{
        return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:      File-system inode associated with fd.
 * @file:       File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close is performed on the adapter file descriptor returned
 * to the user. Programmatically, the user is not required to perform
 * the close, as it is handled internally via the detach ioctl when
 * a context is being removed. Note that nothing prevents the user
 * from performing a close, but the user should be aware that doing
 * so is considered catastrophic and subsequent usage of the superpipe
 * API with previously saved off tokens will fail.
 *
 * When initiated from an external close (either by the user or via
 * a process tear down), the routine derives the context reference
 * and calls detach for each LUN associated with the context. The
 * final detach operation will cause the context itself to be freed.
 * Note that the saved off lfd is reset prior to calling detach to
 * signify that the final detach should not perform a close.
 *
 * When initiated from a detach operation as part of the tear down
 * of a context, the context is first completely freed and then the
 * close is performed. This routine will fail to derive the context
 * reference (due to the context having already been freed) and then
 * call into the CXL release entry point.
 *
 * Thus, with exception to when the CXL process element (context id)
 * lookup fails (a case that should theoretically never occur), every
 * call into this routine results in a complete freeing of a context.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
        struct cxl_context *ctx = cxl_fops_get_context(file);
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct dk_cxlflash_detach detach = { { 0 }, 0 };
        struct lun_access *lun_access, *t;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;

        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed! (%d)\n",
                        __func__, ctx, ctxid);
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
                if (!ctxi) {
                        dev_dbg(dev, "%s: Context %d already free!\n",
                                __func__, ctxid);
                        goto out_release;
                }

                dev_dbg(dev, "%s: Another process owns context %d!\n",
                        __func__, ctxid);
                put_context(ctxi);
                goto out;
        }

        dev_dbg(dev, "%s: close(%d) for context %d\n",
                __func__, ctxi->lfd, ctxid);

        /* Reset the file descriptor to indicate we're on a close() thread */
        ctxi->lfd = -1;
        detach.context_id = ctxi->ctxid;
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
        cxl_fd_release(inode, file);
out:
        dev_dbg(dev, "%s: returning\n", __func__);
        return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:       Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */
static void unmap_context(struct ctx_info *ctxi)
{
        unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}

/**
 * get_err_page() - obtains and allocates the error notification page
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(void)
{
        struct page *err_page = global.err_page;

        if (unlikely(!err_page)) {
                err_page = alloc_page(GFP_KERNEL);
                if (unlikely(!err_page)) {
                        pr_err("%s: Unable to allocate err_page!\n", __func__);
                        goto out;
                }

                memset(page_address(err_page), -1, PAGE_SIZE);

                /* Serialize update w/ other threads to avoid a leak */
                mutex_lock(&global.mutex);
                if (likely(!global.err_page))
                        global.err_page = err_page;
                else {
                        __free_page(err_page);
                        err_page = global.err_page;
                }
                mutex_unlock(&global.mutex);
        }

out:
        pr_debug("%s: returning err_page=%p\n", __func__, err_page);
        return err_page;
}

/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vma:        VM area associated with mapping.
 * @vmf:        VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_RETRY or VM_FAULT_SIGBUS on failure
 */
static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct cxl_context *ctx = cxl_fops_get_context(file);
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct page *err_page = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int rc = 0;
        int ctxid;

        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed! (%d)\n",
                        __func__, ctx, ctxid);
                goto err;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
                goto err;
        }

        dev_dbg(dev, "%s: fault(%d) for context %d\n",
                __func__, ctxi->lfd, ctxid);

        if (likely(!ctxi->err_recovery_active)) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
        } else {
                dev_dbg(dev, "%s: err recovery active, use err_page!\n",
                        __func__);

                err_page = get_err_page();
                if (unlikely(!err_page)) {
                        dev_err(dev, "%s: Could not obtain error page!\n",
                                __func__);
                        rc = VM_FAULT_RETRY;
                        goto out;
                }

                get_page(err_page);
                vmf->page = err_page;
                vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;

err:
        rc = VM_FAULT_SIGBUS;
        goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
        .fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:       File installed with adapter file descriptor.
 * @vma:        VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct cxl_context *ctx = cxl_fops_get_context(file);
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;
        int rc = 0;

        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed! (%d)\n",
                        __func__, ctx, ctxid);
                rc = -EIO;
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
                rc = -EIO;
                goto out;
        }

        dev_dbg(dev, "%s: mmap(%d) for context %d\n",
                __func__, ctxi->lfd, ctxid);

        rc = cxl_fd_mmap(file, vma);
        if (likely(!rc)) {
                /* Insert ourself in the mmap fault handler path */
                ctxi->cxl_mmap_vmops = vma->vm_ops;
                vma->vm_ops = &cxlflash_mmap_vmops;
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        return rc;
}

const struct file_operations cxlflash_cxl_fops = {
        .owner = THIS_MODULE,
        .mmap = cxlflash_cxl_mmap,
        .release = cxlflash_cxl_release,
};
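
/*
 * These fops are handed to the CXL services via cxl_get_fd() during attach
 * and recovery, so mmap() and close() on the adapter file descriptor are
 * routed through the handlers above before reaching the default CXL
 * behavior (cxl_fd_mmap()/cxl_fd_release()).
 */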

/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:        Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
        int i, rc = 0;
        struct ctx_info *ctxi = NULL;

        mutex_lock(&cfg->ctx_tbl_list_mutex);

        for (i = 0; i < MAX_CONTEXT; i++) {
                ctxi = cfg->ctx_tbl[i];
                if (ctxi) {
                        mutex_lock(&ctxi->mutex);
                        cfg->ctx_tbl[i] = NULL;
                        list_add(&ctxi->list, &cfg->ctx_err_recovery);
                        ctxi->err_recovery_active = true;
                        ctxi->ctrl_map = NULL;
                        unmap_context(ctxi);
                        mutex_unlock(&ctxi->mutex);
                }
        }

        mutex_unlock(&cfg->ctx_tbl_list_mutex);

        return rc;
}

/*
 * Dummy NULL fops
 */
static const struct file_operations null_fops = {
        .owner = THIS_MODULE,
};

/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:        Internal structure associated with the host.
 *
 * This routine can block and should only be used on process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
int check_state(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

retry:
        switch (cfg->state) {
        case STATE_RESET:
                dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
                up_read(&cfg->ioctl_rwsem);
                rc = wait_event_interruptible(cfg->reset_waitq,
                                              cfg->state != STATE_RESET);
                down_read(&cfg->ioctl_rwsem);
                if (unlikely(rc))
                        break;
                goto retry;
        case STATE_FAILTERM:
                dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
                rc = -ENODEV;
                break;
        default:
                break;
        }

        return rc;
}

/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:       SCSI device associated with LUN.
 * @attach:     Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
                                struct dk_cxlflash_attach *attach)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct cxl_ioctl_start_work *work;
        struct ctx_info *ctxi = NULL;
        struct lun_access *lun_access = NULL;
        int rc = 0;
        u32 perms;
        int ctxid = -1;
        u64 rctxid = 0UL;
        struct file *file;
        struct cxl_context *ctx;
        int fd = -1;

        if (attach->num_interrupts > 4) {
                dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
                        __func__, attach->num_interrupts);
                rc = -EINVAL;
                goto out;
        }

        if (gli->max_lba == 0) {
                dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
                        __func__, lli->lun_id[sdev->channel]);
                rc = read_cap16(sdev, lli);
                if (rc) {
                        dev_err(dev, "%s: Invalid device! (%d)\n",
                                __func__, rc);
                        rc = -ENODEV;
                        goto out;
                }
                dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
                dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
        }

        if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
                rctxid = attach->context_id;
                ctxi = get_context(cfg, rctxid, NULL, 0);
                if (!ctxi) {
                        dev_dbg(dev, "%s: Bad context! (%016llX)\n",
                                __func__, rctxid);
                        rc = -EINVAL;
                        goto out;
                }

                list_for_each_entry(lun_access, &ctxi->luns, list)
                        if (lun_access->lli == lli) {
                                dev_dbg(dev, "%s: Already attached!\n",
                                        __func__);
                                rc = -EINVAL;
                                goto out;
                        }
        }

        rc = scsi_device_get(sdev);
        if (unlikely(rc)) {
                dev_err(dev, "%s: Unable to get sdev reference!\n", __func__);
                goto out;
        }

        lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
        if (unlikely(!lun_access)) {
                dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
                rc = -ENOMEM;
                goto err0;
        }

        lun_access->lli = lli;
        lun_access->sdev = sdev;

        /* Non-NULL context indicates reuse */
        if (ctxi) {
                dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
                        __func__, rctxid);
                list_add(&lun_access->list, &ctxi->luns);
                fd = ctxi->lfd;
                goto out_attach;
        }

        ctx = cxl_dev_context_init(cfg->dev);
        if (unlikely(IS_ERR_OR_NULL(ctx))) {
                dev_err(dev, "%s: Could not initialize context %p\n",
                        __func__, ctx);
                rc = -ENODEV;
                goto err1;
        }

        ctxid = cxl_process_element(ctx);
        if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
                dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
                rc = -EPERM;
                goto err2;
        }

        file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
        if (unlikely(fd < 0)) {
                rc = -ENODEV;
                dev_err(dev, "%s: Could not get file descriptor\n", __func__);
                goto err2;
        }

        /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
        perms = SISL_RHT_PERM(attach->hdr.flags + 1);

        ctxi = create_context(cfg, ctx, ctxid, fd, file, perms);
        if (unlikely(!ctxi)) {
                dev_err(dev, "%s: Failed to create context! (%d)\n",
                        __func__, ctxid);
                rc = -ENOMEM;
                goto err3;
        }

        work = &ctxi->work;
        work->num_interrupts = attach->num_interrupts;
        work->flags = CXL_START_WORK_NUM_IRQS;

        rc = cxl_start_work(ctx, work);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: Could not start context rc=%d\n",
                        __func__, rc);
                goto err4;
        }

        rc = afu_attach(cfg, ctxi);
        if (unlikely(rc)) {
                dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
                goto err5;
        }

        /*
         * No error paths after this point. Once the fd is installed it's
         * visible to user space and can't be undone safely on this thread.
         * There is no need to worry about a deadlock here because no one
         * knows about us yet; we can be the only one holding our mutex.
         */
        list_add(&lun_access->list, &ctxi->luns);
        mutex_unlock(&ctxi->mutex);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);
        cfg->ctx_tbl[ctxid] = ctxi;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        fd_install(fd, file);

out_attach:
        attach->hdr.return_flags = 0;
        attach->context_id = ctxi->ctxid;
        attach->block_size = gli->blk_len;
        attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
        attach->last_lba = gli->max_lba;
        attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
        attach->max_xfer /= gli->blk_len;

out:
        attach->adap_fd = fd;

        if (ctxi)
                put_context(ctxi);

        dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
                __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
        return rc;

err5:
        cxl_stop_context(ctx);
err4:
        put_context(ctxi);
        destroy_context(cfg, ctxi);
        ctxi = NULL;
err3:
        /*
         * Here, we're overriding the fops with a dummy all-NULL fops because
         * fput() calls the release fop, which will cause us to mistakenly
         * call into the CXL code. Rather than try to add yet more complexity
         * to that routine (cxlflash_cxl_release) we should try to fix the
         * issue here.
         */
        file->f_op = &null_fops;
        fput(file);
        put_unused_fd(fd);
        fd = -1;
err2:
        cxl_release_context(ctx);
err1:
        kfree(lun_access);
err0:
        scsi_device_put(sdev);
        goto out;
}
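
/*
 * For illustration, a minimal user-space sketch of the attach flow this
 * ioctl implements. The command and structure come from
 * uapi/scsi/cxlflash_ioctl.h; the device path and interrupt count below
 * are assumptions for the example:
 *
 *      struct dk_cxlflash_attach attach = { 0 };
 *      int fd = open("/dev/sdN", O_RDWR);      // disk exporting the LUN
 *
 *      attach.num_interrupts = 4;              // driver caps this at 4
 *      if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) == 0) {
 *              // attach.context_id is the token for subsequent ioctls;
 *              // attach.adap_fd can be mmap()'d to reach the per-context
 *              // MMIO area of attach.mmio_size bytes (see
 *              // cxlflash_cxl_mmap() above).
 *      }
 */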

/**
 * recover_context() - recovers a context in error
 * @cfg:        Internal structure associated with the host.
 * @ctxi:       Context to release.
 *
 * Re-establishes the state for a context-in-error.
 *
 * Return: 0 on success, -errno on failure
 */
static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
        struct device *dev = &cfg->dev->dev;
        int rc = 0;
        int old_fd, fd = -1;
        int ctxid = -1;
        struct file *file;
        struct cxl_context *ctx;
        struct afu *afu = cfg->afu;

        ctx = cxl_dev_context_init(cfg->dev);
        if (unlikely(IS_ERR_OR_NULL(ctx))) {
                dev_err(dev, "%s: Could not initialize context %p\n",
                        __func__, ctx);
                rc = -ENODEV;
                goto out;
        }

        ctxid = cxl_process_element(ctx);
        if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
                dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
                rc = -EPERM;
                goto err1;
        }

        file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
        if (unlikely(fd < 0)) {
                rc = -ENODEV;
                dev_err(dev, "%s: Could not get file descriptor\n", __func__);
                goto err1;
        }

        rc = cxl_start_work(ctx, &ctxi->work);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: Could not start context rc=%d\n",
                        __func__, rc);
                goto err2;
        }

        /* Update with new MMIO area based on updated context id */
        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

        rc = afu_attach(cfg, ctxi);
        if (rc) {
                dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
                goto err3;
        }

        /*
         * No error paths after this point. Once the fd is installed it's
         * visible to user space and can't be undone safely on this thread.
         */
        old_fd = ctxi->lfd;
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
        ctxi->lfd = fd;
        ctxi->ctx = ctx;
        ctxi->file = file;

        /*
         * Put context back in table (note the reinit of the context list);
         * we must first drop the context's mutex and then acquire it in
         * order with the table/list mutex to avoid a deadlock - safe to do
         * here because no one can find us at this moment in time.
         */
        mutex_unlock(&ctxi->mutex);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);

        list_del_init(&ctxi->list);
        cfg->ctx_tbl[ctxid] = ctxi;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        fd_install(fd, file);

        /* Release the original adapter fd and associated CXL resources */
        sys_close(old_fd);
out:
        dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
                __func__, ctxid, fd, rc);
        return rc;

err3:
        cxl_stop_context(ctx);
err2:
        fput(file);
        put_unused_fd(fd);
err1:
        cxl_release_context(ctx);
        goto out;
}
/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit. For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to keep up.
 *
 * As this routine is called on ioctl context, it holds the ioctl r/w
 * semaphore that is used to drain ioctls in recovery scenarios. The
 * implementation to achieve the pacing described above (a local mutex)
 * requires that the ioctl r/w semaphore be dropped and reacquired to
 * avoid a 3-way deadlock when multiple process recoveries operate in
 * parallel.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'serviced'
 * (callback invoked, causing the device to enter reset state). To avoid
 * looping in this routine during that window, a 1 second sleep is in place
 * between the time the MMIO failure is detected and the time a wait on the
 * reset wait queue is attempted via check_state().
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	int lretry = 20; /* up to 2 seconds */
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	up_read(&cfg->ioctl_rwsem);
	rc = mutex_lock_interruptible(mutex);
	down_read(&cfg->ioctl_rwsem);
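	/*
	 * The ioctl rwsem is dropped above while blocking on the recovery
	 * mutex to avoid the 3-way deadlock described in the prolog.
	 */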
	if (rc)
		goto out;

	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
		rc = -ENODEV;
		goto out;
	}

	dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
		__func__, recover->reason, rctxid);

retry:
	/* Ensure that this process is attached to the context */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
				__func__, ctxid, rc);
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again!\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc)
					goto out;
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = ctxi->lfd;
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		recover->hdr.return_flags |=
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		goto out;
	}

	/* Test if in error state */
	reg = readq_be(&afu->ctrl_map->mbox_r);
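	/* A read of all-Fs (-1) indicates the MMIO path to the AFU is down */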
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Before checking the state, put back the context obtained
		 * with get_context() as it is no longer needed and sleep for
		 * a short period of time (see prolog notes).
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}

/**
 * process_sense() - evaluates and processes sense data
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
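	/* scsi_normalize_sense() returns 0 if the sense buffer can't be parsed */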
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
		/* fall through */
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			/* fall through */
		case 0x2A: /* Device settings/capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, Rescan. */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}

/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handles size changes
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
		"flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/*
		 * Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}
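	/*
	 * For the virtual case above: lxt_cnt chunks * MC_CHUNK_SIZE blocks
	 * per chunk * blk_len bytes per block gives a size in bytes; dividing
	 * by CXLFLASH_BLOCK_SIZE converts that to exported LBAs, and the
	 * decrement puts it in last-LBA form.
	 */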
	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
		__func__, rc, verify->last_lba);
	return rc;
}

/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd:	The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */
static char *decode_ioctl(int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}

/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev:	SCSI device associated with LUN.
 * @arg:	UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct lun and the size (in blocks) of
 * the direct lun in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORT(sdev->channel);
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
			__func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: too many opens for this context\n", __func__);
		rc = -EMFILE;	/* too many opens */
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
	cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
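	/*
	 * Above, the RHT entry is programmed in format 1 (physical LUN) and
	 * a lightweight sync is issued so the AFU picks up the new entry.
	 */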

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err1:
	cxlflash_lun_detach(gli);
	goto out;
}

/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override! (%d)\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}

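/*
 * Hypothetical userspace sketch (not part of this driver) of how the ioctls
 * dispatched below are typically issued, assuming the dk_cxlflash_* layout
 * from the uapi header and an open descriptor sg_fd on the LUN's sg node:
 *
 *	struct dk_cxlflash_attach attach = { 0 };
 *
 *	attach.hdr.version = DK_CXLFLASH_VERSION_0;
 *	attach.hdr.flags = O_RDWR;		// translated to AFU perms
 *	attach.num_interrupts = 4;		// assumed per-context IRQ count
 *	if (ioctl(sg_fd, DK_CXLFLASH_ATTACH, &attach) == 0)
 *		adap_fd = attach.adap_fd;	// fd installed by disk_attach
 */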
/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		/* fall through */

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
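		/*
		 * E.g. DK_CXLFLASH_ATTACH maps to idx 0 and
		 * DK_CXLFLASH_USER_DIRECT to idx 1; this only holds while
		 * the ioctl numbers are contiguous and ioctl_tbl preserves
		 * that order (see the NOTE above).
		 */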
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		/* fall through */
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail! "
			"size=%lu cmd=%d (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}
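	/*
	 * Rejecting non-zero reserved fields/return_flags above keeps them
	 * available for future versions of the interface.
	 */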
	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail! "
				"size=%lu cmd=%d (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);

	return rc;
}