spi.c

/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
    struct spi_device *spi = to_spi_device(dev);

    /* spi masters may cleanup for released devices */
    if (spi->master->cleanup)
        spi->master->cleanup(spi);

    spi_master_put(spi->master);
    kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
    const struct spi_device *spi = to_spi_device(dev);
    int len;

    len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
    if (len != -ENODEV)
        return len;

    return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_master_##field##_show(struct device *dev, \
        struct device_attribute *attr, \
        char *buf) \
{ \
    struct spi_master *master = container_of(dev, \
            struct spi_master, dev); \
    return spi_statistics_##field##_show(&master->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_master_##field = { \
    .attr = { .name = file, .mode = S_IRUGO }, \
    .show = spi_master_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
        struct device_attribute *attr, \
        char *buf) \
{ \
    struct spi_device *spi = container_of(dev, \
            struct spi_device, dev); \
    return spi_statistics_##field##_show(&spi->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
    .attr = { .name = file, .mode = S_IRUGO }, \
    .show = spi_device_##field##_show, \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
        char *buf) \
{ \
    unsigned long flags; \
    ssize_t len; \
    spin_lock_irqsave(&stat->lock, flags); \
    len = sprintf(buf, format_string, stat->field); \
    spin_unlock_irqrestore(&stat->lock, flags); \
    return len; \
} \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string) \
    SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
            field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
    SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
            "transfer_bytes_histo_" number, \
            transfer_bytes_histo[index], "%lu")

SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

static struct attribute *spi_dev_attrs[] = {
    &dev_attr_modalias.attr,
    NULL,
};

static const struct attribute_group spi_dev_group = {
    .attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
    &dev_attr_spi_device_messages.attr,
    &dev_attr_spi_device_transfers.attr,
    &dev_attr_spi_device_errors.attr,
    &dev_attr_spi_device_timedout.attr,
    &dev_attr_spi_device_spi_sync.attr,
    &dev_attr_spi_device_spi_sync_immediate.attr,
    &dev_attr_spi_device_spi_async.attr,
    &dev_attr_spi_device_bytes.attr,
    &dev_attr_spi_device_bytes_rx.attr,
    &dev_attr_spi_device_bytes_tx.attr,
    &dev_attr_spi_device_transfer_bytes_histo0.attr,
    &dev_attr_spi_device_transfer_bytes_histo1.attr,
    &dev_attr_spi_device_transfer_bytes_histo2.attr,
    &dev_attr_spi_device_transfer_bytes_histo3.attr,
    &dev_attr_spi_device_transfer_bytes_histo4.attr,
    &dev_attr_spi_device_transfer_bytes_histo5.attr,
    &dev_attr_spi_device_transfer_bytes_histo6.attr,
    &dev_attr_spi_device_transfer_bytes_histo7.attr,
    &dev_attr_spi_device_transfer_bytes_histo8.attr,
    &dev_attr_spi_device_transfer_bytes_histo9.attr,
    &dev_attr_spi_device_transfer_bytes_histo10.attr,
    &dev_attr_spi_device_transfer_bytes_histo11.attr,
    &dev_attr_spi_device_transfer_bytes_histo12.attr,
    &dev_attr_spi_device_transfer_bytes_histo13.attr,
    &dev_attr_spi_device_transfer_bytes_histo14.attr,
    &dev_attr_spi_device_transfer_bytes_histo15.attr,
    &dev_attr_spi_device_transfer_bytes_histo16.attr,
    NULL,
};

static const struct attribute_group spi_device_statistics_group = {
    .name = "statistics",
    .attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
    &spi_dev_group,
    &spi_device_statistics_group,
    NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
    &dev_attr_spi_master_messages.attr,
    &dev_attr_spi_master_transfers.attr,
    &dev_attr_spi_master_errors.attr,
    &dev_attr_spi_master_timedout.attr,
    &dev_attr_spi_master_spi_sync.attr,
    &dev_attr_spi_master_spi_sync_immediate.attr,
    &dev_attr_spi_master_spi_async.attr,
    &dev_attr_spi_master_bytes.attr,
    &dev_attr_spi_master_bytes_rx.attr,
    &dev_attr_spi_master_bytes_tx.attr,
    &dev_attr_spi_master_transfer_bytes_histo0.attr,
    &dev_attr_spi_master_transfer_bytes_histo1.attr,
    &dev_attr_spi_master_transfer_bytes_histo2.attr,
    &dev_attr_spi_master_transfer_bytes_histo3.attr,
    &dev_attr_spi_master_transfer_bytes_histo4.attr,
    &dev_attr_spi_master_transfer_bytes_histo5.attr,
    &dev_attr_spi_master_transfer_bytes_histo6.attr,
    &dev_attr_spi_master_transfer_bytes_histo7.attr,
    &dev_attr_spi_master_transfer_bytes_histo8.attr,
    &dev_attr_spi_master_transfer_bytes_histo9.attr,
    &dev_attr_spi_master_transfer_bytes_histo10.attr,
    &dev_attr_spi_master_transfer_bytes_histo11.attr,
    &dev_attr_spi_master_transfer_bytes_histo12.attr,
    &dev_attr_spi_master_transfer_bytes_histo13.attr,
    &dev_attr_spi_master_transfer_bytes_histo14.attr,
    &dev_attr_spi_master_transfer_bytes_histo15.attr,
    &dev_attr_spi_master_transfer_bytes_histo16.attr,
    NULL,
};

static const struct attribute_group spi_master_statistics_group = {
    .name = "statistics",
    .attrs = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
    &spi_master_statistics_group,
    NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                struct spi_transfer *xfer,
                struct spi_master *master)
{
    unsigned long flags;
    int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

    if (l2len < 0)
        l2len = 0;

    spin_lock_irqsave(&stats->lock, flags);

    stats->transfers++;
    stats->transfer_bytes_histo[l2len]++;

    stats->bytes += xfer->len;
    if ((xfer->tx_buf) &&
        (xfer->tx_buf != master->dummy_tx))
        stats->bytes_tx += xfer->len;
    if ((xfer->rx_buf) &&
        (xfer->rx_buf != master->dummy_rx))
        stats->bytes_rx += xfer->len;

    spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
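
/*
 * Bucket selection above is logarithmic: fls() returns the position of the
 * highest set bit, so a transfer of xfer->len bytes lands in bucket
 * fls(len) - 1, capped at SPI_STATISTICS_HISTO_SIZE - 1. For example, a
 * 100-byte transfer has fls(100) = 7 and is counted in bucket 6, which is
 * the "transfer_bytes_histo_64-127" file; a zero-length transfer would give
 * l2len = -1 and is clamped into bucket 0 ("0-1").
 */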
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                const struct spi_device *sdev)
{
    while (id->name[0]) {
        if (!strcmp(sdev->modalias, id->name))
            return id;
        id++;
    }
    return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
    const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

    return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
    const struct spi_device *spi = to_spi_device(dev);
    const struct spi_driver *sdrv = to_spi_driver(drv);

    /* Attempt an OF style match */
    if (of_driver_match_device(dev, drv))
        return 1;

    /* Then try ACPI */
    if (acpi_driver_match_device(dev, drv))
        return 1;

    if (sdrv->id_table)
        return !!spi_match_id(sdrv->id_table, spi);

    return strcmp(spi->modalias, drv->name) == 0;
}
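
/*
 * Matching thus proceeds OF -> ACPI -> id_table -> driver name. An
 * illustrative (hypothetical) id table consumed by spi_match_id() above;
 * drivers typically pair it with MODULE_DEVICE_TABLE(spi, ...) so that the
 * MODALIAS emitted by spi_uevent() below can trigger module autoloading:
 *
 *    static const struct spi_device_id foo_ids[] = {
 *        { "foo-chip", 0 },
 *        { }
 *    };
 *    MODULE_DEVICE_TABLE(spi, foo_ids);
 */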
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
    const struct spi_device *spi = to_spi_device(dev);
    int rc;

    rc = acpi_device_uevent_modalias(dev, env);
    if (rc != -ENODEV)
        return rc;

    add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
    return 0;
}

struct bus_type spi_bus_type = {
    .name = "spi",
    .dev_groups = spi_dev_groups,
    .match = spi_match_device,
    .uevent = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

static int spi_drv_probe(struct device *dev)
{
    const struct spi_driver *sdrv = to_spi_driver(dev->driver);
    struct spi_device *spi = to_spi_device(dev);
    int ret;

    ret = of_clk_set_defaults(dev->of_node, false);
    if (ret)
        return ret;

    if (dev->of_node) {
        spi->irq = of_irq_get(dev->of_node, 0);
        if (spi->irq == -EPROBE_DEFER)
            return -EPROBE_DEFER;
        if (spi->irq < 0)
            spi->irq = 0;
    }

    ret = dev_pm_domain_attach(dev, true);
    if (ret != -EPROBE_DEFER) {
        ret = sdrv->probe(spi);
        if (ret)
            dev_pm_domain_detach(dev, true);
    }

    return ret;
}

static int spi_drv_remove(struct device *dev)
{
    const struct spi_driver *sdrv = to_spi_driver(dev->driver);
    int ret;

    ret = sdrv->remove(to_spi_device(dev));
    dev_pm_domain_detach(dev, true);

    return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
    const struct spi_driver *sdrv = to_spi_driver(dev->driver);

    sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
    sdrv->driver.owner = owner;
    sdrv->driver.bus = &spi_bus_type;
    if (sdrv->probe)
        sdrv->driver.probe = spi_drv_probe;
    if (sdrv->remove)
        sdrv->driver.remove = spi_drv_remove;
    if (sdrv->shutdown)
        sdrv->driver.shutdown = spi_drv_shutdown;
    return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
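
/*
 * A minimal sketch of a client driver built on the hooks above; the "foo"
 * names are hypothetical. Callers normally go through the
 * spi_register_driver() wrapper (or the module_spi_driver() helper) rather
 * than calling __spi_register_driver() directly:
 *
 *    static int foo_probe(struct spi_device *spi)
 *    {
 *        return 0;
 *    }
 *
 *    static int foo_remove(struct spi_device *spi)
 *    {
 *        return 0;
 *    }
 *
 *    static struct spi_driver foo_driver = {
 *        .driver = { .name = "foo" },
 *        .probe  = foo_probe,
 *        .remove = foo_remove,
 *    };
 *    module_spi_driver(foo_driver);
 */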
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
    struct list_head list;
    struct spi_board_info board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
    struct spi_device *spi;

    if (!spi_master_get(master))
        return NULL;

    spi = kzalloc(sizeof(*spi), GFP_KERNEL);
    if (!spi) {
        spi_master_put(master);
        return NULL;
    }

    spi->master = master;
    spi->dev.parent = &master->dev;
    spi->dev.bus = &spi_bus_type;
    spi->dev.release = spidev_release;
    spi->cs_gpio = -ENOENT;

    spin_lock_init(&spi->statistics.lock);

    device_initialize(&spi->dev);
    return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
    struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

    if (adev) {
        dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
        return;
    }

    dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
            spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
    struct spi_device *spi = to_spi_device(dev);
    struct spi_device *new_spi = data;

    if (spi->master == new_spi->master &&
        spi->chip_select == new_spi->chip_select)
        return -EBUSY;
    return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
    static DEFINE_MUTEX(spi_add_lock);
    struct spi_master *master = spi->master;
    struct device *dev = master->dev.parent;
    int status;

    /* Chipselects are numbered 0..max; validate. */
    if (spi->chip_select >= master->num_chipselect) {
        dev_err(dev, "cs%d >= max %d\n",
            spi->chip_select,
            master->num_chipselect);
        return -EINVAL;
    }

    /* Set the bus ID string */
    spi_dev_set_name(spi);

    /* We need to make sure there's no other device with this
     * chipselect **BEFORE** we call setup(), else we'll trash
     * its configuration.  Lock against concurrent add() calls.
     */
    mutex_lock(&spi_add_lock);

    status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
    if (status) {
        dev_err(dev, "chipselect %d already in use\n",
                spi->chip_select);
        goto done;
    }

    if (master->cs_gpios)
        spi->cs_gpio = master->cs_gpios[spi->chip_select];

    /* Drivers may modify this initial i/o setup, but will
     * normally rely on the device being setup.  Devices
     * using SPI_CS_HIGH can't coexist well otherwise...
     */
    status = spi_setup(spi);
    if (status < 0) {
        dev_err(dev, "can't setup %s, status %d\n",
                dev_name(&spi->dev), status);
        goto done;
    }

    /* Device may be bound to an active driver when this returns */
    status = device_add(&spi->dev);
    if (status < 0)
        dev_err(dev, "can't add %s, status %d\n",
                dev_name(&spi->dev), status);
    else
        dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
    mutex_unlock(&spi_add_lock);
    return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
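
/*
 * Illustrative only: the alloc/fill/add sequence described above, with
 * made-up values. On failure the device must be dropped with spi_dev_put(),
 * never kfree()d directly, since it already holds a master reference:
 *
 *    struct spi_device *spi = spi_alloc_device(master);
 *
 *    if (!spi)
 *        return -ENOMEM;
 *    strlcpy(spi->modalias, "foo-chip", sizeof(spi->modalias));
 *    spi->chip_select = 1;
 *    spi->max_speed_hz = 1000000;
 *    if (spi_add_device(spi)) {
 *        spi_dev_put(spi);
 *        return -ENODEV;
 *    }
 */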
/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
                struct spi_board_info *chip)
{
    struct spi_device *proxy;
    int status;

    /* NOTE:  caller did any chip->bus_num checks necessary.
     *
     * Also, unless we change the return value convention to use
     * error-or-pointer (not NULL-or-pointer), troubleshootability
     * suggests syslogged diagnostics are best here (ugh).
     */

    proxy = spi_alloc_device(master);
    if (!proxy)
        return NULL;

    WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

    proxy->chip_select = chip->chip_select;
    proxy->max_speed_hz = chip->max_speed_hz;
    proxy->mode = chip->mode;
    proxy->irq = chip->irq;
    strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
    proxy->dev.platform_data = (void *) chip->platform_data;
    proxy->controller_data = chip->controller_data;
    proxy->controller_state = NULL;

    status = spi_add_device(proxy);
    if (status < 0) {
        spi_dev_put(proxy);
        return NULL;
    }

    return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
                struct spi_board_info *bi)
{
    struct spi_device *dev;

    if (master->bus_num != bi->bus_num)
        return;

    dev = spi_new_device(master, bi);
    if (!dev)
        dev_err(master->dev.parent, "can't create new device for %s\n",
            bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
    struct boardinfo *bi;
    int i;

    if (!n)
        return -EINVAL;

    bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
    if (!bi)
        return -ENOMEM;

    for (i = 0; i < n; i++, bi++, info++) {
        struct spi_master *master;

        memcpy(&bi->board_info, info, sizeof(*info));
        mutex_lock(&board_lock);
        list_add_tail(&bi->list, &board_list);
        list_for_each_entry(master, &spi_master_list, list)
            spi_match_master_to_boardinfo(master, &bi->board_info);
        mutex_unlock(&board_lock);
    }

    return 0;
}
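
/*
 * A sketch of the board-file usage described above, with hypothetical
 * values; the table may live in __initdata because it is copied here:
 *
 *    static struct spi_board_info foo_board_info[] __initdata = {
 *        {
 *            .modalias     = "foo-chip",
 *            .bus_num      = 0,
 *            .chip_select  = 1,
 *            .max_speed_hz = 1000000,
 *            .mode         = SPI_MODE_0,
 *        },
 *    };
 *
 *    spi_register_board_info(foo_board_info, ARRAY_SIZE(foo_board_info));
 */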
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
    if (spi->mode & SPI_CS_HIGH)
        enable = !enable;

    if (gpio_is_valid(spi->cs_gpio))
        gpio_set_value(spi->cs_gpio, !enable);
    else if (spi->master->set_cs)
        spi->master->set_cs(spi, !enable);
}
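
/*
 * Worked through: "enable" here means "assert chip select". For the default
 * active-low device, enable=true drives the GPIO to !enable = 0, i.e. low,
 * asserting the line. With SPI_CS_HIGH set, enable is first inverted, so
 * asserting drives the line high instead. A controller's set_cs() hook
 * receives the same post-inversion line level.
 */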
#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
            struct sg_table *sgt, void *buf, size_t len,
            enum dma_data_direction dir)
{
    const bool vmalloced_buf = is_vmalloc_addr(buf);
    int desc_len;
    int sgs;
    struct page *vm_page;
    void *sg_buf;
    size_t min;
    int i, ret;

    if (vmalloced_buf) {
        desc_len = PAGE_SIZE;
        sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
    } else {
        desc_len = master->max_dma_len;
        sgs = DIV_ROUND_UP(len, desc_len);
    }

    ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
    if (ret != 0)
        return ret;

    for (i = 0; i < sgs; i++) {
        if (vmalloced_buf) {
            /*
             * Next scatterlist entry size is the minimum between
             * the desc_len and the remaining buffer length that
             * fits in a page.
             */
            min = min_t(size_t, desc_len,
                    min_t(size_t, len,
                        PAGE_SIZE - offset_in_page(buf)));
            vm_page = vmalloc_to_page(buf);
            if (!vm_page) {
                sg_free_table(sgt);
                return -ENOMEM;
            }
            sg_set_page(&sgt->sgl[i], vm_page,
                    min, offset_in_page(buf));
        } else {
            min = min_t(size_t, len, desc_len);
            sg_buf = buf;
            sg_set_buf(&sgt->sgl[i], sg_buf, min);
        }

        buf += min;
        len -= min;
    }

    ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
    if (!ret)
        ret = -ENOMEM;
    if (ret < 0) {
        sg_free_table(sgt);
        return ret;
    }

    sgt->nents = ret;

    return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
            struct sg_table *sgt, enum dma_data_direction dir)
{
    if (sgt->orig_nents) {
        dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
        sg_free_table(sgt);
    }
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
    struct device *tx_dev, *rx_dev;
    struct spi_transfer *xfer;
    int ret;

    if (!master->can_dma)
        return 0;

    if (master->dma_tx)
        tx_dev = master->dma_tx->device->dev;
    else
        tx_dev = &master->dev;

    if (master->dma_rx)
        rx_dev = master->dma_rx->device->dev;
    else
        rx_dev = &master->dev;

    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        if (!master->can_dma(master, msg->spi, xfer))
            continue;

        if (xfer->tx_buf != NULL) {
            ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
                    (void *)xfer->tx_buf, xfer->len,
                    DMA_TO_DEVICE);
            if (ret != 0)
                return ret;
        }

        if (xfer->rx_buf != NULL) {
            ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
                    xfer->rx_buf, xfer->len,
                    DMA_FROM_DEVICE);
            if (ret != 0) {
                spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
                        DMA_TO_DEVICE);
                return ret;
            }
        }
    }

    master->cur_msg_mapped = true;

    return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
    struct spi_transfer *xfer;
    struct device *tx_dev, *rx_dev;

    if (!master->cur_msg_mapped || !master->can_dma)
        return 0;

    if (master->dma_tx)
        tx_dev = master->dma_tx->device->dev;
    else
        tx_dev = &master->dev;

    if (master->dma_rx)
        rx_dev = master->dma_rx->device->dev;
    else
        rx_dev = &master->dev;

    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        if (!master->can_dma(master, msg->spi, xfer))
            continue;

        spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
        spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
    }

    return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
                struct spi_message *msg)
{
    return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
                struct spi_message *msg)
{
    return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
                struct spi_message *msg)
{
    struct spi_transfer *xfer;

    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        /*
         * Restore the original value of tx_buf or rx_buf if they are
         * NULL.
         */
        if (xfer->tx_buf == master->dummy_tx)
            xfer->tx_buf = NULL;
        if (xfer->rx_buf == master->dummy_rx)
            xfer->rx_buf = NULL;
    }

    return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
    struct spi_transfer *xfer;
    void *tmp;
    unsigned int max_tx, max_rx;

    if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
        max_tx = 0;
        max_rx = 0;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
            if ((master->flags & SPI_MASTER_MUST_TX) &&
                !xfer->tx_buf)
                max_tx = max(xfer->len, max_tx);
            if ((master->flags & SPI_MASTER_MUST_RX) &&
                !xfer->rx_buf)
                max_rx = max(xfer->len, max_rx);
        }

        if (max_tx) {
            tmp = krealloc(master->dummy_tx, max_tx,
                    GFP_KERNEL | GFP_DMA);
            if (!tmp)
                return -ENOMEM;
            master->dummy_tx = tmp;
            memset(tmp, 0, max_tx);
        }

        if (max_rx) {
            tmp = krealloc(master->dummy_rx, max_rx,
                    GFP_KERNEL | GFP_DMA);
            if (!tmp)
                return -ENOMEM;
            master->dummy_rx = tmp;
        }

        if (max_tx || max_rx) {
            list_for_each_entry(xfer, &msg->transfers,
                        transfer_list) {
                if (!xfer->tx_buf)
                    xfer->tx_buf = master->dummy_tx;
                if (!xfer->rx_buf)
                    xfer->rx_buf = master->dummy_rx;
            }
        }
    }

    return __spi_map_msg(master, msg);
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
                struct spi_message *msg)
{
    struct spi_transfer *xfer;
    bool keep_cs = false;
    int ret = 0;
    unsigned long ms = 1;
    struct spi_statistics *statm = &master->statistics;
    struct spi_statistics *stats = &msg->spi->statistics;

    spi_set_cs(msg->spi, true);

    SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
    SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        trace_spi_transfer_start(msg, xfer);

        spi_statistics_add_transfer_stats(statm, xfer, master);
        spi_statistics_add_transfer_stats(stats, xfer, master);

        if (xfer->tx_buf || xfer->rx_buf) {
            reinit_completion(&master->xfer_completion);

            ret = master->transfer_one(master, msg->spi, xfer);
            if (ret < 0) {
                SPI_STATISTICS_INCREMENT_FIELD(statm,
                            errors);
                SPI_STATISTICS_INCREMENT_FIELD(stats,
                            errors);
                dev_err(&msg->spi->dev,
                    "SPI transfer failed: %d\n", ret);
                goto out;
            }

            if (ret > 0) {
                ret = 0;
                ms = xfer->len * 8 * 1000 / xfer->speed_hz;
                ms += ms + 100; /* some tolerance */

                ms = wait_for_completion_timeout(&master->xfer_completion,
                            msecs_to_jiffies(ms));
            }

            if (ms == 0) {
                SPI_STATISTICS_INCREMENT_FIELD(statm,
                            timedout);
                SPI_STATISTICS_INCREMENT_FIELD(stats,
                            timedout);
                dev_err(&msg->spi->dev,
                    "SPI transfer timed out\n");
                msg->status = -ETIMEDOUT;
            }
        } else {
            if (xfer->len)
                dev_err(&msg->spi->dev,
                    "Bufferless transfer has length %u\n",
                    xfer->len);
        }

        trace_spi_transfer_stop(msg, xfer);

        if (msg->status != -EINPROGRESS)
            goto out;

        if (xfer->delay_usecs)
            udelay(xfer->delay_usecs);

        if (xfer->cs_change) {
            if (list_is_last(&xfer->transfer_list,
                    &msg->transfers)) {
                keep_cs = true;
            } else {
                spi_set_cs(msg->spi, false);
                udelay(10);
                spi_set_cs(msg->spi, true);
            }
        }

        msg->actual_length += xfer->len;
    }

out:
    if (ret != 0 || !keep_cs)
        spi_set_cs(msg->spi, false);

    if (msg->status == -EINPROGRESS)
        msg->status = ret;

    if (msg->status && master->handle_err)
        master->handle_err(master, msg);

    spi_finalize_current_message(master);

    return ret;
}
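
/*
 * The timeout arithmetic above, worked through: the nominal transfer time
 * is xfer->len * 8 bits at speed_hz, expressed in milliseconds; "ms += ms
 * + 100" then doubles that estimate and adds 100 ms of slack. For example,
 * a 4096-byte transfer at 1 MHz nominally takes 4096 * 8 * 1000 / 1000000,
 * roughly 32 ms, so the completion is waited on for about 2 * 32 + 100 =
 * 164 ms before the transfer is declared timed out.
 */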
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
    complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
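
/*
 * Sketch of the expected driver pattern (hypothetical names): a
 * transfer_one() that starts the hardware and returns a positive value to
 * request the timed wait above, paired with an interrupt handler that
 * reports completion:
 *
 *    static irqreturn_t foo_irq(int irq, void *dev_id)
 *    {
 *        struct spi_master *master = dev_id;
 *
 *        spi_finalize_current_transfer(master);
 *        return IRQ_HANDLED;
 *    }
 */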
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
    unsigned long flags;
    bool was_busy = false;
    int ret;

    /* Lock queue */
    spin_lock_irqsave(&master->queue_lock, flags);

    /* Make sure we are not already running a message */
    if (master->cur_msg) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return;
    }

    /* If another context is idling the device then defer */
    if (master->idling) {
        queue_kthread_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return;
    }

    /* Check if the queue is idle */
    if (list_empty(&master->queue) || !master->running) {
        if (!master->busy) {
            spin_unlock_irqrestore(&master->queue_lock, flags);
            return;
        }

        /* Only do teardown in the thread */
        if (!in_kthread) {
            queue_kthread_work(&master->kworker,
                    &master->pump_messages);
            spin_unlock_irqrestore(&master->queue_lock, flags);
            return;
        }

        master->busy = false;
        master->idling = true;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        kfree(master->dummy_rx);
        master->dummy_rx = NULL;
        kfree(master->dummy_tx);
        master->dummy_tx = NULL;
        if (master->unprepare_transfer_hardware &&
            master->unprepare_transfer_hardware(master))
            dev_err(&master->dev,
                "failed to unprepare transfer hardware\n");
        if (master->auto_runtime_pm) {
            pm_runtime_mark_last_busy(master->dev.parent);
            pm_runtime_put_autosuspend(master->dev.parent);
        }
        trace_spi_master_idle(master);

        spin_lock_irqsave(&master->queue_lock, flags);
        master->idling = false;
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return;
    }

    /* Extract head of queue */
    master->cur_msg =
        list_first_entry(&master->queue, struct spi_message, queue);

    list_del_init(&master->cur_msg->queue);
    if (master->busy)
        was_busy = true;
    else
        master->busy = true;
    spin_unlock_irqrestore(&master->queue_lock, flags);

    if (!was_busy && master->auto_runtime_pm) {
        ret = pm_runtime_get_sync(master->dev.parent);
        if (ret < 0) {
            dev_err(&master->dev, "Failed to power device: %d\n",
                ret);
            return;
        }
    }

    if (!was_busy)
        trace_spi_master_busy(master);

    if (!was_busy && master->prepare_transfer_hardware) {
        ret = master->prepare_transfer_hardware(master);
        if (ret) {
            dev_err(&master->dev,
                "failed to prepare transfer hardware\n");

            if (master->auto_runtime_pm)
                pm_runtime_put(master->dev.parent);
            return;
        }
    }

    trace_spi_message_start(master->cur_msg);

    if (master->prepare_message) {
        ret = master->prepare_message(master, master->cur_msg);
        if (ret) {
            dev_err(&master->dev,
                "failed to prepare message: %d\n", ret);
            master->cur_msg->status = ret;
            spi_finalize_current_message(master);
            return;
        }
        master->cur_msg_prepared = true;
    }

    ret = spi_map_msg(master, master->cur_msg);
    if (ret) {
        master->cur_msg->status = ret;
        spi_finalize_current_message(master);
        return;
    }

    ret = master->transfer_one_message(master, master->cur_msg);
    if (ret) {
        dev_err(&master->dev,
            "failed to transfer one message from queue\n");
        return;
    }
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
    struct spi_master *master =
        container_of(work, struct spi_master, pump_messages);

    __spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
    struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

    master->running = false;
    master->busy = false;

    init_kthread_worker(&master->kworker);
    master->kworker_task = kthread_run(kthread_worker_fn,
                    &master->kworker, "%s",
                    dev_name(&master->dev));
    if (IS_ERR(master->kworker_task)) {
        dev_err(&master->dev, "failed to create message pump task\n");
        return PTR_ERR(master->kworker_task);
    }
    init_kthread_work(&master->pump_messages, spi_pump_messages);

    /*
     * Master config will indicate if this controller should run the
     * message pump with high (realtime) priority to reduce the transfer
     * latency on the bus by minimising the delay between a transfer
     * request and the scheduling of the message pump thread. Without this
     * setting the message pump thread will remain at default priority.
     */
    if (master->rt) {
        dev_info(&master->dev,
            "will run message pump with realtime priority\n");
        sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
    }

    return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
    struct spi_message *next;
    unsigned long flags;

    /* get a pointer to the next message, if any */
    spin_lock_irqsave(&master->queue_lock, flags);
    next = list_first_entry_or_null(&master->queue, struct spi_message,
                    queue);
    spin_unlock_irqrestore(&master->queue_lock, flags);

    return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
    struct spi_message *mesg;
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&master->queue_lock, flags);
    mesg = master->cur_msg;
    spin_unlock_irqrestore(&master->queue_lock, flags);

    spi_unmap_msg(master, mesg);

    if (master->cur_msg_prepared && master->unprepare_message) {
        ret = master->unprepare_message(master, mesg);
        if (ret) {
            dev_err(&master->dev,
                "failed to unprepare message: %d\n", ret);
        }
    }

    spin_lock_irqsave(&master->queue_lock, flags);
    master->cur_msg = NULL;
    master->cur_msg_prepared = false;
    queue_kthread_work(&master->kworker, &master->pump_messages);
    spin_unlock_irqrestore(&master->queue_lock, flags);

    trace_spi_message_done(mesg);

    mesg->state = NULL;
    if (mesg->complete)
        mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
    unsigned long flags;

    spin_lock_irqsave(&master->queue_lock, flags);

    if (master->running || master->busy) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return -EBUSY;
    }

    master->running = true;
    master->cur_msg = NULL;
    spin_unlock_irqrestore(&master->queue_lock, flags);

    queue_kthread_work(&master->kworker, &master->pump_messages);

    return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
    unsigned long flags;
    unsigned limit = 500;
    int ret = 0;

    spin_lock_irqsave(&master->queue_lock, flags);

    /*
     * This is a bit lame, but is optimized for the common execution path.
     * A wait_queue on the master->busy could be used, but then the common
     * execution path (pump_messages) would be required to call wake_up or
     * friends on every SPI message. Do this instead.
     */
    while ((!list_empty(&master->queue) || master->busy) && limit--) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        usleep_range(10000, 11000);
        spin_lock_irqsave(&master->queue_lock, flags);
    }

    if (!list_empty(&master->queue) || master->busy)
        ret = -EBUSY;
    else
        master->running = false;

    spin_unlock_irqrestore(&master->queue_lock, flags);

    if (ret) {
        dev_warn(&master->dev,
            "could not stop message queue\n");
        return ret;
    }
    return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
    int ret;

    ret = spi_stop_queue(master);

    /*
     * flush_kthread_worker will block until all work is done.
     * If the reason that stop_queue timed out is that the work will never
     * finish, then it does no good to call flush/stop thread, so
     * return anyway.
     */
    if (ret) {
        dev_err(&master->dev, "problem destroying queue\n");
        return ret;
    }

    flush_kthread_worker(&master->kworker);
    kthread_stop(master->kworker_task);

    return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
                struct spi_message *msg,
                bool need_pump)
{
    struct spi_master *master = spi->master;
    unsigned long flags;

    spin_lock_irqsave(&master->queue_lock, flags);

    if (!master->running) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return -ESHUTDOWN;
    }
    msg->actual_length = 0;
    msg->status = -EINPROGRESS;

    list_add_tail(&msg->queue, &master->queue);
    if (!master->busy && need_pump)
        queue_kthread_work(&master->kworker, &master->pump_messages);

    spin_unlock_irqrestore(&master->queue_lock, flags);
    return 0;
}
/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be queued onto the driver queue for handling
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
    return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
    int ret;

    master->transfer = spi_queued_transfer;
    if (!master->transfer_one_message)
        master->transfer_one_message = spi_transfer_one_message;

    /* Initialize and start queue */
    ret = spi_init_queue(master);
    if (ret) {
        dev_err(&master->dev, "problem initializing queue\n");
        goto err_init_queue;
    }
    master->queued = true;
    ret = spi_start_queue(master);
    if (ret) {
        dev_err(&master->dev, "problem starting queue\n");
        goto err_start_queue;
    }

    return 0;

err_start_queue:
    spi_destroy_queue(master);
err_init_queue:
    return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
    struct spi_device *spi;
    int rc;
    u32 value;

    /* Alloc an spi_device */
    spi = spi_alloc_device(master);
    if (!spi) {
        dev_err(&master->dev, "spi_device alloc error for %s\n",
            nc->full_name);
        rc = -ENOMEM;
        goto err_out;
    }

    /* Select device driver */
    rc = of_modalias_node(nc, spi->modalias,
                sizeof(spi->modalias));
    if (rc < 0) {
        dev_err(&master->dev, "cannot find modalias for %s\n",
            nc->full_name);
        goto err_out;
    }

    /* Device address */
    rc = of_property_read_u32(nc, "reg", &value);
    if (rc) {
        dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
            nc->full_name, rc);
        goto err_out;
    }
    spi->chip_select = value;

    /* Mode (clock phase/polarity/etc.) */
    if (of_find_property(nc, "spi-cpha", NULL))
        spi->mode |= SPI_CPHA;
    if (of_find_property(nc, "spi-cpol", NULL))
        spi->mode |= SPI_CPOL;
    if (of_find_property(nc, "spi-cs-high", NULL))
        spi->mode |= SPI_CS_HIGH;
    if (of_find_property(nc, "spi-3wire", NULL))
        spi->mode |= SPI_3WIRE;
    if (of_find_property(nc, "spi-lsb-first", NULL))
        spi->mode |= SPI_LSB_FIRST;

    /* Device DUAL/QUAD mode */
    if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
        switch (value) {
        case 1:
            break;
        case 2:
            spi->mode |= SPI_TX_DUAL;
            break;
        case 4:
            spi->mode |= SPI_TX_QUAD;
            break;
        default:
            dev_warn(&master->dev,
                "spi-tx-bus-width %d not supported\n",
                value);
            break;
        }
    }

    if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
        switch (value) {
        case 1:
            break;
        case 2:
            spi->mode |= SPI_RX_DUAL;
            break;
        case 4:
            spi->mode |= SPI_RX_QUAD;
            break;
        default:
            dev_warn(&master->dev,
                "spi-rx-bus-width %d not supported\n",
                value);
            break;
        }
    }

    /* Device speed */
    rc = of_property_read_u32(nc, "spi-max-frequency", &value);
    if (rc) {
        dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
            nc->full_name, rc);
        goto err_out;
    }
    spi->max_speed_hz = value;

    /* Store a pointer to the node in the device structure */
    of_node_get(nc);
    spi->dev.of_node = nc;

    /* Register the new device */
    rc = spi_add_device(spi);
    if (rc) {
        dev_err(&master->dev, "spi_device register error %s\n",
            nc->full_name);
        goto err_out;
    }

    return spi;

err_out:
    spi_dev_put(spi);
    return ERR_PTR(rc);
}
  1306. /**
  1307. * of_register_spi_devices() - Register child devices onto the SPI bus
  1308. * @master: Pointer to spi_master device
  1309. *
  1310. * Registers an spi_device for each child node of master node which has a 'reg'
  1311. * property.
  1312. */
  1313. static void of_register_spi_devices(struct spi_master *master)
  1314. {
  1315. struct spi_device *spi;
  1316. struct device_node *nc;
  1317. if (!master->dev.of_node)
  1318. return;
  1319. for_each_available_child_of_node(master->dev.of_node, nc) {
  1320. spi = of_register_spi_device(master, nc);
  1321. if (IS_ERR(spi))
  1322. dev_warn(&master->dev, "Failed to create SPI device for %s\n",
  1323. nc->full_name);
  1324. }
  1325. }
  1326. #else
  1327. static void of_register_spi_devices(struct spi_master *master) { }
  1328. #endif
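
/*
 * Illustrative sketch only (not part of this file): a device tree fragment
 * of the shape of_register_spi_device() above parses. The node, compatible
 * string, and property values are hypothetical.
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;			// chip select 0
 *			spi-max-frequency = <20000000>;	// required property
 *			spi-cpol;			// sets SPI_CPOL
 *			spi-cpha;			// sets SPI_CPHA
 *			spi-tx-bus-width = <4>;		// sets SPI_TX_QUAD
 *		};
 *	};
 */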

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};

/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers. It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after
 * errors adding the device) calling spi_master_put() to prevent a memory
 * leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master *master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = dev;
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
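
/*
 * Illustrative sketch only: a hypothetical controller probe() allocating
 * its spi_master together with driver-private state in one allocation.
 * struct foo_priv and foo_probe are made-up names, not part of this file.
 */
#if 0	/* example, not compiled */
struct foo_priv {
	void __iomem *regs;
};

static int foo_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct foo_priv *priv;

	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
	if (!master)
		return -ENOMEM;

	priv = spi_master_get_devdata(master);	/* points into &master[1] */
	platform_set_drvdata(pdev, master);
	return 0;
}
#endif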

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus. The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers. Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep. It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device *dev = master->dev.parent;
	struct boardinfo *bi;
	int status = -ENODEV;
	int dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention: dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
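
/*
 * Illustrative sketch only: the registration tail of a hypothetical
 * controller probe(). On failure the reference taken in spi_alloc_master()
 * must be dropped with spi_master_put(). All values are made up.
 */
#if 0	/* example, not compiled */
static int foo_register(struct platform_device *pdev,
			struct spi_master *master)
{
	int status;

	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	status = spi_register_master(master);
	if (status)
		spi_master_put(master);	/* drop the allocation reference */
	return status;
}
#endif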

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register an SPI master as with spi_register_master(); the master will
 * automatically be unregistered when @dev is unbound.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);
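
/*
 * Illustrative sketch only: how a controller driver's system PM callbacks
 * might use these helpers to quiesce and restart the message queue. The
 * foo_* names are hypothetical.
 */
#if 0	/* example, not compiled */
static int foo_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);	/* stop queue processing */
}

static int foo_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_resume(master);	/* restart queued transfers */
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#endif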

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time. It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device *dev;
	struct spi_master *master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
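
/*
 * Illustrative sketch only: looking up a bus and dropping the reference
 * that class_find_device() took once the master is no longer needed.
 * foo_show_bus0 is a hypothetical name.
 */
#if 0	/* example, not compiled */
static void foo_show_bus0(void)
{
	struct spi_master *master = spi_busnum_to_master(0);

	if (master) {
		dev_info(&master->dev, "found spi%d\n", master->bus_num);
		spi_master_put(master);	/* release the lookup reference */
	}
}
#endif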

/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
				SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}
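
/*
 * Illustrative sketch only: controller drivers declare the word sizes they
 * accept via bits_per_word_mask, which the check above consults. Both
 * helper macros come from <linux/spi/spi.h>; the widths are hypothetical.
 */
#if 0	/* example, not compiled */
	/* accept exactly 8- and 16-bit words ... */
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);

	/* ... or, alternatively, any width from 4 to 16 bits */
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
#endif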

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default. They may likewise need
 * to update clock rates or word sizes from initial values. This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it. When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support. For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/* check the mode to prevent DUAL and QUAD from being set at the
	 * same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
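
/*
 * Illustrative sketch only: a protocol driver adjusting mode and speed in
 * its probe() before the first transfer. foo_chip_probe and the values
 * are hypothetical.
 */
#if 0	/* example, not compiled */
static int foo_chip_probe(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;		/* SPI_CPOL | SPI_CPHA */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;	/* 1 MHz ceiling for this chip */

	return spi_setup(spi);	/* fails if the master can't comply */
}
#endif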

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
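
/*
 * Illustrative sketch only: submitting a message asynchronously and being
 * notified from the (atomic) completion callback. All foo_* names are
 * hypothetical; the message and buffers must stay allocated until the
 * callback has run.
 */
#if 0	/* example, not compiled */
static void foo_msg_complete(void *context)
{
	struct spi_message *m = context;

	pr_debug("async message finished: %d\n", m->status);
}

static int foo_start_read(struct spi_device *spi, struct spi_message *m,
			  struct spi_transfer *t, void *rxbuf, size_t len)
{
	spi_message_init(m);
	memset(t, 0, sizeof(*t));
	t->rx_buf = rxbuf;		/* should be DMA-safe */
	t->len = len;
	spi_message_add_tail(t, m);

	m->complete = foo_msg_complete;
	m->context = m;

	return spi_async(spi, m);	/* returns before the transfer runs */
}
#endif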

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
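
/*
 * Illustrative sketch only: a synchronous exchange built from two chained
 * transfers (command out, response in). foo_cmd_response is a hypothetical
 * name; the buffers should be DMA-safe.
 */
#if 0	/* example, not compiled */
static int foo_cmd_response(struct spi_device *spi, u8 *cmd, size_t cmd_len,
			    u8 *resp, size_t resp_len)
{
	struct spi_transfer t[2] = {
		{ .tx_buf = cmd,  .len = cmd_len  },
		{ .rx_buf = resp, .len = resp_len },
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);	/* blocks until both transfers finish */
}
#endif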

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
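
/*
 * Illustrative sketch only: claiming the bus for an uninterrupted sequence
 * of messages, as the comments above describe. While the lock is held,
 * only the *_locked transfer calls may be used. foo_atomic_sequence and
 * the message pointers are hypothetical.
 */
#if 0	/* example, not compiled */
static int foo_atomic_sequence(struct spi_device *spi,
			       struct spi_message *first,
			       struct spi_message *second)
{
	int status;

	spi_bus_lock(spi->master);

	status = spi_sync_locked(spi, first);
	if (!status)
		status = spi_sync_locked(spi, second);

	spi_bus_unlock(spi->master);
	return status;
}
#endif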

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
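
/*
 * Illustrative sketch only: a one-byte register read in the style many
 * simple SPI chips use (command byte out, one data byte back). The opcode
 * layout and foo_read_reg are hypothetical.
 */
#if 0	/* example, not compiled */
static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = 0x80 | reg;	/* hypothetical "read register" opcode */

	/* both buffers are copied through the DMA-safe scratch buffer */
	return spi_write_then_read(spi, &cmd, 1, val, 1);
}
#endif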

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi masters are not on the spi_bus, so we must look them up
 * via their class instead
 */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;
	case OF_RECONFIG_CHANGE_REMOVE:
		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);