/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);

static struct bus_type bcma_bus_type = {
	.name		= "bcma",
	.match		= bcma_bus_match,
	.probe		= bcma_device_probe,
	.remove		= bcma_device_remove,
	.uevent		= bcma_device_uevent,
	.dev_groups	= bcma_device_groups,
};
static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
		return BCMA_CORE_4706_CHIPCOMMON;
	return BCMA_CORE_CHIPCOMMON;
}
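
/*
 * Find a core on the bus by core ID and unit number. Some chips carry
 * several instances of the same core (e.g. two PCIe cores); the unit
 * number selects among them. Returns NULL if no match was found during
 * the bus scan.
 */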
struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);
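
/*
 * Poll a core register until the masked value equals @value or the
 * timeout (in jiffies) expires. Returns true on success, false (with a
 * warning) on timeout.
 */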
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}

static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}

static bool bcma_is_core_needed_early(u16 core_id)
{
	switch (core_id) {
	case BCMA_CORE_NS_NAND:
	case BCMA_CORE_NS_QSPI:
		return true;
	}

	return false;
}
#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent || !parent->dev.of_node)
		return NULL;

	for_each_child_of_node(parent->dev.of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}
	return NULL;
}

static int bcma_of_irq_parse(struct platform_device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	out_irq->np = parent->dev.of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}

static unsigned int bcma_of_get_irq(struct platform_device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!parent || !parent->dev.of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}

static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;

	core->irq = bcma_of_get_irq(parent, core, 0);
}
#else
static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
}
static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
					   struct bcma_device *core, int num)
{
	return 0;
}
#endif /* CONFIG_OF */
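
/*
 * Return the interrupt number to use for a core. On PCI hosts this is
 * the host PCI IRQ. On SoC hosts IRQ 0 is derived from the MIPS core
 * routing when a MIPS core is present; other IRQs are resolved through
 * the device tree. SDIO hosts provide no per-core interrupt.
 */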
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			mips_irq = bcma_core_mips_irq(core);
			return mips_irq <= 4 ? mips_irq + 2 : 0;
		}
		if (bus->host_pdev)
			return bcma_of_get_irq(bus->host_pdev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);
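
/*
 * Fill in the generic device fields of a core found during the bus
 * scan: device name, bus type, release callback, plus the host-specific
 * parent device, DMA device and IRQ.
 */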
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dev.parent = &bus->host_pci->dev;
		core->dma_dev = &bus->host_pci->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		core->dev.dma_mask = &core->dev.coherent_dma_mask;
		if (bus->host_pdev) {
			core->dma_dev = &bus->host_pdev->dev;
			core->dev.parent = &bus->host_pdev->dev;
			bcma_of_fill_device(bus->host_pdev, core);
		} else {
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}
}

struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
{
	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		if (bus->host_pci)
			return &bus->host_pci->dev;
		else
			return NULL;
	case BCMA_HOSTTYPE_SOC:
		if (bus->host_pdev)
			return &bus->host_pdev->dev;
		else
			return NULL;
	case BCMA_HOSTTYPE_SDIO:
		if (bus->host_sdio)
			return &bus->host_sdio->dev;
		else
			return NULL;
	}
	return NULL;
}
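
/*
 * Assign the bus its number, initialize the core list and detect the
 * chip. Called once per bus, before any cores are registered.
 */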
void bcma_init_bus(struct bcma_bus *bus)
{
	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	INIT_LIST_HEAD(&bus->cores);
	bus->nr_cores = 0;

	bcma_detect_chip(bus);
}

static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_register(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		put_device(&core->dev);
		return;
	}
	core->dev_registered = true;
}
static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We handle these cores ourselves, no separate driver needed */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_DRIVER_MIPS
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif

	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err)
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}
void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		if (!core->dev_registered)
			continue;
		list_del(&core->list);
		device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);

	/* Now no one uses the internally-handled cores, we can free them */
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		kfree(core);
	}
}
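
/*
 * Scan the bus for cores, perform early initialization of the
 * ChipCommon and PCIe cores, read the SPROM, then initialize the
 * remaining internally-handled cores and register everything else with
 * the driver core.
 */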
int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;
	struct device *dev;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init PCIE core */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]);
	}

	dev = bcma_bus_get_host_dev(bus);
	if (dev) {
		of_platform_default_populate(dev->of_node, NULL, dev);
	}

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err)
		bcma_err(bus, "Failed to get SPROM: %d\n", err);

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init CC B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}
void bcma_bus_unregister(struct bcma_bus *bus)
{
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	bcma_unregister_cores(bus);
}

/*
 * This is a special version of the bus registration function, designed
 * for SoCs. It scans the bus and performs basic initialization of the
 * main cores only. Note that it requires memory allocation, but it will
 * not try to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan bus: %d\n", err);
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}
#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);
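
/*
 * Match a core against a driver's ID table. An entry matches when each
 * field (manufacturer, core ID, revision, class) is either equal or set
 * to the corresponding BCMA_ANY_* wildcard.
 */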
static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
		if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
		    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}

static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);
	int err = 0;

	get_device(dev);
	if (adrv->probe)
		err = adrv->probe(core);
	if (err)
		put_device(dev);

	return err;
}

static int bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);

	if (adrv->remove)
		adrv->remove(core);
	put_device(dev);

	return 0;
}

static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}
static int __init bcma_modinit(void)
{
	int err;

	err = bus_register(&bcma_bus_type);
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
fs_initcall(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)