/* arch/x86/pci/i386.c */
  1. /*
  2. * Low-Level PCI Access for i386 machines
  3. *
  4. * Copyright 1993, 1994 Drew Eckhardt
  5. * Visionary Computing
  6. * (Unix and Linux consulting and custom programming)
  7. * Drew@Colorado.EDU
  8. * +1 (303) 786-7975
  9. *
  10. * Drew's work was sponsored by:
  11. * iX Multiuser Multitasking Magazine
  12. * Hannover, Germany
  13. * hm@ix.de
  14. *
  15. * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
  16. *
  17. * For more information, please consult the following manuals (look at
  18. * http://www.pcisig.com/ for how to get them):
  19. *
  20. * PCI BIOS Specification
  21. * PCI Local Bus Specification
  22. * PCI to PCI Bridge Specification
  23. * PCI System Design Guide
  24. *
  25. */
  26. #include <linux/types.h>
  27. #include <linux/kernel.h>
  28. #include <linux/export.h>
  29. #include <linux/pci.h>
  30. #include <linux/init.h>
  31. #include <linux/ioport.h>
  32. #include <linux/errno.h>
  33. #include <linux/bootmem.h>
  34. #include <asm/pat.h>
  35. #include <asm/e820.h>
  36. #include <asm/pci_x86.h>
  37. #include <asm/io_apic.h>
/*
 * This list of dynamic mappings is for temporarily maintaining
 * original BIOS BAR addresses for possible reinstatement.
 */
struct pcibios_fwaddrmap {
	struct list_head list;		/* link in pcibios_fwaddrmappings */
	struct pci_dev *dev;		/* device these addresses belong to (holds a pci_dev_get() reference) */
	resource_size_t fw_addr[DEVICE_COUNT_RESOURCE];	/* BIOS-assigned address per resource index */
};
/* All saved firmware BAR addresses; protected by pcibios_fwaddrmap_lock. */
static LIST_HEAD(pcibios_fwaddrmappings);
static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);
/* Set once the list has been torn down; save/retrieve become no-ops after. */
static bool pcibios_fw_addr_done;
  50. /* Must be called with 'pcibios_fwaddrmap_lock' lock held. */
  51. static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
  52. {
  53. struct pcibios_fwaddrmap *map;
  54. WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
  55. list_for_each_entry(map, &pcibios_fwaddrmappings, list)
  56. if (map->dev == dev)
  57. return map;
  58. return NULL;
  59. }
  60. static void
  61. pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
  62. {
  63. unsigned long flags;
  64. struct pcibios_fwaddrmap *map;
  65. if (pcibios_fw_addr_done)
  66. return;
  67. spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
  68. map = pcibios_fwaddrmap_lookup(dev);
  69. if (!map) {
  70. spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
  71. map = kzalloc(sizeof(*map), GFP_KERNEL);
  72. if (!map)
  73. return;
  74. map->dev = pci_dev_get(dev);
  75. map->fw_addr[idx] = fw_addr;
  76. INIT_LIST_HEAD(&map->list);
  77. spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
  78. list_add_tail(&map->list, &pcibios_fwaddrmappings);
  79. } else
  80. map->fw_addr[idx] = fw_addr;
  81. spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
  82. }
  83. resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
  84. {
  85. unsigned long flags;
  86. struct pcibios_fwaddrmap *map;
  87. resource_size_t fw_addr = 0;
  88. if (pcibios_fw_addr_done)
  89. return 0;
  90. spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
  91. map = pcibios_fwaddrmap_lookup(dev);
  92. if (map)
  93. fw_addr = map->fw_addr[idx];
  94. spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
  95. return fw_addr;
  96. }
  97. static void __init pcibios_fw_addr_list_del(void)
  98. {
  99. unsigned long flags;
  100. struct pcibios_fwaddrmap *entry, *next;
  101. spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
  102. list_for_each_entry_safe(entry, next, &pcibios_fwaddrmappings, list) {
  103. list_del(&entry->list);
  104. pci_dev_put(entry->dev);
  105. kfree(entry);
  106. }
  107. spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
  108. pcibios_fw_addr_done = true;
  109. }
  110. static int
  111. skip_isa_ioresource_align(struct pci_dev *dev) {
  112. if ((pci_probe & PCI_CAN_SKIP_ISA_ALIGN) &&
  113. !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
  114. return 1;
  115. return 0;
  116. }
  117. /*
  118. * We need to avoid collisions with `mirrored' VGA ports
  119. * and other strange ISA hardware, so we always want the
  120. * addresses to be allocated in the 0x000-0x0ff region
  121. * modulo 0x400.
  122. *
  123. * Why? Because some silly external IO cards only decode
  124. * the low 10 bits of the IO address. The 0x00-0xff region
  125. * is reserved for motherboard devices that decode all 16
  126. * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
  127. * but we want to try to avoid allocating at 0x2900-0x2bff
  128. * which might have be mirrored at 0x0100-0x03ff..
  129. */
  130. resource_size_t
  131. pcibios_align_resource(void *data, const struct resource *res,
  132. resource_size_t size, resource_size_t align)
  133. {
  134. struct pci_dev *dev = data;
  135. resource_size_t start = res->start;
  136. if (res->flags & IORESOURCE_IO) {
  137. if (skip_isa_ioresource_align(dev))
  138. return start;
  139. if (start & 0x300)
  140. start = (start + 0x3ff) & ~0x3ff;
  141. } else if (res->flags & IORESOURCE_MEM) {
  142. /* The low 1MB range is reserved for ISA cards */
  143. if (start < BIOS_END)
  144. start = BIOS_END;
  145. }
  146. return start;
  147. }
  148. EXPORT_SYMBOL(pcibios_align_resource);
  149. /*
  150. * Handle resources of PCI devices. If the world were perfect, we could
  151. * just allocate all the resource regions and do nothing more. It isn't.
  152. * On the other hand, we cannot just re-allocate all devices, as it would
  153. * require us to know lots of host bridge internals. So we attempt to
  154. * keep as much of the original configuration as possible, but tweak it
  155. * when it's found to be wrong.
  156. *
  157. * Known BIOS problems we have to work around:
  158. * - I/O or memory regions not configured
  159. * - regions configured, but not enabled in the command register
  160. * - bogus I/O addresses above 64K used
  161. * - expansion ROMs left enabled (this may sound harmless, but given
  162. * the fact the PCI specs explicitly allow address decoders to be
  163. * shared between expansion ROMs and other resource regions, it's
  164. * at least dangerous)
  165. * - bad resource sizes or overlaps with other regions
  166. *
  167. * Our solution:
  168. * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
  169. * This gives us fixed barriers on where we can allocate.
  170. * (2) Allocate resources for all enabled devices. If there is
  171. * a collision, just mark the resource as unallocated. Also
  172. * disable expansion ROMs during this step.
  173. * (3) Try to allocate resources for disabled devices. If the
  174. * resources were assigned correctly, everything goes well,
  175. * if they weren't, they won't disturb allocation of other
  176. * resources.
  177. * (4) Assign new addresses to resources which were either
  178. * not configured at all or misconfigured. If explicitly
  179. * requested by the user, configure expansion ROM address
  180. * as well.
  181. */
  182. static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
  183. {
  184. int idx;
  185. struct resource *r;
  186. for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
  187. r = &dev->resource[idx];
  188. if (!r->flags)
  189. continue;
  190. if (r->parent) /* Already allocated */
  191. continue;
  192. if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
  193. /*
  194. * Something is wrong with the region.
  195. * Invalidate the resource to prevent
  196. * child resource allocations in this
  197. * range.
  198. */
  199. r->start = r->end = 0;
  200. r->flags = 0;
  201. }
  202. }
  203. }
  204. static void pcibios_allocate_bus_resources(struct pci_bus *bus)
  205. {
  206. struct pci_bus *child;
  207. /* Depth-First Search on bus tree */
  208. if (bus->self)
  209. pcibios_allocate_bridge_resources(bus->self);
  210. list_for_each_entry(child, &bus->children, node)
  211. pcibios_allocate_bus_resources(child);
  212. }
/* Inclusive [start, end] range of device resource indices to scan. */
struct pci_check_idx_range {
	int start;
	int end;
};
/*
 * Claim the BARs of @dev that the firmware already assigned.
 *
 * Two-pass scheme: pass 0 handles resources whose decode is enabled in
 * the command register, pass 1 those that are disabled (pass == disabled
 * selects which ones are processed this time). A BAR that cannot be
 * claimed has its firmware address saved (for possible reinstatement)
 * and is cleared so a new address gets assigned later. On pass 0 any
 * enabled expansion ROM is also switched off in config space.
 */
static void pcibios_allocate_dev_resources(struct pci_dev *dev, int pass)
{
	int idx, disabled, i;
	u16 command;
	struct resource *r;
	/* Standard BARs, plus SR-IOV BARs when configured. */
	struct pci_check_idx_range idx_range[] = {
		{ PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
#ifdef CONFIG_PCI_IOV
		{ PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
#endif
	};

	pci_read_config_word(dev, PCI_COMMAND, &command);
	for (i = 0; i < ARRAY_SIZE(idx_range); i++)
		for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
			r = &dev->resource[idx];
			if (r->parent)	/* Already allocated */
				continue;
			if (!r->start)	/* Address not assigned at all */
				continue;
			/* Is decode for this BAR type currently off? */
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled) {
				dev_dbg(&dev->dev,
					"BAR %d: reserving %pr (d=%d, p=%d)\n",
					idx, r, disabled, pass);
				if (pci_claim_resource(dev, idx) < 0) {
					if (r->flags & IORESOURCE_PCI_FIXED) {
						dev_info(&dev->dev, "BAR %d %pR is immovable\n",
							 idx, r);
					} else {
						/* We'll assign a new address later */
						pcibios_save_fw_addr(dev,
								idx, r->start);
						/* Keep the size, zero the base. */
						r->end -= r->start;
						r->start = 0;
					}
				}
			}
		}
	if (!pass) {
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags & IORESOURCE_ROM_ENABLE) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered. */
			u32 reg;
			dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
			r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			pci_write_config_dword(dev, dev->rom_base_reg,
					       reg & ~PCI_ROM_ADDRESS_ENABLE);
		}
	}
}
  272. static void pcibios_allocate_resources(struct pci_bus *bus, int pass)
  273. {
  274. struct pci_dev *dev;
  275. struct pci_bus *child;
  276. list_for_each_entry(dev, &bus->devices, bus_list) {
  277. pcibios_allocate_dev_resources(dev, pass);
  278. child = dev->subordinate;
  279. if (child)
  280. pcibios_allocate_resources(child, pass);
  281. }
  282. }
  283. static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
  284. {
  285. struct resource *r;
  286. /*
  287. * Try to use BIOS settings for ROMs, otherwise let
  288. * pci_assign_unassigned_resources() allocate the new
  289. * addresses.
  290. */
  291. r = &dev->resource[PCI_ROM_RESOURCE];
  292. if (!r->flags || !r->start)
  293. return;
  294. if (r->parent) /* Already allocated */
  295. return;
  296. if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
  297. r->end -= r->start;
  298. r->start = 0;
  299. }
  300. }
  301. static void pcibios_allocate_rom_resources(struct pci_bus *bus)
  302. {
  303. struct pci_dev *dev;
  304. struct pci_bus *child;
  305. list_for_each_entry(dev, &bus->devices, bus_list) {
  306. pcibios_allocate_dev_rom_resource(dev);
  307. child = dev->subordinate;
  308. if (child)
  309. pcibios_allocate_rom_resources(child);
  310. }
  311. }
  312. static int __init pcibios_assign_resources(void)
  313. {
  314. struct pci_bus *bus;
  315. if (!(pci_probe & PCI_ASSIGN_ROMS))
  316. list_for_each_entry(bus, &pci_root_buses, node)
  317. pcibios_allocate_rom_resources(bus);
  318. pci_assign_unassigned_resources();
  319. pcibios_fw_addr_list_del();
  320. return 0;
  321. }
  322. /**
  323. * called in fs_initcall (one below subsys_initcall),
  324. * give a chance for motherboard reserve resources
  325. */
  326. fs_initcall(pcibios_assign_resources);
  327. void pcibios_resource_survey_bus(struct pci_bus *bus)
  328. {
  329. dev_printk(KERN_DEBUG, &bus->dev, "Allocating resources\n");
  330. pcibios_allocate_bus_resources(bus);
  331. pcibios_allocate_resources(bus, 0);
  332. pcibios_allocate_resources(bus, 1);
  333. if (!(pci_probe & PCI_ASSIGN_ROMS))
  334. pcibios_allocate_rom_resources(bus);
  335. }
  336. void __init pcibios_resource_survey(void)
  337. {
  338. struct pci_bus *bus;
  339. DBG("PCI: Allocating resources\n");
  340. list_for_each_entry(bus, &pci_root_buses, node)
  341. pcibios_allocate_bus_resources(bus);
  342. list_for_each_entry(bus, &pci_root_buses, node)
  343. pcibios_allocate_resources(bus, 0);
  344. list_for_each_entry(bus, &pci_root_buses, node)
  345. pcibios_allocate_resources(bus, 1);
  346. e820_reserve_resources_late();
  347. /*
  348. * Insert the IO APIC resources after PCI initialization has
  349. * occurred to handle IO APICS that are mapped in on a BAR in
  350. * PCI space, but before trying to assign unassigned pci res.
  351. */
  352. ioapic_insert_resources();
  353. }
/* VM ops for PCI mmap()s: route ->access through generic_access_phys. */
static const struct vm_operations_struct pci_mmap_ops = {
	.access = generic_access_phys,
};
  357. int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
  358. enum pci_mmap_state mmap_state, int write_combine)
  359. {
  360. unsigned long prot;
  361. /* I/O space cannot be accessed via normal processor loads and
  362. * stores on this platform.
  363. */
  364. if (mmap_state == pci_mmap_io)
  365. return -EINVAL;
  366. prot = pgprot_val(vma->vm_page_prot);
  367. /*
  368. * Return error if pat is not enabled and write_combine is requested.
  369. * Caller can followup with UC MINUS request and add a WC mtrr if there
  370. * is a free mtrr slot.
  371. */
  372. if (!pat_enabled() && write_combine)
  373. return -EINVAL;
  374. if (pat_enabled() && write_combine)
  375. prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
  376. else if (pat_enabled() || boot_cpu_data.x86 > 3)
  377. /*
  378. * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
  379. * To avoid attribute conflicts, request UC MINUS here
  380. * as well.
  381. */
  382. prot |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
  383. vma->vm_page_prot = __pgprot(prot);
  384. if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
  385. vma->vm_end - vma->vm_start,
  386. vma->vm_page_prot))
  387. return -EAGAIN;
  388. vma->vm_ops = &pci_mmap_ops;
  389. return 0;
  390. }