/*
 * linux/arch/alpha/kernel/core_titan.c
 *
 * Code common to all TITAN core logic chips.
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_titan.h>
#undef __EXTERN_INLINE

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/vga.h>

#include "proto.h"
#include "pci_impl.h"
/* Save Titan configuration data as the console had it set up.  */

struct
{
        unsigned long wsba[4];
        unsigned long wsm[4];
        unsigned long tba[4];
} saved_config[4] __attribute__((common));

/*
 * Is PChip 1 present?  No need to query it more than once.
 */
static int titan_pchip1_present;

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)  printk args
#else
# define DBG_CFG(args)
#endif

/*
 * Routines to access TIG registers.
 */
static inline volatile unsigned long *
mk_tig_addr(int offset)
{
        return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
}

static inline u8
titan_read_tig(int offset, u8 value)
{
        volatile unsigned long *tig_addr = mk_tig_addr(offset);
        return (u8)(*tig_addr & 0xff);
}

static inline void
titan_write_tig(int offset, u8 value)
{
        volatile unsigned long *tig_addr = mk_tig_addr(offset);
        *tig_addr = (unsigned long)value;
}

/*
 * Given a bus, device, and function number, compute the resulting
 * configuration space address.  It is not safe to have concurrent
 * invocations of the configuration space access routines, but there
 * really shouldn't be any need for that.
 *
 * Note that all config space accesses use Type 1 address format.
 *
 * Note also that type 1 is determined by non-zero bus number.
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *      31:24   reserved
 *      23:16   bus number (8 bits = 256 possible buses)
 *      15:11   Device number (5 bits)
 *      10:8    function number
 *       7:2    register number
 *
 * Notes:
 *      The function number selects which function of a multi-function
 *      device (e.g., SCSI and Ethernet).
 *
 *      The register selects a DWORD (32 bit) register offset.  Hence it
 *      doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *      bits.
 */
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
             unsigned long *pci_addr, unsigned char *type1)
{
        struct pci_controller *hose = pbus->sysdata;
        unsigned long addr;
        u8 bus = pbus->number;

        DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
                 "pci_addr=0x%p, type1=0x%p)\n",
                 bus, device_fn, where, pci_addr, type1));

        if (!pbus->parent) /* No parent means peer PCI bus. */
                bus = 0;
        *type1 = (bus != 0);

        addr = (bus << 16) | (device_fn << 8) | where;
        addr |= hose->config_space_base;

        *pci_addr = addr;
        DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
        return 0;
}
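
/*
 * Worked example (illustrative values only): bus 1, device 4, function 0,
 * register 0x10 gives device_fn = (4 << 3) | 0 = 0x20, so
 *
 *      addr = (1 << 16) | (0x20 << 8) | 0x10 = 0x12010
 *
 * before the hose's config_space_base is OR'd in.
 */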

static int
titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
                  int size, u32 *value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                *value = __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                *value = __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *value = *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}
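
/*
 * Config-space stores can be buffered on their way to the PChip, so each
 * write case below issues an mb() and then reads the location back; the
 * read cannot complete until the write has actually reached the device.
 */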

static int
titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
                   int size, u32 value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                __kernel_stb(value, *(vucp)addr);
                mb();
                __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                __kernel_stw(value, *(vusp)addr);
                mb();
                __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *(vuip)addr = value;
                mb();
                *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

struct pci_ops titan_pci_ops =
{
        .read =         titan_read_config,
        .write =        titan_write_config,
};

void
titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
        titan_pachip *pachip =
          (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
        titan_pachip_port *port;
        volatile unsigned long *csr;
        unsigned long value;

        /* Get the right hose.  */
        port = &pachip->g_port;
        if (hose->index & 2)
                port = &pachip->a_port;

        /* We can invalidate up to 8 tlb entries in a go.  The flush
           matches against <31:16> in the pci address.
           Note that gtlbi* and atlbi* are in the same place in the g_port
           and a_port, respectively, so the g_port offset can be used
           even if hose is an a_port.  */
        csr = &port->port_specific.g.gtlbia.csr;
        if (((start ^ end) & 0xffff0000) == 0)
                csr = &port->port_specific.g.gtlbiv.csr;

        /* For TBIA, it doesn't matter what value we write.  For TBI,
           it's the shifted tag bits.  */
        value = (start & 0xffff0000) >> 12;

        wmb();
        *csr = value;
        mb();
        *csr;
}

static int
titan_query_agp(titan_pachip_port *port)
{
        union TPAchipPCTL pctl;

        /* Read the current APCTL and check the AGP-present bit.  */
        pctl.pctl_q_whole = port->pctl.csr;

        return pctl.pctl_r_bits.apctl_v_agp_present;
}

static void __init
titan_init_one_pachip_port(titan_pachip_port *port, int index)
{
        struct pci_controller *hose;

        hose = alloc_pci_controller();
        if (index == 0)
                pci_isa_hose = hose;
        hose->io_space = alloc_resource();
        hose->mem_space = alloc_resource();

        /*
         * This is for userland consumption.  The 40-bit PIO bias that we
         * use in the kernel through KSEG doesn't work in the page table
         * based user mappings.  (43-bit KSEG sign extends the physical
         * address from bit 40 to hit the I/O bit - mapped addresses don't).
         * So make sure we get the 43-bit PIO bias.
         */
        hose->sparse_mem_base = 0;
        hose->sparse_io_base = 0;
        hose->dense_mem_base
          = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
        hose->dense_io_base
          = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;

        hose->config_space_base = TITAN_CONF(index);
        hose->index = index;

        hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
        hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
        hose->io_space->name = pci_io_names[index];
        hose->io_space->flags = IORESOURCE_IO;

        hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
        hose->mem_space->end = hose->mem_space->start + 0xffffffff;
        hose->mem_space->name = pci_mem_names[index];
        hose->mem_space->flags = IORESOURCE_MEM;

        if (request_resource(&ioport_resource, hose->io_space) < 0)
                printk(KERN_ERR "Failed to request IO on hose %d\n", index);
        if (request_resource(&iomem_resource, hose->mem_space) < 0)
                printk(KERN_ERR "Failed to request MEM on hose %d\n", index);

        /*
         * Save the existing PCI window translations.  SRM will
         * need them when we go to reboot.
         */
        saved_config[index].wsba[0] = port->wsba[0].csr;
        saved_config[index].wsm[0]  = port->wsm[0].csr;
        saved_config[index].tba[0]  = port->tba[0].csr;

        saved_config[index].wsba[1] = port->wsba[1].csr;
        saved_config[index].wsm[1]  = port->wsm[1].csr;
        saved_config[index].tba[1]  = port->tba[1].csr;

        saved_config[index].wsba[2] = port->wsba[2].csr;
        saved_config[index].wsm[2]  = port->wsm[2].csr;
        saved_config[index].tba[2]  = port->tba[2].csr;

        saved_config[index].wsba[3] = port->wsba[3].csr;
        saved_config[index].wsm[3]  = port->wsm[3].csr;
        saved_config[index].tba[3]  = port->tba[3].csr;

        /*
         * Set up the PCI to main memory translation windows.
         *
         * Note: Window 3 on Titan is Scatter-Gather ONLY.
         *
         * Window 0 is scatter-gather 8MB at 8MB (for isa)
         * Window 1 is direct access 1GB at 2GB
         * Window 2 is scatter-gather 1GB at 3GB
         */
        hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
        hose->sg_isa->align_entry = 8;  /* 64KB for ISA */

        hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
        hose->sg_pci->align_entry = 4;  /* Titan caches 4 PTEs at a time */
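
        /*
         * As programmed below, WSBA holds the PCI base address of a window
         * plus control bits in <1:0>: |3 enables the window with
         * scatter-gather translation, |1 enables it as a direct-mapped
         * window.  WSM is the window size mask; TBA is the PTE table
         * address for scatter-gather windows and 0 for the direct window.
         */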

        port->wsba[0].csr = hose->sg_isa->dma_base | 3;
        port->wsm[0].csr  = (hose->sg_isa->size - 1) & 0xfff00000;
        port->tba[0].csr  = virt_to_phys(hose->sg_isa->ptes);

        port->wsba[1].csr = __direct_map_base | 1;
        port->wsm[1].csr  = (__direct_map_size - 1) & 0xfff00000;
        port->tba[1].csr  = 0;

        port->wsba[2].csr = hose->sg_pci->dma_base | 3;
        port->wsm[2].csr  = (hose->sg_pci->size - 1) & 0xfff00000;
        port->tba[2].csr  = virt_to_phys(hose->sg_pci->ptes);

        port->wsba[3].csr = 0;

        /* Enable the Monster Window to make DAC pci64 possible.  */
        port->pctl.csr |= pctl_m_mwin;

        /*
         * If it's an AGP port, initialize agplastwr.
         */
        if (titan_query_agp(port))
                port->port_specific.a.agplastwr.csr = __direct_map_base;

        titan_pci_tbi(hose, 0, -1);
}

static void __init
titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
        titan_pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

        /* Init the ports in hose order... */
        titan_init_one_pachip_port(&pachip0->g_port, 0);        /* hose 0 */
        if (titan_pchip1_present)
                titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
        titan_init_one_pachip_port(&pachip0->a_port, 2);        /* hose 2 */
        if (titan_pchip1_present)
                titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
}

void __init
titan_init_arch(void)
{
#if 0
        printk("%s: titan_init_arch()\n", __func__);
        printk("%s: CChip registers:\n", __func__);
        printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr);
        printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr);
        printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr);
        printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr);
        printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr);
        printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr);
        printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr);
        printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr);

        printk("%s: DChip registers:\n", __func__);
        printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr);
        printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr);
        printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr);
#endif

        boot_cpuid = __hard_smp_processor_id();

        /* With multiple PCI busses, we play with I/O as physical addrs.  */
        ioport_resource.end = ~0UL;
        iomem_resource.end = ~0UL;

        /* PCI DMA Direct Mapping is 1GB at 2GB.  */
        __direct_map_base = 0x80000000;
        __direct_map_size = 0x40000000;

        /* Init the PA chip(s).  */
        titan_init_pachips(TITAN_pachip0, TITAN_pachip1);

        /* Check for graphic console location (if any).  */
        find_console_vga_hose();
}

static void
titan_kill_one_pachip_port(titan_pachip_port *port, int index)
{
        port->wsba[0].csr = saved_config[index].wsba[0];
        port->wsm[0].csr  = saved_config[index].wsm[0];
        port->tba[0].csr  = saved_config[index].tba[0];

        port->wsba[1].csr = saved_config[index].wsba[1];
        port->wsm[1].csr  = saved_config[index].wsm[1];
        port->tba[1].csr  = saved_config[index].tba[1];

        port->wsba[2].csr = saved_config[index].wsba[2];
        port->wsm[2].csr  = saved_config[index].wsm[2];
        port->tba[2].csr  = saved_config[index].tba[2];

        port->wsba[3].csr = saved_config[index].wsba[3];
        port->wsm[3].csr  = saved_config[index].wsm[3];
        port->tba[3].csr  = saved_config[index].tba[3];
}

static void
titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
        if (titan_pchip1_present) {
                titan_kill_one_pachip_port(&pachip1->g_port, 1);
                titan_kill_one_pachip_port(&pachip1->a_port, 3);
        }
        titan_kill_one_pachip_port(&pachip0->g_port, 0);
        titan_kill_one_pachip_port(&pachip0->a_port, 2);
}

void
titan_kill_arch(int mode)
{
        titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
}

/*
 * IO map support.
 */
void __iomem *
titan_ioportmap(unsigned long addr)
{
        FIXUP_IOADDR_VGA(addr);
        return (void __iomem *)(addr + TITAN_IO_BIAS);
}
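
/*
 * MMIO mappings fall into three cases below: addresses claimed by the
 * VGA hose are rebased first; addresses inside the direct-map window
 * only need the dense-memory bias added; addresses inside the
 * scatter-gather arena are translated page by page through the IOMMU
 * PTEs into a fresh vmalloc-space mapping.
 */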

void __iomem *
titan_ioremap(unsigned long addr, unsigned long size)
{
        int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
        unsigned long baddr = addr & ~TITAN_HOSE_MASK;
        unsigned long last = baddr + size - 1;
        struct pci_controller *hose;
        struct vm_struct *area;
        unsigned long vaddr;
        unsigned long *ptes;
        unsigned long pfn;

        /*
         * Adjust the address and hose, if necessary.
         */
        if (pci_vga_hose && __is_mem_vga(addr)) {
                h = pci_vga_hose->index;
                addr += pci_vga_hose->mem_space->start;
        }

        /*
         * Find the hose.
         */
        for (hose = hose_head; hose; hose = hose->next)
                if (hose->index == h)
                        break;
        if (!hose)
                return NULL;

        /*
         * Is it direct-mapped?
         */
        if ((baddr >= __direct_map_base) &&
            ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
                vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
                return (void __iomem *) vaddr;
        }

        /*
         * Check the scatter-gather arena.
         */
        if (hose->sg_pci &&
            baddr >= (unsigned long)hose->sg_pci->dma_base &&
            last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {

                /*
                 * Adjust the limits (mappings must be page aligned).
                 */
                baddr -= hose->sg_pci->dma_base;
                last -= hose->sg_pci->dma_base;
                baddr &= PAGE_MASK;
                size = PAGE_ALIGN(last) - baddr;

                /*
                 * Map it.
                 */
                area = get_vm_area(size, VM_IOREMAP);
                if (!area) {
                        printk("ioremap failed... no vm_area...\n");
                        return NULL;
                }

                ptes = hose->sg_pci->ptes;
                for (vaddr = (unsigned long)area->addr;
                    baddr <= last;
                    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
                        pfn = ptes[baddr >> PAGE_SHIFT];
                        if (!(pfn & 1)) {
                                printk("ioremap failed... pte not valid...\n");
                                vfree(area->addr);
                                return NULL;
                        }
                        pfn >>= 1;      /* make it a true pfn */

                        if (__alpha_remap_area_pages(vaddr,
                                                     pfn << PAGE_SHIFT,
                                                     PAGE_SIZE, 0)) {
                                printk("FAILED to remap_area_pages...\n");
                                vfree(area->addr);
                                return NULL;
                        }
                }

                flush_tlb_all();

                vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
                return (void __iomem *) vaddr;
        }

        /* Assume a legacy (read: VGA) address, and return appropriately. */
        return (void __iomem *)(addr + TITAN_MEM_BIAS);
}
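
/*
 * Only scatter-gather mappings built by titan_ioremap() live in vmalloc
 * space and need to be torn down; direct-mapped and legacy addresses were
 * returned as simple KSEG biases and are left alone.
 */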

void
titan_iounmap(volatile void __iomem *xaddr)
{
        unsigned long addr = (unsigned long) xaddr;
        if (addr >= VMALLOC_START)
                vfree((void *)(PAGE_MASK & addr));
}

int
titan_is_mmio(const volatile void __iomem *xaddr)
{
        unsigned long addr = (unsigned long) xaddr;

        if (addr >= VMALLOC_START)
                return 1;
        else
                return (addr & 0x100000000UL) == 0;
}

#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(titan_ioportmap);
EXPORT_SYMBOL(titan_ioremap);
EXPORT_SYMBOL(titan_iounmap);
EXPORT_SYMBOL(titan_is_mmio);
#endif

/*
 * AGP GART Support.
 */
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

struct titan_agp_aperture {
        struct pci_iommu_arena *arena;
        long pg_start;
        long pg_count;
};
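
/*
 * The AGP aperture is carved out of the hose's PCI scatter-gather arena:
 * setup reserves a block of IOMMU entries, and bind/unbind later point
 * those entries at the caller's pages.
 */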

static int
titan_agp_setup(alpha_agp_info *agp)
{
        struct titan_agp_aperture *aper;

        if (!alpha_agpgart_size)
                return -ENOMEM;

        aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
        if (aper == NULL)
                return -ENOMEM;

        aper->arena = agp->hose->sg_pci;
        aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
        aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
                                       aper->pg_count - 1);
        if (aper->pg_start < 0) {
                printk(KERN_ERR "Failed to reserve AGP memory\n");
                kfree(aper);
                return -ENOMEM;
        }

        agp->aperture.bus_base =
                aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
        agp->aperture.size = aper->pg_count * PAGE_SIZE;
        agp->aperture.sysdata = aper;

        return 0;
}

static void
titan_agp_cleanup(alpha_agp_info *agp)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        int status;

        status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
        if (status == -EBUSY) {
                printk(KERN_WARNING
                       "Attempted to release bound AGP memory - unbinding\n");
                iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
                status = iommu_release(aper->arena, aper->pg_start,
                                       aper->pg_count);
        }
        if (status < 0)
                printk(KERN_ERR "Failed to release AGP memory\n");

        kfree(aper);
        kfree(agp);
}

static int
titan_agp_configure(alpha_agp_info *agp)
{
        union TPAchipPCTL pctl;
        titan_pachip_port *port = agp->private;
        pctl.pctl_q_whole = port->pctl.csr;

        /* Side-Band Addressing?  */
        pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

        /* AGP Rate?  */
        pctl.pctl_r_bits.apctl_v_agp_rate = 0;          /* 1x */
        if (agp->mode.bits.rate & 2)
                pctl.pctl_r_bits.apctl_v_agp_rate = 1;  /* 2x */
#if 0
        if (agp->mode.bits.rate & 4)
                pctl.pctl_r_bits.apctl_v_agp_rate = 2;  /* 4x */
#endif

        /* RQ Depth?  */
        pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
        pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

        /*
         * AGP Enable.
         */
        pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

        /* Tell the user.  */
        printk("Enabling AGP: %dX%s\n",
               1 << pctl.pctl_r_bits.apctl_v_agp_rate,
               pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

        /* Write it.  */
        port->pctl.csr = pctl.pctl_q_whole;

        /* And wait at least 5000 66MHz cycles (per Titan spec):
           5000 / 66MHz is about 76us, so 100us is comfortably enough.  */
        udelay(100);

        return 0;
}

static int
titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        return iommu_bind(aper->arena, aper->pg_start + pg_start,
                          mem->page_count, mem->pages);
}

static int
titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        return iommu_unbind(aper->arena, aper->pg_start + pg_start,
                            mem->page_count);
}

static unsigned long
titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        unsigned long baddr = addr - aper->arena->dma_base;
        unsigned long pte;

        if (addr < agp->aperture.bus_base ||
            addr >= agp->aperture.bus_base + agp->aperture.size) {
                printk("%s: addr out of range\n", __func__);
                return -EINVAL;
        }

        pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
        if (!(pte & 1)) {
                printk("%s: pte not valid\n", __func__);
                return -EINVAL;
        }

        return (pte >> 1) << PAGE_SHIFT;
}

struct alpha_agp_ops titan_agp_ops =
{
        .setup          = titan_agp_setup,
        .cleanup        = titan_agp_cleanup,
        .configure      = titan_agp_configure,
        .bind           = titan_agp_bind_memory,
        .unbind         = titan_agp_unbind_memory,
        .translate      = titan_agp_translate
};

alpha_agp_info *
titan_agp_info(void)
{
        alpha_agp_info *agp;
        struct pci_controller *hose;
        titan_pachip_port *port;
        int hosenum = -1;
        union TPAchipPCTL pctl;

        /*
         * Find the AGP port.
         */
        port = &TITAN_pachip0->a_port;
        if (titan_query_agp(port))
                hosenum = 2;
        if (hosenum < 0 &&
            titan_pchip1_present &&
            titan_query_agp(port = &TITAN_pachip1->a_port))
                hosenum = 3;

        /*
         * Find the hose the port is on.
         */
        for (hose = hose_head; hose; hose = hose->next)
                if (hose->index == hosenum)
                        break;

        if (!hose || !hose->sg_pci)
                return NULL;

        /*
         * Allocate the info structure.
         */
        agp = kmalloc(sizeof(*agp), GFP_KERNEL);
        if (!agp)
                return NULL;

        /*
         * Fill it in.
         */
        agp->hose = hose;
        agp->private = port;
        agp->ops = &titan_agp_ops;

        /*
         * Aperture - not configured until ops.setup().
         *
         * FIXME - should we go ahead and allocate it here?
         */
        agp->aperture.bus_base = 0;
        agp->aperture.size = 0;
        agp->aperture.sysdata = NULL;

        /*
         * Capabilities.
         */
        agp->capability.lw = 0;
        agp->capability.bits.rate = 3;  /* 2x, 1x */
        agp->capability.bits.sba = 1;
        agp->capability.bits.rq = 7;    /* 8 - 1 */

        /*
         * Mode.
         */
        pctl.pctl_q_whole = port->pctl.csr;
        agp->mode.lw = 0;
        agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
        agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
        agp->mode.bits.rq = 7;  /* RQ Depth? */
        agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;

        return agp;
}