io.h 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922
  1. /* Generic I/O port emulation, based on MN10300 code
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public Licence
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the Licence, or (at your option) any later version.
  10. */
  11. #ifndef __ASM_GENERIC_IO_H
  12. #define __ASM_GENERIC_IO_H
  13. #include <asm/page.h> /* I/O is all done through memory accesses */
  14. #include <linux/string.h> /* for memset() and memcpy() */
  15. #include <linux/types.h>
  16. #ifdef CONFIG_GENERIC_IOMAP
  17. #include <asm-generic/iomap.h>
  18. #endif
  19. #include <asm-generic/pci_iomap.h>
#ifndef mmiowb
/* Fallback no-op: architectures that need an explicit barrier to order
 * MMIO writes against spinlock release override this. */
#define mmiowb() do {} while (0)
#endif
/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */
/* Each accessor is defined to itself ("#define X X") so later #ifndef
 * checks in arch headers see it as already provided. */
#ifndef __raw_readb
#define __raw_readb __raw_readb
/* Read one byte from MMIO address @addr. */
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	/* __force drops the sparse __iomem address-space tag: the generic
	 * implementation is a plain volatile memory load. */
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
/* Read a 16-bit quantity in native (CPU) byte order. */
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
/* Read a 32-bit quantity in native (CPU) byte order. */
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
/* Read a 64-bit quantity in native byte order; 64-bit kernels only. */
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */
#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
/* Write one byte to MMIO address @addr in native byte order. */
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	/* __force drops the sparse __iomem tag; plain volatile store. */
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
/* Write a 16-bit quantity in native (CPU) byte order. */
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
/* Write a 32-bit quantity in native (CPU) byte order. */
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
/* Write a 64-bit quantity in native byte order; 64-bit kernels only. */
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */
#ifndef readb
#define readb readb
/* Read one byte; a single byte has no endianness, so no conversion. */
static inline u8 readb(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
#endif

#ifndef readw
#define readw readw
/* Read a little-endian 16-bit value, convert to CPU endianness. */
static inline u16 readw(const volatile void __iomem *addr)
{
	return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl
#define readl readl
/* Read a little-endian 32-bit value, convert to CPU endianness. */
static inline u32 readl(const volatile void __iomem *addr)
{
	return __le32_to_cpu(__raw_readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
/* Read a little-endian 64-bit value; 64-bit kernels only. */
static inline u64 readq(const volatile void __iomem *addr)
{
	return __le64_to_cpu(__raw_readq(addr));
}
#endif
#endif /* CONFIG_64BIT */
  124. #ifndef writeb
  125. #define writeb writeb
  126. static inline void writeb(u8 value, volatile void __iomem *addr)
  127. {
  128. __raw_writeb(value, addr);
  129. }
  130. #endif
  131. #ifndef writew
  132. #define writew writew
  133. static inline void writew(u16 value, volatile void __iomem *addr)
  134. {
  135. __raw_writew(cpu_to_le16(value), addr);
  136. }
  137. #endif
  138. #ifndef writel
  139. #define writel writel
  140. static inline void writel(u32 value, volatile void __iomem *addr)
  141. {
  142. __raw_writel(__cpu_to_le32(value), addr);
  143. }
  144. #endif
  145. #ifdef CONFIG_64BIT
  146. #ifndef writeq
  147. #define writeq writeq
  148. static inline void writeq(u64 value, volatile void __iomem *addr)
  149. {
  150. __raw_writeq(__cpu_to_le64(value), addr);
  151. }
  152. #endif
  153. #endif /* CONFIG_64BIT */
/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
/* The generic accessors above carry no extra ordering, so the relaxed
 * variants are simple aliases.  readq_relaxed/writeq_relaxed only expand
 * to something usable where readq/writeq exist (CONFIG_64BIT). */
#ifndef readb_relaxed
#define readb_relaxed readb
#endif

#ifndef readw_relaxed
#define readw_relaxed readw
#endif

#ifndef readl_relaxed
#define readl_relaxed readl
#endif

#ifndef readq_relaxed
#define readq_relaxed readq
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb
#endif

#ifndef writew_relaxed
#define writew_relaxed writew
#endif

#ifndef writel_relaxed
#define writel_relaxed writel
#endif

#ifndef writeq_relaxed
#define writeq_relaxed writeq
#endif
  183. /*
  184. * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
  185. * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
  186. */
  187. #ifndef readsb
  188. #define readsb readsb
  189. static inline void readsb(const volatile void __iomem *addr, void *buffer,
  190. unsigned int count)
  191. {
  192. if (count) {
  193. u8 *buf = buffer;
  194. do {
  195. u8 x = __raw_readb(addr);
  196. *buf++ = x;
  197. } while (--count);
  198. }
  199. }
  200. #endif
  201. #ifndef readsw
  202. #define readsw readsw
  203. static inline void readsw(const volatile void __iomem *addr, void *buffer,
  204. unsigned int count)
  205. {
  206. if (count) {
  207. u16 *buf = buffer;
  208. do {
  209. u16 x = __raw_readw(addr);
  210. *buf++ = x;
  211. } while (--count);
  212. }
  213. }
  214. #endif
  215. #ifndef readsl
  216. #define readsl readsl
  217. static inline void readsl(const volatile void __iomem *addr, void *buffer,
  218. unsigned int count)
  219. {
  220. if (count) {
  221. u32 *buf = buffer;
  222. do {
  223. u32 x = __raw_readl(addr);
  224. *buf++ = x;
  225. } while (--count);
  226. }
  227. }
  228. #endif
  229. #ifdef CONFIG_64BIT
  230. #ifndef readsq
  231. #define readsq readsq
  232. static inline void readsq(const volatile void __iomem *addr, void *buffer,
  233. unsigned int count)
  234. {
  235. if (count) {
  236. u64 *buf = buffer;
  237. do {
  238. u64 x = __raw_readq(addr);
  239. *buf++ = x;
  240. } while (--count);
  241. }
  242. }
  243. #endif
  244. #endif /* CONFIG_64BIT */
  245. #ifndef writesb
  246. #define writesb writesb
  247. static inline void writesb(volatile void __iomem *addr, const void *buffer,
  248. unsigned int count)
  249. {
  250. if (count) {
  251. const u8 *buf = buffer;
  252. do {
  253. __raw_writeb(*buf++, addr);
  254. } while (--count);
  255. }
  256. }
  257. #endif
  258. #ifndef writesw
  259. #define writesw writesw
  260. static inline void writesw(volatile void __iomem *addr, const void *buffer,
  261. unsigned int count)
  262. {
  263. if (count) {
  264. const u16 *buf = buffer;
  265. do {
  266. __raw_writew(*buf++, addr);
  267. } while (--count);
  268. }
  269. }
  270. #endif
  271. #ifndef writesl
  272. #define writesl writesl
  273. static inline void writesl(volatile void __iomem *addr, const void *buffer,
  274. unsigned int count)
  275. {
  276. if (count) {
  277. const u32 *buf = buffer;
  278. do {
  279. __raw_writel(*buf++, addr);
  280. } while (--count);
  281. }
  282. }
  283. #endif
  284. #ifdef CONFIG_64BIT
  285. #ifndef writesq
  286. #define writesq writesq
  287. static inline void writesq(volatile void __iomem *addr, const void *buffer,
  288. unsigned int count)
  289. {
  290. if (count) {
  291. const u64 *buf = buffer;
  292. do {
  293. __raw_writeq(*buf++, addr);
  294. } while (--count);
  295. }
  296. }
  297. #endif
  298. #endif /* CONFIG_64BIT */
#ifndef PCI_IOBASE
/* Base of the MMIO window through which legacy I/O port space is
 * accessed; architectures with a real port mapping override this. */
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
/* Highest valid I/O port number (64K legacy PCI/ISA port space). */
#define IO_SPACE_LIMIT 0xffff
#endif
/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */
/* The generic versions translate a port number into an MMIO access at
 * PCI_IOBASE + port.  (Arithmetic on void * is a GCC extension.) */
#ifndef inb
#define inb inb
/* Read one byte from I/O port @addr. */
static inline u8 inb(unsigned long addr)
{
	return readb(PCI_IOBASE + addr);
}
#endif

#ifndef inw
#define inw inw
/* Read a little-endian 16-bit value from I/O port @addr. */
static inline u16 inw(unsigned long addr)
{
	return readw(PCI_IOBASE + addr);
}
#endif

#ifndef inl
#define inl inl
/* Read a little-endian 32-bit value from I/O port @addr. */
static inline u32 inl(unsigned long addr)
{
	return readl(PCI_IOBASE + addr);
}
#endif

#ifndef outb
#define outb outb
/* Write one byte to I/O port @addr. */
static inline void outb(u8 value, unsigned long addr)
{
	writeb(value, PCI_IOBASE + addr);
}
#endif

#ifndef outw
#define outw outw
/* Write a 16-bit value to I/O port @addr (little-endian on the bus). */
static inline void outw(u16 value, unsigned long addr)
{
	writew(value, PCI_IOBASE + addr);
}
#endif

#ifndef outl
#define outl outl
/* Write a 32-bit value to I/O port @addr (little-endian on the bus). */
static inline void outl(u32 value, unsigned long addr)
{
	writel(value, PCI_IOBASE + addr);
}
#endif
/* The *_p ("pause") variants normally insert a small delay after the
 * port access; the generic implementations add no delay and simply
 * forward to the plain accessors. */
#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif
/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */
#ifndef insb
#define insb insb
/* Read @count bytes from port @addr into @buffer. */
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
/* Read @count 16-bit words from port @addr into @buffer. */
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
/* Read @count 32-bit words from port @addr into @buffer. */
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
/* Write @count bytes from @buffer to port @addr. */
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
/* Write @count 16-bit words from @buffer to port @addr. */
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
/* Write @count 32-bit words from @buffer to port @addr. */
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif
/* Repeated-access *_p ("pause") variants; the generic versions add no
 * extra delay and forward to the plain string accessors. */
#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif
#ifndef CONFIG_GENERIC_IOMAP
/* Without the generic iomap layer, ioreadN/iowriteN are plain MMIO
 * accesses (little-endian, like readN/writeN). */
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif
  531. #ifndef ioread16be
  532. #define ioread16be ioread16be
  533. static inline u16 ioread16be(const volatile void __iomem *addr)
  534. {
  535. return __be16_to_cpu(__raw_readw(addr));
  536. }
  537. #endif
  538. #ifndef ioread32be
  539. #define ioread32be ioread32be
  540. static inline u32 ioread32be(const volatile void __iomem *addr)
  541. {
  542. return __be32_to_cpu(__raw_readl(addr));
  543. }
  544. #endif
  545. #ifndef iowrite16be
  546. #define iowrite16be iowrite16be
  547. static inline void iowrite16be(u16 value, void volatile __iomem *addr)
  548. {
  549. __raw_writew(__cpu_to_be16(value), addr);
  550. }
  551. #endif
  552. #ifndef iowrite32be
  553. #define iowrite32be iowrite32be
  554. static inline void iowrite32be(u32 value, volatile void __iomem *addr)
  555. {
  556. __raw_writel(__cpu_to_be32(value), addr);
  557. }
  558. #endif
/* ioreadN_rep/iowriteN_rep: repeated access to a single MMIO location,
 * backed by the readsN/writesN helpers above. */
#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */
#ifdef __KERNEL__

#include <linux/vmalloc.h>

/* Strip the sparse __iomem tag so memset()/memcpy() in the helpers
 * below can operate on the mapped address directly. */
#define __io_virt(x) ((void __force *)(x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
/* Generic no-op: nothing to undo when pci_iomap() keeps no per-mapping
 * state (direct-mapped I/O). */
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */
/*
 * Change virtual addresses to physical addresses and vv.
 * These are pretty trivial
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
/* Linear-map translation: kernel virtual -> physical via __pa(). */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
/* Linear-map translation: physical -> kernel virtual via __va(). */
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * If you have an IOMMU your architecture is expected to have both ioremap()
 * and iounmap() implemented otherwise the asm-generic helpers will provide a
 * direct mapping.
 *
 * There are ioremap_*() call variants, if you have no IOMMU we naturally will
 * default to direct mapping for all of them, you can override these defaults.
 * If you have an IOMMU you are highly encouraged to provide your own
 * ioremap variant implementation as there currently is no safe architecture
 * agnostic default. To avoid possible improper behaviour default asm-generic
 * ioremap_*() variants all return NULL when an IOMMU is available. If you've
 * defined your own ioremap_*() variant you must then declare your own
 * ioremap_*() variant as defined to itself to avoid the default NULL return.
 */
#ifdef CONFIG_MMU

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
/* With an MMU there is no safe generic uncached mapping: return NULL so
 * callers fail visibly rather than get a wrong mapping (see DOC above). */
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

#else /* !CONFIG_MMU */

/*
 * Change "struct page" to physical address.
 *
 * This implementation is for the no-MMU case only... if you have an MMU
 * you'll need to provide your own definitions.
 */
#ifndef ioremap
#define ioremap ioremap
/* No MMU: device memory is directly addressable, so "mapping" is just a
 * cast of the physical address.  @size is unused. */
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef __ioremap
#define __ioremap __ioremap
/* Flags are meaningless without an MMU; forward to ioremap(). */
static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
				      unsigned long flags)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_nocache
#define ioremap_nocache ioremap_nocache
/* All no-MMU mappings are direct; caching attributes cannot differ. */
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
	return ioremap(offset, size);
}
#endif

#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
{
	return ioremap_nocache(offset, size);
}
#endif

#ifndef iounmap
#define iounmap iounmap
/* Nothing to tear down for a direct mapping. */
static inline void iounmap(void __iomem *addr)
{
}
#endif
#endif /* CONFIG_MMU */
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
/* Map an I/O port range into the PCI_IOBASE MMIO window; the port is
 * masked to the 64K legacy port space.  @nr is unused here. */
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return PCI_IOBASE + (port & IO_SPACE_LIMIT);
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
/* Nothing to undo for the direct-mapped case. */
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
/* Translate a /dev/kmem pointer before access; identity by default. */
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
/* Translate a /dev/mem physical address to a virtual pointer via the
 * linear map (__va). */
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
/* Undo xlate_dev_mem_ptr(); nothing to release for the linear map. */
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
/* Identity mapping between kernel virtual and bus addresses (no IOMMU).
 * NOTE(review): unlike the other helpers in this file, this guard has no
 * "#define virt_to_bus virt_to_bus" self-define — overriding arches must
 * provide virt_to_bus as a macro for the #ifndef to work; confirm. */
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif
#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - set a range of I/O memory to a constant value
 * @addr: start of the I/O-memory range
 * @value: byte value to store
 * @size: number of bytes to set
 *
 * Generic version: I/O memory is directly addressable, so plain memset()
 * on the untagged pointer suffices.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - copy from I/O memory to ordinary memory
 * @buffer: destination buffer
 * @addr: source I/O-memory range
 * @size: number of bytes to copy
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - copy from ordinary memory to I/O memory
 * @addr: destination I/O-memory range
 * @buffer: source buffer
 * @size: number of bytes to copy
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
  800. #endif /* __KERNEL__ */
  801. #endif /* __ASM_GENERIC_IO_H */