#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define ARCH_HAS_PREFETCH_SWITCH_STACK

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)
/*
 * TASK_SIZE is really a misnomer: it is actually the maximum user
 * space address (plus one). On IA-64, there are five regions of 2TB
 * each (assuming an 8KB page size), for a total of 10TB of user
 * virtual address space.
 */
#define TASK_SIZE		DEFAULT_TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
/*
 * This shift should be large enough to be able to represent
 * 1000000000/itc_freq with good accuracy while being small enough to
 * fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits (this will
 * give enough slack to represent 10 seconds worth of time as a
 * scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
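
/*
 * Illustrative note (not part of the original header): with this
 * scaling, ITC cycles convert to nanoseconds as
 *
 *	nsec = (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
 *
 * where nsec_per_cyc = (1000000000 << IA64_NSEC_PER_CYC_SHIFT) / itc_freq
 * (see struct cpuinfo_ia64 below). For example, assuming a
 * hypothetical itc_freq of 400 MHz, nsec_per_cyc is roughly
 * 2.5 << 30, so 1000 cycles scale to about 2500 ns.
 */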

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>
#include <linux/bitops.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <linux/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/*
 * The processor status register (PSR) bits, expressed as bitfields for
 * more efficient access (cf. the IA64_PSR_* mask definitions in
 * <asm/kregs.h>, included above):
 */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};

union ia64_isr {
	__u64 val;
	struct {
		__u64 code : 16;
		__u64 vector : 8;
		__u64 reserved1 : 8;
		__u64 x : 1;
		__u64 w : 1;
		__u64 r : 1;
		__u64 na : 1;
		__u64 sp : 1;
		__u64 rs : 1;
		__u64 ir : 1;
		__u64 ni : 1;
		__u64 so : 1;
		__u64 ei : 2;
		__u64 ed : 1;
		__u64 reserved2 : 20;
	};
};

union ia64_lid {
	__u64 val;
	struct {
		__u64 rv : 16;
		__u64 eid : 8;
		__u64 id : 8;
		__u64 ig : 32;
	};
};

union ia64_tpr {
	__u64 val;
	struct {
		__u64 ig0 : 4;
		__u64 mic : 4;
		__u64 rsv : 8;
		__u64 mmi : 1;
		__u64 ig1 : 47;
	};
};

union ia64_itir {
	__u64 val;
	struct {
		__u64 rv3 : 2;	/* 0-1 */
		__u64 ps : 6;	/* 2-7 */
		__u64 key : 24;	/* 8-31 */
		__u64 rv4 : 32;	/* 32-63 */
	};
};

union ia64_rr {
	__u64 val;
	struct {
		__u64 ve : 1;		/* enable hw walker */
		__u64 reserved0 : 1;	/* reserved */
		__u64 ps : 6;		/* log page size */
		__u64 rid : 24;		/* region id */
		__u64 reserved1 : 32;	/* reserved */
	};
};
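
/*
 * Illustrative sketch (not part of the original header): composing a
 * region register value through the union above. The 16KB page size
 * (ps = 14) and the helper name are assumptions made for the example
 * only.
 */
static inline __u64
ia64_rr_compose_example (__u64 rid)
{
	union ia64_rr rr;

	rr.val = 0;
	rr.ve = 1;	/* let the hardware walker handle this region */
	rr.ps = 14;	/* log2 of the page size (16KB, for illustration) */
	rr.rid = rid;	/* region id */
	return rr.val;
}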

/*
 * CPU type, hardware bug flags, and per-CPU state. Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	unsigned int softirq_pending;
	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
	unsigned long itm_next;		/* interval timer match value to use for next clock tick */
	unsigned long nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	unsigned long unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	unsigned long unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	unsigned long itc_freq;		/* frequency of ITC counter */
	unsigned long proc_freq;	/* frequency of processor */
	unsigned long cyc_per_usec;	/* itc_freq/1000000 */
	unsigned long ptce_base;
	unsigned int ptce_count[2];
	unsigned int ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	unsigned long loops_per_jiffy;
	int cpu;
	unsigned int socket_id;		/* physical processor socket id */
	unsigned short core_id;		/* core id */
	unsigned short thread_id;	/* thread id */
	unsigned short num_log;		/* Total number of logical processors on
					 * this socket that were successfully booted */
	unsigned char cores_per_socket;	/* Cores per processor socket */
	unsigned char threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	unsigned long ppn;
	unsigned long features;
	unsigned char number;
	unsigned char revision;
	unsigned char model;
	unsigned char family;
	unsigned char archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

/*
 * The "local" data variable. It refers to the per-CPU data of the
 * currently executing CPU, much like "current" points to the per-task
 * data of the currently executing task. Do not use the address of
 * local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data	(&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu)	(&per_cpu(ia64_cpu_info, cpu))
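
/*
 * Illustrative note (not part of the original header): typical usage
 * reads a field through either accessor, e.g.
 *
 *	unsigned long freq = local_cpu_data->itc_freq;
 *	unsigned long peer = cpu_data(1)->itc_freq;	(the CPU number 1
 *							 is assumed for the
 *							 example)
 */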

extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
			| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));		\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
			| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
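
/*
 * Illustrative note (not part of the original header): these four
 * macros back the generic prctl() unaligned-access and fp-emulation
 * controls, so from user space the UAC bits can be toggled roughly
 * like this (error handling omitted):
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	(sets IA64_THREAD_UAC_SIGBUS)
 *	prctl(PR_GET_UNALIGN, &value);			(reads the bits back)
 */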

struct thread_struct {
	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;			/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_PERFMON
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pfm_context =		NULL,	\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	unsigned long dbr[IA64_NUM_DBG_REGS];
	unsigned long ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.last_fph_cpu =	-1,					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp)						\
do {											\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))	\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));	\
	regs->cr_iip = new_ip;								\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */		\
	regs->ar_rnat = 0;								\
	regs->ar_bspstore = current->thread.rbs_bot;					\
	regs->ar_fpsr = FPSR_DEFAULT;							\
	regs->loadrs = 0;								\
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */	\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */		\
	if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) {			\
		/*									\
		 * Zap scratch regs to avoid leaking bits between processes with	\
		 * different uid/privileges.						\
		 */									\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;				\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \
	}										\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Get wait channel for task P. */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK. */
#define KSTK_EIP(tsk)					\
({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
})

/* Return stack pointer of blocked task TSK. */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {					\
	struct task_struct *__ia64_slfo_task = (t);				\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();		\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);	\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs. */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;

	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_i();
}

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}
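
/*
 * Illustrative note (not part of the original header): in the three
 * helpers above, bit 0 of target_mask selects the instruction side and
 * bit 1 the data side, so a call such as
 *
 *	ia64_ptr(0x3, vmaddr, log_size);	(mask 0x3 assumed for the example)
 *
 * purges the translation from both the instruction and data
 * translation registers.
 */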

/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits. */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax()		ia64_hint(ia64_hint_pause)
#define cpu_relax_lowlatency()	cpu_relax()

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr;

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}

static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}
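
/*
 * Illustrative note (not part of the original header): the unat bit
 * index is simply bits 8:3 of the spill address, since each 8-byte
 * slot in a 512-byte window maps to one of 64 NaT bits. For example,
 * a (hypothetical) spill to address 0xa000000000000128 yields bit
 * (0x128 >> 3) & 0x3f = 0x25, i.e. bit 37.
 */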

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR. UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}

/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;

	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
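
/*
 * Illustrative note (not part of the original header): ia64_rotr
 * rotates right and ia64_rotl is defined in terms of it, e.g.
 *
 *	ia64_rotr(0x1UL, 1) == 0x8000000000000000UL
 *	ia64_rotl(0x1UL, 1) == ia64_rotr(0x1UL, 63) == 0x2UL
 */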

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;

	result = (void *) ia64_tpa(addr);
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE	L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)
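
/*
 * Illustrative sketch (not part of the original header): walking a
 * buffer one L1 line at a time with the helpers above. The function
 * name is an assumption made for the example only.
 */
static inline void
prefetch_range_example (const void *addr, unsigned long len)
{
	const char *p = (const char *) addr;
	const char *end = p + len;

	for (; p < end; p += PREFETCH_STRIDE)
		prefetch(p);	/* hint one cache line ahead of use */
}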

extern unsigned long boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
			 IDLE_NOMWAIT, IDLE_POLL};

void default_idle(void);

#define ia64_platform_is(x)	(strcmp(x, ia64_platform_name) == 0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */