numa.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *
 *                         2002/08/07 Erich Focht <efocht@ess.nec.de>
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/mmzone.h>
#include <asm/numa.h>

/*
 * The following structures are usually initialized by ACPI or
 * similar mechanisms and describe the NUMA characteristics of the machine.
 */
int num_node_memblks;
struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
struct node_cpuid_s node_cpuid[NR_CPUS] =
        { [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };
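
/*
 * Illustrative sketch (not part of the original file): firmware parsing code
 * such as the ACPI SRAT handlers would populate the tables above roughly as
 * follows, touching only the fields this file reads (start_paddr, size, nid).
 * The helper name and parameters are hypothetical.
 */
static inline void example_record_memblk(unsigned long start,
                                         unsigned long len, int nid)
{
        if (num_node_memblks >= NR_NODE_MEMBLKS)
                return;
        node_memblk[num_node_memblks].start_paddr = start;
        node_memblk[num_node_memblks].size = len;
        node_memblk[num_node_memblks].nid = nid;
        num_node_memblks++;
}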

/*
 * This is a matrix with "distances" between nodes; they should be
 * proportional to the memory access latency ratios.
 */
u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
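
/*
 * Illustrative sketch (not part of the original file): the SLIT is stored as
 * a flattened MAX_NUMNODES x MAX_NUMNODES matrix, so the distance from node
 * "from" to node "to" lives at numa_slit[from * MAX_NUMNODES + to].
 * asm/numa.h normally wraps this indexing for node_distance(); the helper
 * name below is hypothetical.
 */
static inline u8 example_slit_distance(int from, int to)
{
        return numa_slit[from * MAX_NUMNODES + to];
}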

/* Identify which cnode a physical address resides on */
int
paddr_to_nid(unsigned long paddr)
{
        int i;

        for (i = 0; i < num_node_memblks; i++)
                if (paddr >= node_memblk[i].start_paddr &&
                    paddr < node_memblk[i].start_paddr + node_memblk[i].size)
                        break;

        return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
}
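
/*
 * Illustrative sketch (not part of the original file): callers usually have a
 * kernel virtual address and translate it with __pa() before asking which
 * node backs it. Note the contract above: -1 means SRAT data exists but does
 * not cover the address, while 0 is returned when no memblks are known at
 * all. The helper name is hypothetical.
 */
static inline int example_nid_of_vaddr(const void *vaddr)
{
        return paddr_to_nid(__pa(vaddr));
}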

#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
/*
 * Because of holes, evaluate on section limits.
 * If the section of memory exists, then return the node where the section
 * resides.  Otherwise return node 0 as the default.  This is used by
 * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
 * the section resides.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn,
                                 struct mminit_pfnnid_cache *state)
{
        int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;

        if (section >= state->last_start && section < state->last_end)
                return state->last_nid;

        for (i = 0; i < num_node_memblks; i++) {
                ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
                esec = (node_memblk[i].start_paddr + node_memblk[i].size +
                        ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
                if (section >= ssec && section < esec) {
                        state->last_start = ssec;
                        state->last_end = esec;
                        state->last_nid = node_memblk[i].nid;
                        return node_memblk[i].nid;
                }
        }

        return -1;
}
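
/*
 * Illustrative sketch (not part of the original file): the lookup above works
 * at section granularity, rounding a memblk's start down and its end up to
 * PA_SECTION_SHIFT boundaries, so a block that only partially covers a
 * section still claims it. The helper name is hypothetical.
 */
static inline void example_memblk_section_range(const struct node_memblk_s *mb,
                                                int *ssec, int *esec)
{
        *ssec = mb->start_paddr >> PA_SECTION_SHIFT;
        *esec = (mb->start_paddr + mb->size +
                 ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
}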

void numa_clear_node(int cpu)
{
        unmap_cpu_from_node(cpu, NUMA_NO_NODE);
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * SRAT information is stored in node_memblk[], so we can use that
 * information at memory hot-add time if necessary.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
        int nid = paddr_to_nid(addr);

        if (nid < 0)
                return 0;
        return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
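
/*
 * Illustrative sketch (not part of the original file): a hot-add caller can
 * rely on the node-0 fallback above, but may still want to check that the
 * chosen node is online before using it. The helper name is hypothetical.
 */
static inline int example_hotplug_target_nid(u64 addr)
{
        int nid = memory_add_physaddr_to_nid(addr);

        if (!node_online(nid))
                nid = first_online_node;
        return nid;
}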
#endif
#endif