my_tlb.h

#ifndef _MY_TLB_H
#define _MY_TLB_H

#include <linux/version.h>
#include <linux/mm.h>		/* struct page, struct mm_struct, struct vm_area_struct */
#include <linux/percpu.h>	/* get_cpu_var()/put_cpu_var() */
#include <linux/cpumask.h>	/* num_online_cpus() */

/* Function pointers to kernel-internal symbols; they start out NULL and must
 * be resolved at runtime before use. */
typedef void (*free_pages_and_swap_cache_t)(struct page **, int);
free_pages_and_swap_cache_t kern_free_pages_and_swap_cachep = NULL;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
typedef void (*flush_tlb_mm_t)(struct mm_struct *mm);
flush_tlb_mm_t kern_flush_tlb_mm = NULL;
#endif

typedef void (*free_pgtables_t)(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long floor, unsigned long ceiling);
free_pgtables_t kern_free_pgtables = NULL;
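/*
 * Sketch (not part of the original header): one way a module's init path
 * might fill in the pointers above. kallsyms_lookup_name() is a real kernel
 * API but is no longer exported on newer kernels (5.7+); the function name
 * my_tlb_resolve_symbols() is hypothetical.
 */
#if 0
#include <linux/kallsyms.h>
#include <linux/errno.h>

static int my_tlb_resolve_symbols(void)
{
	kern_free_pages_and_swap_cachep = (free_pages_and_swap_cache_t)
		kallsyms_lookup_name("free_pages_and_swap_cache");
	kern_free_pgtables = (free_pgtables_t)
		kallsyms_lookup_name("free_pgtables");
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
	kern_flush_tlb_mm = (flush_tlb_mm_t)
		kallsyms_lookup_name("flush_tlb_mm");
	if (!kern_flush_tlb_mm)
		return -ENOENT;
#endif
	if (!kern_free_pages_and_swap_cachep || !kern_free_pgtables)
		return -ENOENT;
	return 0;
}
#endif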
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE 8

struct mmu_gather_batch {
	struct mmu_gather_batch *next;
	unsigned int nr;
	unsigned int max;
	struct page *pages[0];
};

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;
#endif
	unsigned int need_flush : 1,	/* Did free PTEs */
		     fast_mode  : 1;	/* No batching */
	unsigned int fullmm;
	struct mmu_gather_batch *active;
	struct mmu_gather_batch local;
	struct page *__pages[MMU_GATHER_BUNDLE];
};

/* On >= 3.2 the kernel's own batching helpers are called through these pointers. */
typedef void (*tlb_gather_mmu_t)(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
tlb_gather_mmu_t my_tlb_gather_mmu = NULL;

typedef void (*tlb_flush_mmu_t)(struct mmu_gather *tlb);
tlb_flush_mmu_t my_tlb_flush_mmu = NULL;

typedef void (*tlb_finish_mmu_t)(struct mmu_gather *tlb, unsigned long start, unsigned long end);
tlb_finish_mmu_t my_tlb_finish_mmu = NULL;
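/*
 * Sketch (assumption, not in the original header): the call sequence a module
 * might use on >= 3.2 once my_tlb_gather_mmu, my_tlb_finish_mmu and
 * kern_free_pgtables have been resolved. The struct layout above lets the
 * gather live on the caller's stack, as the kernel itself does since 3.2.
 * The function name and the floor/ceiling arguments are illustrative; a real
 * teardown would also unmap the PTE range (e.g. via unmap_vmas) first.
 */
#if 0
static void my_free_vma_tables(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long start, unsigned long end,
			       unsigned long floor, unsigned long ceiling)
{
	struct mmu_gather tlb;

	my_tlb_gather_mmu(&tlb, mm, false);	/* not a full-mm teardown */
	kern_free_pgtables(&tlb, vma, floor, ceiling);
	my_tlb_finish_mmu(&tlb, start, end);	/* flush TLB, free batched pages */
}
#endif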
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) */

#ifdef CONFIG_X86

#ifdef CONFIG_SMP
#ifdef ARCH_FREE_PTR_NR
#define FREE_PTR_NR ARCH_FREE_PTR_NR
#else
#define FREE_PTE_NR 506
#endif
#define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
#define FREE_PTE_NR 1
#define tlb_fast_mode(tlb) 1
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
#include <asm/tlbflush.h>
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#else
#define tlb_flush(tlb) kern_flush_tlb_mm((tlb)->mm)
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct *mm;
	unsigned int nr;		/* set to ~0U means fast mode */
	unsigned int need_flush;	/* Really unmapped some ptes? */
	unsigned int fullmm;		/* non-zero means full mm flush */
	struct page *pages[FREE_PTE_NR];
};

#else
#error Need mmu_gather def
#endif /* CONFIG_X86 */
/* Pointer to the kernel's per-cpu mmu_gathers; it starts out NULL and must be
 * resolved at runtime before the helpers below are used. */
struct mmu_gather *pmmu_gathers = NULL;

/* tlb_gather_mmu
 * Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
my_tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(*pmmu_gathers);

	tlb->mm = mm;

	/* Use fast mode if only one CPU is online */
	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

	tlb->fullmm = full_mm_flush;

	return tlb;
}

static inline void
my_tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;

	tlb->need_flush = 0;
	tlb_flush(tlb);

	if (!tlb_fast_mode(tlb)) {
		kern_free_pages_and_swap_cachep(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}

/* tlb_finish_mmu
 * Called at the end of the shootdown operation to free up any resources
 * that were required.
 */
static inline void
my_tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	my_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(*pmmu_gathers);
}
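/*
 * Sketch (assumption, not in the original header): the equivalent sequence on
 * a pre-3.2 kernel, where the gather comes from the kernel's per-cpu
 * mmu_gathers via pmmu_gathers, which must have been resolved beforehand.
 * Function name and floor/ceiling arguments are illustrative only.
 */
#if 0
static void my_free_vma_tables(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long start, unsigned long end,
			       unsigned long floor, unsigned long ceiling)
{
	struct mmu_gather *tlb = my_tlb_gather_mmu(mm, 0);

	kern_free_pgtables(tlb, vma, floor, ceiling);
	my_tlb_finish_mmu(tlb, start, end);	/* flush, check_pgt_cache, put_cpu_var */
}
#endif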
#endif /* Pre 3.2 kernel */

#endif /* _MY_TLB_H */