Ver código fonte

feat: make compiler optimize likely branches

greatbridf — 2 anos atrás
commit pai: 74fa329672

+ 6 - 3
include/kernel/mm.hpp

@@ -194,13 +194,16 @@ inline constexpr void* mmend(const mm* mm_area)
     return (char*)mm_area->start + mm_area->pgs->size() * PAGE_SIZE;
 }
 
-// allocate a raw page
-page_t alloc_raw_page(void);
-
 // allocate n raw page(s)
 // @return the id of the first page allocated
 page_t alloc_n_raw_pages(size_t n);
 
+// allocate a raw page
+inline page_t alloc_raw_page(void)
+{
+    return alloc_n_raw_pages(1);
+}
+
 // allocate a struct page together with the raw page
 struct page allocate_page(void);
 

+ 8 - 0
include/types/types.h

@@ -18,6 +18,14 @@
 #error "no definition for ((SECTION))"
 #endif
 
+#ifdef __GNUC__
+#define likely(expr) (__builtin_expect(!!(expr), 1))
+#define unlikely(expr) (__builtin_expect(!!(expr), 0))
+#else
+#define likely(expr) (!!(expr))
+#define unlikely(expr) (!!(expr))
+#endif
+
 #ifdef __cplusplus
 #include <types/allocator.hpp>
 #include <types/cplusplus.hpp>

+ 3 - 2
src/kernel/interrupt.cpp

@@ -16,6 +16,7 @@
 #include <kernel_main.h>
 #include <types/size.h>
 #include <types/stdint.h>
+#include <types/types.h>
 
 static struct IDT_entry IDT[256];
 
@@ -171,13 +172,13 @@ extern "C" void int14_handler(int14_data* d)
         mms = kernel_mms;
 
     mm* mm_area = find_mm_area(mms, d->l_addr);
-    if (!mm_area)
+    if (unlikely(!mm_area))
         _int14_panic(d->v_eip, d->l_addr, d->error_code);
 
     pte_t* pte = to_pte(mms_get_pd(mms), d->l_addr);
     page* page = lto_page(mm_area, d->l_addr);
 
-    if (d->error_code.present == 0 && !mm_area->mapped_file)
+    if (unlikely(d->error_code.present == 0 && !mm_area->mapped_file))
         _int14_panic(d->v_eip, d->l_addr, d->error_code);
 
     // copy on write

+ 22 - 25
src/kernel/mem.cpp

@@ -60,9 +60,9 @@ private:
     brk_memory_allocator(const brk_memory_allocator&) = delete;
     brk_memory_allocator(brk_memory_allocator&&) = delete;
 
-    inline int brk(byte* addr)
+    inline constexpr int brk(byte* addr)
     {
-        if (addr >= p_limit)
+        if (unlikely(addr >= p_limit))
             return GB_FAILED;
         p_break = addr;
         return GB_OK;
@@ -71,7 +71,7 @@ private:
     // sets errno
     inline byte* sbrk(size_type increment)
     {
-        if (brk(p_break + increment) != GB_OK) {
+        if (unlikely(brk(p_break + increment) != GB_OK)) {
             errno = ENOMEM;
             return nullptr;
         } else {
@@ -100,7 +100,7 @@ private:
                 errno = 0;
                 return start_pos;
             } else {
-                if (!start_pos->flags.has_next) {
+                if (unlikely(!start_pos->flags.has_next)) {
                     errno = ENOTFOUND;
                     return start_pos;
                 }
@@ -114,7 +114,7 @@ private:
     {
         sbrk(sizeof(mem_blk) + size - 4 * sizeof(byte));
         // preserves errno
-        if (errno) {
+        if (unlikely(errno)) {
             return nullptr;
         }
 
@@ -201,7 +201,10 @@ static brk_memory_allocator
 
 void* k_malloc(size_t size)
 {
-    return kernel_heap_allocator->alloc(size);
+    void* ptr = kernel_heap_allocator->alloc(size);
+    if unlikely (!ptr)
+        MAKE_BREAK_POINT();
+    return ptr;
 }
 
 void k_free(void* ptr)
@@ -212,9 +215,8 @@ void k_free(void* ptr)
 void* ki_malloc(size_t size)
 {
     void* ptr = kernel_ident_mapped_allocator.alloc(size);
-    if (!ptr) {
+    if (unlikely(!ptr))
         MAKE_BREAK_POINT();
-    }
     return ptr;
 }
 
@@ -235,19 +237,19 @@ void* ptovp(pptr_t p_ptr)
     }
 }
 
-static inline void mark_page(page_t n)
+inline void mark_page(page_t n)
 {
     bm_set(mem_bitmap, n);
 }
 
-static inline void free_page(page_t n)
+inline void free_page(page_t n)
 {
     bm_clear(mem_bitmap, n);
 }
 
-static void mark_addr_len(pptr_t start, size_t n)
+constexpr void mark_addr_len(pptr_t start, size_t n)
 {
-    if (n == 0)
+    if (unlikely(n == 0))
         return;
     page_t start_page = to_page(start);
     page_t end_page = to_page(start + n + 4095);
@@ -255,9 +257,9 @@ static void mark_addr_len(pptr_t start, size_t n)
         mark_page(i);
 }
 
-static void free_addr_len(pptr_t start, size_t n)
+constexpr void free_addr_len(pptr_t start, size_t n)
 {
-    if (n == 0)
+    if (unlikely(n == 0))
         return;
     page_t start_page = to_page(start);
     page_t end_page = to_page(start + n + 4095);
@@ -265,21 +267,16 @@ static void free_addr_len(pptr_t start, size_t n)
         free_page(i);
 }
 
-static inline void mark_addr_range(pptr_t start, pptr_t end)
+inline constexpr void mark_addr_range(pptr_t start, pptr_t end)
 {
     mark_addr_len(start, end - start);
 }
 
-static inline void free_addr_range(pptr_t start, pptr_t end)
+inline constexpr void free_addr_range(pptr_t start, pptr_t end)
 {
     free_addr_len(start, end - start);
 }
 
-page_t alloc_raw_page(void)
-{
-    return alloc_n_raw_pages(1);
-}
-
 // @return the max count (but less than n) of the pages continuously available
 static inline size_t _test_n_raw_pages(page_t start, size_t n)
 {
@@ -423,7 +420,7 @@ int k_map(
     void* addr = mmend(mm_area);
     pde_t* pde = to_pde(mm_area->pd, addr);
     // page table not exist
-    if (!pde->in.p) {
+    if (unlikely(!pde->in.p)) {
         // allocate a page for the page table
         pde->in.p = 1;
         pde->in.rw = 1;
@@ -437,7 +434,7 @@ int k_map(
     pte_t* pte = to_pte(pde, addr);
     map_raw_page_to_pte(pte, page->phys_page_id, read, (write && !cow), priv);
 
-    if (cow && !page->attr.in.cow) {
+    if (unlikely(cow && !page->attr.in.cow)) {
         page->attr.in.cow = 1;
         page->pte->in.rw = 0;
         invalidate_tlb(addr);
@@ -488,7 +485,7 @@ static inline int _mmap(
     int write,
     int priv)
 {
-    if (!file->flags.in.file && !file->flags.in.special_node) {
+    if (unlikely(!file->flags.in.file && !file->flags.in.special_node)) {
         errno = EINVAL;
         return GB_FAILED;
     }
@@ -539,7 +536,7 @@ static inline void _init_map_page_identically(page_t page)
 {
     pde_t* pde = *KERNEL_PAGE_DIRECTORY_ADDR + to_pdi(page);
     // page table not exist
-    if (!pde->in.p) {
+    if (unlikely(!pde->in.p)) {
         // allocate a page for the page table
         // set the P bit of the pde in advance
         pde->in.p = 1;

+ 3 - 3
src/kernel/process.cpp

@@ -127,7 +127,7 @@ void NORETURN _kernel_init(void)
     // TODO: parse kernel parameters
     auto* _new_fs = fs::register_fs(types::kernel_allocator_new<fs::fat::fat32>(fs::vfs_open("/dev/hda1")->ind));
     int ret = fs::fs_root->ind->fs->mount(fs::vfs_open("/mnt"), _new_fs);
-    if (ret != GB_OK)
+    if (unlikely(ret != GB_OK))
         syscall(0x03);
 
     pd_t new_pd = alloc_pd();
@@ -144,7 +144,7 @@ void NORETURN _kernel_init(void)
     types::elf::elf32_load("/mnt/INIT.ELF", &intrpt_stack, 0);
     // map stack area
     ret = mmap((void*)types::elf::ELF_STACK_TOP, types::elf::ELF_STACK_SIZE, fs::vfs_open("/dev/null")->ind, 0, 1, 0);
-    if (ret != GB_OK)
+    if (unlikely(ret != GB_OK))
         syscall(0x03);
 
     asm_cli();
@@ -270,7 +270,7 @@ static inline void next_task(const types::list<thread*>::iterator_type& iter_to_
 
 void do_scheduling(interrupt_stack* intrpt_data)
 {
-    if (!is_scheduler_ready)
+    if (unlikely(!is_scheduler_ready))
         return;
 
     auto iter_thd = ready_thds->begin();