Browse Source

[LibOS] Add a bunch of asserts on lock state

borysp 4 years ago
parent
commit
bdc6a48c2c

+ 38 - 37
LibOS/shim/include/shim_internal.h

@@ -505,8 +505,6 @@ static inline void enable_preempt (shim_tcb_t * tcb)
     __enable_preempt(tcb);
 }
 
-#define DEBUG_LOCK 0
-
 static inline bool lock_created(struct shim_lock* l)
 {
     return l->lock != NULL;
@@ -518,72 +516,75 @@ static inline void clear_lock(struct shim_lock* l)
     l->owner = 0;
 }
 
-static inline void create_lock(struct shim_lock* l)
-{
+static inline bool create_lock(struct shim_lock* l) {
+    l->owner = 0;
     l->lock = DkMutexCreate(0);
-    /* l->owner = LOCK_FREE; */
-    /* l->reowned = 0; */
+    return l->lock != NULL;
 }
 
-static inline void destroy_lock(struct shim_lock* l)
-{
+static inline void destroy_lock(struct shim_lock* l) {
     DkObjectClose(l->lock);
+    l->lock = NULL;
+    l->owner = 0;
 }
 
-#if DEBUG_LOCK == 1
-#define lock(l) __lock(l, #l, __FILE__, __LINE__)
-static void __lock(struct shim_lock* l,
-                   const char* name, const char* file, int line)
+#ifdef DEBUG
+#define lock(l) __lock(l, __FILE__, __LINE__)
+static void __lock(struct shim_lock* l, const char* file, int line) {
 #else
-static void lock(struct shim_lock* l)
+static void lock(struct shim_lock* l) {
 #endif
-{
-    if (!lock_enabled || !l->lock)
+    if (!lock_enabled) {
         return;
+    }
+    /* TODO: This whole if should be just an assert. Change it once we are sure that it does not
+     * trigger (previous code allowed for this case). Same in unlock below. */
+    if (!l->lock) {
+#ifdef DEBUG
+        debug("Trying to lock an uninitialized lock at %s:%d!\n", file, line);
+#endif // DEBUG
+        __abort();
+    }
 
     shim_tcb_t * tcb = shim_get_tcb();
     disable_preempt(tcb);
 
-#if DEBUG_LOCK == 1
-    debug("try lock(%s=%p) %s:%d\n", name, l, file, line);
-#endif
-
     while (!DkSynchronizationObjectWait(l->lock, NO_TIMEOUT))
         /* nop */;
 
     l->owner = tcb->tid;
-#if DEBUG_LOCK == 1
-    debug("lock(%s=%p) by %s:%d\n", name, l, file, line);
-#endif
 }
 
-#if DEBUG_LOCK == 1
-#define unlock(l) __unlock(l, #l, __FILE__, __LINE__)
-static inline void __unlock(struct shim_lock* l,
-                            const char* name, const char* file, int line)
+#ifdef DEBUG
+#define unlock(l) __unlock(l, __FILE__, __LINE__)
+static inline void __unlock(struct shim_lock* l, const char* file, int line) {
 #else
-static inline void unlock(struct shim_lock* l)
+static inline void unlock(struct shim_lock* l) {
 #endif
-{
-    if (!lock_enabled || !l->lock)
+    if (!lock_enabled) {
         return;
+    }
+    if (!l->lock) {
+#ifdef DEBUG
+        debug("Trying to unlock an uninitialized lock at %s:%d!\n", file, line);
+#endif // DEBUG
+        __abort();
+    }
 
     shim_tcb_t* tcb = shim_get_tcb();
 
-#if DEBUG_LOCK == 1
-    debug("unlock(%s=%p) %s:%d\n", name, l, file, line);
-#endif
-
     l->owner = 0;
     DkMutexRelease(l->lock);
     enable_preempt(tcb);
 }
 
-static inline bool locked(struct shim_lock* l)
-{
-    if (!lock_enabled || !l->lock)
+static inline bool locked(struct shim_lock* l) {
+    if (!lock_enabled) {
+        return true;
+    }
+    if (!l->lock) {
         return false;
-
+    }
     return get_cur_tid() == l->owner;
 }
 

+ 7 - 0
LibOS/shim/src/bookkeep/shim_handle.c

@@ -34,6 +34,7 @@ static struct shim_lock handle_mgr_lock;
 
 #define SYSTEM_LOCK()   lock(&handle_mgr_lock)
 #define SYSTEM_UNLOCK() unlock(&handle_mgr_lock)
+#define SYSTEM_LOCKED() locked(&handle_mgr_lock)
 
 #define OBJ_TYPE struct shim_handle
 #include <memmgr.h>
@@ -199,6 +200,8 @@ done:
 }
 
 struct shim_handle* __get_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* map) {
+    assert(locked(&map->lock));
+
     struct shim_fd_handle* fd_handle = NULL;
 
     if (map->fd_top != FD_NULL && fd <= map->fd_top) {
@@ -228,6 +231,8 @@ struct shim_handle* get_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map*
 
 struct shim_handle* __detach_fd_handle(struct shim_fd_handle* fd, int* flags,
                                        struct shim_handle_map* map) {
+    assert(locked(&map->lock));
+
     struct shim_handle* handle = NULL;
 
     if (HANDLE_ALLOCATED(fd)) {
@@ -556,6 +561,8 @@ static struct shim_handle_map* get_new_handle_map(FDTYPE size) {
 }
 
 static struct shim_handle_map* __enlarge_handle_map(struct shim_handle_map* map, FDTYPE size) {
+    assert(locked(&map->lock));
+
     if (size <= map->fd_size)
         return map;
 

+ 4 - 0
LibOS/shim/src/bookkeep/shim_signal.c

@@ -619,6 +619,8 @@ __sigset_t * set_sig_mask (struct shim_thread * thread,
 }
 
 static __rt_sighandler_t __get_sighandler(struct shim_thread* thread, int sig) {
+    assert(locked(&thread->lock));
+
     struct shim_signal_handle* sighdl = &thread->signal_handles[sig - 1];
     __rt_sighandler_t handler = NULL;
 
@@ -745,6 +747,8 @@ void handle_signal (void)
 
 // Need to hold thread->lock when calling this function
 void append_signal(struct shim_thread* thread, int sig, siginfo_t* info, bool need_interrupt) {
+    assert(locked(&thread->lock));
+
     __rt_sighandler_t handler = __get_sighandler(thread, sig);
 
     if (!handler) {

+ 8 - 0
LibOS/shim/src/bookkeep/shim_thread.c

@@ -78,6 +78,8 @@ void dump_threads (void)
 }
 
 static struct shim_thread* __lookup_thread(IDTYPE tid) {
+    assert(locked(&thread_list_lock));
+
     struct shim_thread* tmp;
 
     LISTP_FOR_EACH_ENTRY(tmp, &thread_list, list) {
@@ -253,6 +255,8 @@ struct shim_thread * get_new_internal_thread (void)
 
 struct shim_simple_thread * __lookup_simple_thread (IDTYPE tid)
 {
+    assert(locked(&thread_list_lock));
+
     struct shim_simple_thread * tmp;
 
     LISTP_FOR_EACH_ENTRY(tmp, &simple_thread_list, list) {
@@ -343,6 +347,8 @@ void get_simple_thread (struct shim_simple_thread * thread)
 
 void put_simple_thread (struct shim_simple_thread * thread)
 {
+    assert(locked(&thread_list_lock));
+
     int ref_count = REF_DEC(thread->ref_count);
 
     if (!ref_count) {
@@ -454,6 +460,8 @@ void del_simple_thread (struct shim_simple_thread * thread)
 }
 
 static int _check_last_thread(struct shim_thread* self) {
+    assert(locked(&thread_list_lock));
+
     IDTYPE self_tid = self ? self->tid : 0;
 
     struct shim_thread* thread;

+ 23 - 0
LibOS/shim/src/bookkeep/shim_vma.c

@@ -165,6 +165,8 @@ static inline bool test_vma_overlap (struct shim_vma * vma,
 
 static inline void __assert_vma_list (void)
 {
+    assert(locked(&vma_list_lock));
+
     struct shim_vma * tmp;
     struct shim_vma * prev __attribute__((unused)) = NULL;
 
@@ -196,6 +198,8 @@ static inline void assert_vma_list (void)
 static inline struct shim_vma *
 __lookup_vma (void * addr, struct shim_vma ** pprev)
 {
+    assert(locked(&vma_list_lock));
+
     struct shim_vma * vma, * prev = NULL;
     struct shim_vma * found = NULL;
 
@@ -224,6 +228,7 @@ __lookup_vma (void * addr, struct shim_vma ** pprev)
 static inline void
 __insert_vma (struct shim_vma * vma, struct shim_vma * prev)
 {
+    assert(locked(&vma_list_lock));
     assert(!prev || prev->end <= vma->start);
     assert(vma != prev);
 
@@ -249,6 +254,7 @@ __insert_vma (struct shim_vma * vma, struct shim_vma * prev)
 static inline void
 __remove_vma (struct shim_vma * vma, struct shim_vma * prev)
 {
+    assert(locked(&vma_list_lock));
     __UNUSED(prev);
     assert(vma != prev);
     LISTP_DEL(vma, &vma_list, list);
@@ -276,6 +282,8 @@ static int
 __bkeep_preloaded (void * start, void * end, int prot, int flags,
                    const char * comment)
 {
+    assert(locked(&vma_list_lock));
+
     if (!start || !end || start == end)
         return 0;
 
@@ -375,6 +383,8 @@ int init_vma (void)
 
 static inline struct shim_vma * __get_new_vma (void)
 {
+    assert(locked(&vma_list_lock));
+
     struct shim_vma * tmp = NULL;
 
     if (vma_mgr)
@@ -401,6 +411,8 @@ static inline struct shim_vma * __get_new_vma (void)
 
 static inline void __restore_reserved_vmas (void)
 {
+    assert(locked(&vma_list_lock));
+
     bool nothing_reserved;
     do {
         nothing_reserved = true;
@@ -420,6 +432,8 @@ static inline void __restore_reserved_vmas (void)
 
 static inline void __drop_vma (struct shim_vma * vma)
 {
+    assert(locked(&vma_list_lock));
+
     if (vma->file)
         put_handle(vma->file);
 
@@ -476,6 +490,8 @@ static int __bkeep_mmap (struct shim_vma * prev,
                          struct shim_handle * file, off_t offset,
                          const char * comment)
 {
+    assert(locked(&vma_list_lock));
+
     int ret = 0;
     struct shim_vma * new = __get_new_vma();
 
@@ -534,6 +550,8 @@ int bkeep_mmap (void * addr, size_t length, int prot, int flags,
 static inline void __shrink_vma (struct shim_vma * vma, void * start, void * end,
                                  struct shim_vma ** tailptr)
 {
+    assert(locked(&vma_list_lock));
+
     if (test_vma_startin(vma, start, end)) {
         /*
          * Dealing with the head: if the starting address of "vma" is in
@@ -609,6 +627,8 @@ static inline void __shrink_vma (struct shim_vma * vma, void * start, void * end
 static int __bkeep_munmap (struct shim_vma ** pprev,
                            void * start, void * end, int flags)
 {
+    assert(locked(&vma_list_lock));
+
     struct shim_vma * prev = *pprev;
     struct shim_vma * cur, * next;
 
@@ -705,6 +725,8 @@ int bkeep_munmap (void * addr, size_t length, int flags)
 static int __bkeep_mprotect (struct shim_vma * prev,
                              void * start, void * end, int prot, int flags)
 {
+    assert(locked(&vma_list_lock));
+
     struct shim_vma * cur, * next;
 
     if (!prev) {
@@ -814,6 +836,7 @@ static void * __bkeep_unmapped (void * top_addr, void * bottom_addr,
                                 struct shim_handle * file,
                                 off_t offset, const char * comment)
 {
+    assert(locked(&vma_list_lock));
     assert(top_addr > bottom_addr);
 
     if (!length || length > (uintptr_t) top_addr - (uintptr_t) bottom_addr)

+ 2 - 0
LibOS/shim/src/fs/chroot/fs.c

@@ -180,6 +180,8 @@ static ssize_t make_uri (struct shim_dentry * dent)
    be held */
 static int create_data (struct shim_dentry * dent, const char * uri, size_t len)
 {
+    assert(locked(&dent->lock));
+
     if (dent->data)
         return 0;
 

+ 6 - 0
LibOS/shim/src/fs/shim_dcache.c

@@ -146,6 +146,8 @@ void put_dentry(struct shim_dentry* dent) {
  */
 struct shim_dentry* get_new_dentry(struct shim_mount* mount, struct shim_dentry* parent,
                                    const char* name, int namelen, HASHTYPE* hashptr) {
+    assert(locked(&dcache_lock));
+
     struct shim_dentry* dent = alloc_dentry();
     HASHTYPE hash;
 
@@ -211,6 +213,8 @@ struct shim_dentry* get_new_dentry(struct shim_mount* mount, struct shim_dentry*
  */
 struct shim_dentry* __lookup_dcache(struct shim_dentry* start, const char* name, int namelen,
                                     HASHTYPE* hashptr) {
+    assert(locked(&dcache_lock));
+
     /* In this implementation, we just look at the children
      * under the parent and see if there are matches.  It so,
      * return it; if not, don't.
@@ -284,6 +288,8 @@ out:
  * structure on the heap to track progress.
  */
 int __del_dentry_tree(struct shim_dentry* root) {
+    assert(locked(&dcache_lock));
+
     struct shim_dentry *cursor, *n;
 
     LISTP_FOR_EACH_ENTRY_SAFE(cursor, n, &root->children, siblings) {

+ 3 - 0
LibOS/shim/src/fs/shim_fs.c

@@ -71,6 +71,7 @@ static struct shim_lock mount_mgr_lock;
 
 #define SYSTEM_LOCK()   lock(&mount_mgr_lock)
 #define SYSTEM_UNLOCK() unlock(&mount_mgr_lock)
+#define SYSTEM_LOCKED() locked(&mount_mgr_lock)
 
 #define MOUNT_MGR_ALLOC 64
 
@@ -278,6 +279,8 @@ int search_builtin_fs(const char* type, struct shim_mount** fs) {
 }
 
 int __mount_fs(struct shim_mount* mount, struct shim_dentry* dent) {
+    assert(locked(&dcache_lock));
+
     int ret = 0;
 
     dent->state |= DENTRY_MOUNTPOINT;

+ 4 - 0
LibOS/shim/src/fs/shim_namei.c

@@ -73,6 +73,7 @@ static inline int __lookup_flags (int flags)
  */
 /* Assume caller has acquired dcache_lock */
 int permission (struct shim_dentry * dent, mode_t mask) {
+    assert(locked(&dcache_lock));
 
     mode_t mode = 0;
 
@@ -156,6 +157,8 @@ int permission (struct shim_dentry * dent, mode_t mask) {
  */
 int lookup_dentry (struct shim_dentry * parent, const char * name, int namelen, struct shim_dentry ** new, struct shim_mount * fs)
 {
+    assert(locked(&dcache_lock));
+
     struct shim_dentry * dent = NULL;
     int do_fs_lookup = 0;
     int err = 0;
@@ -265,6 +268,7 @@ int __path_lookupat (struct shim_dentry * start, const char * path, int flags,
                      struct shim_dentry ** dent, int link_depth,
                      struct shim_mount * fs, bool make_ancestor)
 {
+    assert(locked(&dcache_lock));
     // Basic idea: recursively iterate over path, peeling off one atom at a
     // time.
     /* Chia-Che 12/5/2014:

+ 7 - 1
LibOS/shim/src/ipc/shim_ipc.c

@@ -85,6 +85,8 @@ int prepare_ns_leaders(void) {
 }
 
 static struct shim_ipc_info* __create_ipc_info(IDTYPE vmid, const char* uri, size_t len) {
+    assert(locked(&ipc_info_lock));
+
     struct shim_ipc_info* info =
         get_mem_obj_from_mgr_enlarge(ipc_info_mgr, size_align_up(IPC_INFO_MGR_ALLOC));
     if (!info)
@@ -100,6 +102,8 @@ static struct shim_ipc_info* __create_ipc_info(IDTYPE vmid, const char* uri, siz
 }
 
 static void __free_ipc_info(struct shim_ipc_info* info) {
+    assert(locked(&ipc_info_lock));
+
     if (info->pal_handle) {
         DkObjectClose(info->pal_handle);
         info->pal_handle = NULL;
@@ -115,13 +119,15 @@ static void __get_ipc_info(struct shim_ipc_info* info) {
 }
 
 static void __put_ipc_info(struct shim_ipc_info* info) {
+    assert(locked(&ipc_info_lock));
+
     int ref_count = REF_DEC(info->ref_count);
     if (!ref_count)
         __free_ipc_info(info);
 }
 
 void get_ipc_info(struct shim_ipc_info* info) {
-    /* no need to grab ipc_info_lock because __get_ipc_info() is atomic */
+    /* no need to grab ipc_info_lock because __get_ipc_info() does not touch global state */
     __get_ipc_info(info);
 }
 

+ 11 - 1
LibOS/shim/src/ipc/shim_ipc_helper.c

@@ -216,6 +216,8 @@ static struct shim_ipc_port* __create_ipc_port(PAL_HANDLE hdl) {
 }
 
 static void __free_ipc_port(struct shim_ipc_port* port) {
+    assert(locked(&ipc_helper_lock));
+
     if (port->pal_handle) {
         DkObjectClose(port->pal_handle);
         port->pal_handle = NULL;
@@ -230,13 +232,15 @@ static void __get_ipc_port(struct shim_ipc_port* port) {
 }
 
 static void __put_ipc_port(struct shim_ipc_port* port) {
+    assert(locked(&ipc_helper_lock));
+
     int ref_count = REF_DEC(port->ref_count);
     if (!ref_count)
         __free_ipc_port(port);
 }
 
 void get_ipc_port(struct shim_ipc_port* port) {
-    /* no need to grab ipc_helper_lock because __get_ipc_port() is atomic */
+    /* no need to grab ipc_helper_lock because __get_ipc_port() does not touch global state */
     __get_ipc_port(port);
 }
 
@@ -252,6 +256,8 @@ void put_ipc_port(struct shim_ipc_port* port) {
 }
 
 static void __add_ipc_port(struct shim_ipc_port* port, IDTYPE vmid, IDTYPE type, port_fini fini) {
+    assert(locked(&ipc_helper_lock));
+
     port->type |= type;
     if (vmid && !port->vmid)
         port->vmid = vmid;
@@ -281,6 +287,8 @@ static void __add_ipc_port(struct shim_ipc_port* port, IDTYPE vmid, IDTYPE type,
 }
 
 static void __del_ipc_port(struct shim_ipc_port* port) {
+    assert(locked(&ipc_helper_lock));
+
     debug("Deleting port %p (handle %p) of process %u\n", port, port->pal_handle,
           port->vmid & 0xFFFF);
 
@@ -849,6 +857,8 @@ static void shim_ipc_helper_prepare(void* arg) {
 
 /* this should be called with the ipc_helper_lock held */
 static int create_ipc_helper(void) {
+    assert(locked(&ipc_helper_lock));
+
     if (ipc_helper_state == HELPER_ALIVE)
         return 0;
 

+ 11 - 1
LibOS/shim/src/ipc/shim_ipc_nsimpl.h

@@ -76,7 +76,7 @@ struct range_bitmap {
     unsigned char map[];
 };
 
-/* Helper functions __*_range_*() must be called with range_map_lock held */
+/* Helper functions __*_range*() must be called with range_map_lock held */
 static struct range_bitmap* range_map;
 static struct shim_lock range_map_lock;
 
@@ -170,6 +170,8 @@ void CONCAT3(debug_print, NS, ranges)(void) {
 #define INIT_RANGE_MAP_SIZE 32
 
 static int __extend_range_bitmap(IDTYPE expected) {
+    assert(locked(&range_map_lock));
+
     IDTYPE size = INIT_RANGE_MAP_SIZE;
 
     if (range_map)
@@ -197,6 +199,8 @@ static int __extend_range_bitmap(IDTYPE expected) {
 }
 
 static int __set_range_bitmap(IDTYPE off, bool unset) {
+    assert(locked(&range_map_lock));
+
     IDTYPE i         = off / BITS;
     IDTYPE j         = off - i * BITS;
     unsigned char* m = range_map->map + i;
@@ -214,6 +218,8 @@ static int __set_range_bitmap(IDTYPE off, bool unset) {
 }
 
 static bool __check_range_bitmap(IDTYPE off) {
+    assert(locked(&range_map_lock));
+
     IDTYPE i         = off / BITS;
     IDTYPE j         = off - i * BITS;
     unsigned char* m = range_map->map + i;
@@ -222,6 +228,8 @@ static bool __check_range_bitmap(IDTYPE off) {
 }
 
 static struct range* __get_range(IDTYPE off) {
+    assert(locked(&range_map_lock));
+
     LISTP_TYPE(range)* head = range_table + RANGE_HASH(off);
 
     if (!range_map || off >= range_map->map_size)
@@ -242,6 +250,8 @@ static struct range* __get_range(IDTYPE off) {
 
 static int __add_range(struct range* r, IDTYPE off, IDTYPE owner, const char* uri,
                        LEASETYPE lease) {
+    assert(locked(&range_map_lock));
+
     LISTP_TYPE(range)* head = range_table + RANGE_HASH(off);
     int ret                 = 0;
 

+ 2 - 0
LibOS/shim/src/ipc/shim_ipc_sysv.c

@@ -892,6 +892,8 @@ out:
 int __balance_sysv_score(struct sysv_balance_policy* policy, struct shim_handle* hdl,
                          struct sysv_score* scores, int nscores, struct sysv_client* src,
                          long score) {
+    assert(locked(&hdl->lock));
+
     struct sysv_score* s    = scores;
     struct sysv_score* last = scores + nscores;
 

+ 2 - 0
LibOS/shim/src/shim_async.c

@@ -364,6 +364,8 @@ out_err:
 
 /* this should be called with the async_helper_lock held */
 static int create_async_helper(void) {
+    assert(locked(&async_helper_lock));
+
     if (async_helper_state == HELPER_ALIVE)
         return 0;
 

+ 1 - 0
LibOS/shim/src/shim_malloc.c

@@ -37,6 +37,7 @@ static struct shim_lock slab_mgr_lock;
 
 #define SYSTEM_LOCK()   lock(&slab_mgr_lock)
 #define SYSTEM_UNLOCK() unlock(&slab_mgr_lock)
+#define SYSTEM_LOCKED() locked(&slab_mgr_lock)
 
 #ifdef SLAB_DEBUG_TRACE
 #define SLAB_DEBUG

+ 2 - 0
LibOS/shim/src/sys/shim_epoll.c

@@ -100,6 +100,8 @@ int shim_do_epoll_create(int size) {
 
 /* lock of shim_handle enclosing this epoll should be held while calling this function */
 static void update_epoll(struct shim_epoll_handle* epoll) {
+    assert(locked(&container_of(epoll, struct shim_handle, info.epoll)->lock));
+
     struct shim_epoll_item* tmp;
     epoll->pal_cnt = 0;
 

+ 5 - 2
LibOS/shim/src/sys/shim_msgget.c

@@ -54,6 +54,8 @@ DEFINE_PROFILE_CATEGORY(sysv_msg, );
 
 static int __add_msg_handle(unsigned long key, IDTYPE msqid, bool owned,
                             struct shim_msg_handle** msghdl) {
+    assert(locked(&msgq_list_lock));
+
     LISTP_TYPE(shim_msg_handle)* key_head =
         (key != IPC_PRIVATE) ? &msgq_key_hlist[MSGQ_HASH(key)] : NULL;
     LISTP_TYPE(shim_msg_handle)* qid_head = msqid ? &msgq_qid_hlist[MSGQ_HASH(msqid)] : NULL;
@@ -218,6 +220,9 @@ static void __free_msg_linked_qobjs(struct shim_msg_handle* msgq, void* obj) {
 }
 
 static int __del_msg_handle(struct shim_msg_handle* msgq) {
+    struct shim_handle* hdl = MSG_TO_HANDLE(msgq);
+    assert(locked(&hdl->lock));
+
     if (msgq->deleted)
         return -EIDRM;
 
@@ -228,8 +233,6 @@ static int __del_msg_handle(struct shim_msg_handle* msgq) {
     free(msgq->types);
     msgq->ntypes = 0;
 
-    struct shim_handle* hdl = MSG_TO_HANDLE(msgq);
-
     lock(&msgq_list_lock);
     LISTP_DEL_INIT(msgq, &msgq_list, list);
     put_handle(hdl);

+ 5 - 2
LibOS/shim/src/sys/shim_semget.c

@@ -51,6 +51,8 @@ DEFINE_PROFILE_CATEGORY(sysv_sem, );
 
 static int __add_sem_handle(unsigned long key, IDTYPE semid, int nsems, bool owned,
                             struct shim_sem_handle** semhdl) {
+    assert(locked(&sem_list_lock));
+
     LISTP_TYPE(shim_sem_handle)* key_head =
         (key != IPC_PRIVATE) ? &sem_key_hlist[SEM_HASH(key)] : NULL;
     LISTP_TYPE(shim_sem_handle)* sid_head = semid ? &sem_sid_hlist[SEM_HASH(semid)] : NULL;
@@ -188,13 +190,14 @@ void put_sem_handle(struct shim_sem_handle* sem) {
 }
 
 static int __del_sem_handle(struct shim_sem_handle* sem) {
+    struct shim_handle* hdl = SEM_TO_HANDLE(sem);
+    assert(locked(&hdl->lock));
+
     if (sem->deleted)
         return 0;
 
     sem->deleted = true;
 
-    struct shim_handle* hdl = SEM_TO_HANDLE(sem);
-
     lock(&sem_list_lock);
     LISTP_DEL_INIT(sem, &sem_list, list);
     put_handle(hdl);

+ 2 - 0
LibOS/shim/src/sys/shim_sigaction.c

@@ -263,6 +263,8 @@ struct walk_arg {
 
 // Need to hold thread->lock
 static inline void __append_signal(struct shim_thread* thread, int sig, IDTYPE sender) {
+    assert(locked(&thread->lock));
+
     debug("Thread %d killed by signal %d\n", thread->tid, sig);
     siginfo_t info;
     memset(&info, 0, sizeof(siginfo_t));

+ 2 - 0
LibOS/shim/src/sys/shim_socket.c

@@ -419,6 +419,8 @@ static int create_socket_uri(struct shim_handle* hdl) {
 
 /* hdl->lock must be held */
 static bool __socket_is_ipv6_v6only(struct shim_handle* hdl) {
+    assert(locked(&hdl->lock));
+
     struct shim_sock_option* o = hdl->info.sock.pending_options;
     while (o) {
         if (o->level == IPPROTO_IPV6 && o->optname == IPV6_V6ONLY) {

+ 1 - 0
LibOS/shim/src/utils/strobjs.c

@@ -27,6 +27,7 @@ static struct shim_lock str_mgr_lock;
 
 #define SYSTEM_LOCK()   lock(&str_mgr_lock)
 #define SYSTEM_UNLOCK() unlock(&str_mgr_lock)
+#define SYSTEM_LOCKED() locked(&str_mgr_lock)
 
 #define STR_MGR_ALLOC 32
 

+ 5 - 0
Pal/lib/memmgr.h

@@ -45,6 +45,9 @@
 #ifndef SYSTEM_UNLOCK
 #define SYSTEM_UNLOCK() ({})
 #endif
+#ifndef SYSTEM_LOCKED
+#define SYSTEM_LOCKED() true
+#endif
 
 DEFINE_LIST(mem_obj);
 typedef struct mem_obj {
@@ -111,6 +114,8 @@ static inline int init_align_up(int size) {
 #endif
 
 static inline void __set_free_mem_area(MEM_AREA area, MEM_MGR mgr) {
+    assert(SYSTEM_LOCKED());
+
     mgr->size += area->size;
     mgr->obj         = area->objs;
     mgr->obj_top     = area->objs + area->size;

+ 1 - 0
Pal/lib/slabmgr.h

@@ -277,6 +277,7 @@ static inline void destroy_slab_mgr(SLAB_MGR mgr) {
 
 // SYSTEM_LOCK needs to be held by the caller on entry.
 static inline int enlarge_slab_mgr(SLAB_MGR mgr, int level) {
+    assert(SYSTEM_LOCKED());
     assert(level < SLAB_LEVEL);
     /* DEP 11/24/17: This strategy basically doubles a level's size
      * every time it grows.  The assumption if we get this far is that

+ 1 - 0
Pal/src/host/Linux-SGX/enclave_untrusted.c

@@ -27,6 +27,7 @@ static size_t g_page_size   = PRESET_PAGESIZE;
 
 #define SYSTEM_LOCK()   spinlock_lock(&malloc_lock)
 #define SYSTEM_UNLOCK() spinlock_unlock(&malloc_lock)
+#define SYSTEM_LOCKED() spinlock_is_locked(&malloc_lock)
 
 #define ALLOC_ALIGNMENT g_page_size
 

+ 1 - 0
Pal/src/host/Linux-SGX/pal_host.h

@@ -35,6 +35,7 @@ typedef spinlock_t PAL_LOCK;
 #define LOCK_INIT INIT_SPINLOCK_UNLOCKED
 #define _DkInternalLock spinlock_lock
 #define _DkInternalUnlock spinlock_unlock
+#define _DkInternalIsLocked spinlock_is_locked
 
 void * malloc_untrusted (int size);
 void free_untrusted (void * mem);

+ 18 - 0
Pal/src/host/Linux/db_mutex.c

@@ -187,6 +187,20 @@ void _DkMutexRelease(PAL_HANDLE handle) {
     return;
 }
 
+bool _DkMutexIsLocked(struct mutex_handle* m) {
+    if (!m->locked) {
+        return false;
+    }
+
+#ifdef DEBUG_MUTEX
+    if (m->owner != INLINE_SYSCALL(gettid, 0)) {
+        return false;
+    }
+#endif
+
+    return true;
+}
+
 void _DkInternalLock(PAL_LOCK* lock) {
     // Retry the lock if being interrupted by signals
     while (_DkMutexLock(lock) < 0)
@@ -197,6 +211,10 @@ void _DkInternalUnlock(PAL_LOCK* lock) {
     _DkMutexUnlock(lock);
 }
 
+bool _DkInternalIsLocked(PAL_LOCK* lock) {
+    return _DkMutexIsLocked(lock);
+}
+
 static int mutex_wait(PAL_HANDLE handle, int64_t timeout_us) {
     return _DkMutexAcquireTimeout(handle, timeout_us);
 }

+ 5 - 0
Pal/src/host/Skeleton/db_misc.c

@@ -35,6 +35,11 @@ void _DkInternalUnlock(PAL_LOCK* lock) {
     __abort();
 }
 
+bool _DkInternalIsLocked(PAL_LOCK* lock) {
+    __abort();
+    return false;
+}
+
 unsigned long _DkSystemTimeQuery(void) {
     return 0;
 }

+ 1 - 0
Pal/src/pal_internal.h

@@ -336,6 +336,7 @@ void _DkExceptionReturn (void * event);
 /* other DK calls */
 void _DkInternalLock(PAL_LOCK* mut);
 void _DkInternalUnlock(PAL_LOCK* mut);
+bool _DkInternalIsLocked(PAL_LOCK* mut);
 unsigned long _DkSystemTimeQuery (void);
 
 /*

+ 1 - 0
Pal/src/slab.c

@@ -34,6 +34,7 @@ static PAL_LOCK slab_mgr_lock = LOCK_INIT;
 
 #define SYSTEM_LOCK()   _DkInternalLock(&slab_mgr_lock)
 #define SYSTEM_UNLOCK() _DkInternalUnlock(&slab_mgr_lock)
+#define SYSTEM_LOCKED() _DkInternalIsLocked(&slab_mgr_lock)
 
 #if STATIC_SLAB == 1
 #define POOL_SIZE 64 * 1024 * 1024 /* 64MB by default */