
On Windows, allow many entries in conscache directories

Since we can't be sure that we can unlink enough files on Windows, let's
let the number of permitted entries grow huge if it really must.

We do this by letting the storagedir hold many more entries than
configured, while still trying to keep the number of entries under the
configured limit.  We also have to tell consdiffmgr not to treat it as an
error if it can't actually remove enough entries.

Part of a fix for bug 22752
Nick Mathewson, 6 years ago (commit da159c45e2)
3 changed files with 37 additions and 1 deletion:
  1. src/or/conscache.c    (+27, -0)
  2. src/or/conscache.h    (+1, -0)
  3. src/or/consdiffmgr.c  (+9, -1)
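The message takes for granted that a file which is still open or mapped can't be unlinked on Windows. The standalone sketch below is not part of the commit; it only illustrates that behavior (the file name and prints are arbitrary): deleting a file fails while a handle and a mapping are held, and succeeds once both are released.

#include <windows.h>
#include <stdio.h>

int
main(void)
{
  /* Create a one-byte file and map it, roughly the way conscache maps
   * consensus files. */
  HANDLE f = CreateFileA("demo.bin", GENERIC_READ | GENERIC_WRITE,
                         FILE_SHARE_READ, NULL, CREATE_ALWAYS,
                         FILE_ATTRIBUTE_NORMAL, NULL);
  DWORD written = 0;
  WriteFile(f, "x", 1, &written, NULL);
  HANDLE map = CreateFileMappingA(f, NULL, PAGE_READONLY, 0, 0, NULL);
  void *view = MapViewOfFile(map, FILE_MAP_READ, 0, 0, 0);

  /* While the file is open and mapped, the unlink-equivalent fails. */
  if (!DeleteFileA("demo.bin"))
    printf("delete failed while in use (error %lu)\n", GetLastError());

  UnmapViewOfFile(view);
  CloseHandle(map);
  CloseHandle(f);

  /* Once everything is unmapped and closed, the delete goes through. */
  if (DeleteFileA("demo.bin"))
    printf("delete succeeded after unmapping\n");
  return 0;
}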

src/or/conscache.c  (+27, -0)

@@ -54,6 +54,11 @@ struct consensus_cache_t {
   storage_dir_t *dir;
   /** List of all the entries in the directory. */
   smartlist_t *entries;
+
+  /** The maximum number of entries that we'd like to allow in this cache.
+   * This is the same as the storagedir limit when MUST_UNMAP_TO_UNLINK is
+   * not defined. */
+  unsigned max_entries;
 };
 
 static void consensus_cache_clear(consensus_cache_t *cache);
@@ -71,6 +76,10 @@ consensus_cache_open(const char *subdir, int max_entries)
 {
   consensus_cache_t *cache = tor_malloc_zero(sizeof(consensus_cache_t));
   char *directory = get_datadir_fname(subdir);
+  cache->max_entries = max_entries;
+#ifdef MUST_UNMAP_TO_UNLINK
+  max_entries = 1000000;
+#endif
   cache->dir = storage_dir_new(directory, max_entries);
   tor_free(directory);
   if (!cache->dir) {
@@ -82,6 +91,19 @@ consensus_cache_open(const char *subdir, int max_entries)
   return cache;
 }
 
+/** Return true if it's okay to put more entries in this cache than
+ * its official file limit. */
+int
+consensus_cache_may_overallocate(consensus_cache_t *cache)
+{
+  (void) cache;
+#ifdef MUST_UNMAP_TO_UNLINK
+  return 1;
+#else
+  return 0;
+#endif
+}
+
 /**
  * Tell the sandbox (if any) configured by <b>cfg</b> to allow the
  * operations that <b>cache</b> will need.
@@ -90,6 +112,11 @@ int
 consensus_cache_register_with_sandbox(consensus_cache_t *cache,
                                       struct sandbox_cfg_elem **cfg)
 {
+#ifdef MUST_UNMAP_TO_UNLINK
+  /* Our sandbox doesn't support huge limits like we use here.
+   */
+  tor_assert_nonfatal_unreached();
+#endif
   return storage_dir_register_with_sandbox(cache->dir, cfg);
 }
 

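Both conscache.c hunks are guarded by MUST_UNMAP_TO_UNLINK, which this diff never defines. The commit assumes a platform guard roughly like the one below; the exact file and comment wording are an assumption, not shown in the changeset:

/* Assumed definition: on Windows a file that is still mapped can't be
 * unlinked, so the consensus cache has to tolerate holding more files
 * than its nominal limit. */
#ifdef _WIN32
#define MUST_UNMAP_TO_UNLINK
#endif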
src/or/conscache.h  (+1, -0)

@@ -14,6 +14,7 @@ HANDLE_DECL(consensus_cache_entry, consensus_cache_entry_t, )
 consensus_cache_t *consensus_cache_open(const char *subdir, int max_entries);
 void consensus_cache_free(consensus_cache_t *cache);
 struct sandbox_cfg_elem;
+int consensus_cache_may_overallocate(consensus_cache_t *cache);
 int consensus_cache_register_with_sandbox(consensus_cache_t *cache,
                                           struct sandbox_cfg_elem **cfg);
 void consensus_cache_unmap_lazy(consensus_cache_t *cache, time_t cutoff);

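The header now exports the overallocation predicate, so enforcement code can ask the cache whether exceeding its nominal limit is tolerable instead of hard-coding a platform check. A hedged sketch of the caller pattern (over_limit is a hypothetical flag; the real caller is the consdiffmgr.c hunk below):

if (over_limit && !consensus_cache_may_overallocate(cache)) {
  /* Genuinely out of space: report the failure to the caller. */
  return -1;
}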
src/or/consdiffmgr.c  (+9, -1)

@@ -1136,7 +1136,7 @@ consdiffmgr_ensure_space_for_files(int n)
     return 0;
   }
   // Let's get more assertive: clean out unused stuff, and force-remove
-  // the files.
+  // the files that we can.
   consdiffmgr_cleanup();
   consensus_cache_delete_pending(cache, 1);
   const int n_to_remove = n - consensus_cache_get_n_filenames_available(cache);
@@ -1159,6 +1159,14 @@ consdiffmgr_ensure_space_for_files(int n)
   smartlist_free(objects);
 
   consensus_cache_delete_pending(cache, 1);
+
+  if (consensus_cache_may_overallocate(cache)) {
+    /* If we're allowed to throw extra files into the cache, let's do so
+     * rather than getting upset.
+     */
+    return 0;
+  }
+
   if (BUG(n_marked < n_to_remove))
     return -1;
   else
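With the early return in place, a shortfall after cleanup counts as success on MUST_UNMAP_TO_UNLINK platforms instead of tripping the BUG() check. A minimal smoke test in the style of Tor's tinytest-based unit tests might look like the sketch below; the test name is hypothetical and, like the existing conscache tests, it assumes a configured data directory:

static void
test_conscache_may_overallocate(void *arg)
{
  (void)arg;
  /* The storagedir limit is inflated internally on MUST_UNMAP_TO_UNLINK
   * platforms, but the predicate still reports the policy directly. */
  consensus_cache_t *cache = consensus_cache_open("cons_test", 16);
  tt_assert(cache);
#ifdef MUST_UNMAP_TO_UNLINK
  tt_int_op(consensus_cache_may_overallocate(cache), OP_EQ, 1);
#else
  tt_int_op(consensus_cache_may_overallocate(cache), OP_EQ, 0);
#endif
 done:
  consensus_cache_free(cache);
}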