
Make DBHandler interface more abstract

The DBHandler interface assumed that openStream, readStream and
closeStream accepted and returned ifstream objects. This made the
interface insufficiently generic and forced the user to store the
database in files.

Remove the DBHandler interface's dependency on ifstreams. Instead,
openStream accepts a uint64_t as the stream's identifier, readStream
accepts the identifier of an open stream and returns the actual data,
and closeStream accepts the identifier of an open stream and closes it.
mandragore, 7 years ago
parent
commit 4713beb163
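
With this change streams are addressed purely by a uint64_t identifier, so no
ifstream pointer ever crosses the interface boundary. A minimal usage sketch of
the new call pattern (the helper name readAllStreams and the include path are
illustrative, not part of the commit; it assumes a caller-allocated buffer of
getNbStream() * getmaxFileBytesize() bytes), mirroring the loop in
PIRReplyGeneratorTrivial further down:

    #include <cstdint>
    #include "DBHandler.hpp"

    void readAllStreams(DBHandler* dbhandler, char* buf) {
        const uint64_t chunk = dbhandler->getmaxFileBytesize();
        for (uint64_t i = 0; i < dbhandler->getNbStream(); i++) {
            if (!dbhandler->openStream(i, 0)) continue; // identifier already in use
            dbhandler->readStream(i, buf, chunk);       // read by identifier, not by ifstream*
            dbhandler->closeStream(i);                  // release the identifier
            buf += chunk;
        }
    }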

pir/dbhandlers/DBDirectoryProcessor.cpp  (+22, -20)

@@ -27,10 +27,6 @@ DBDirectoryProcessor::DBDirectoryProcessor() : filesSplitting(false) {
 	directory=std::string(DEFAULT_DIR_NAME);
 	maxFileBytesize=0;
 
-	// Create the pool of ifstream
-	for(int i=0;i<NB_FILE_DESCRIPTORS;i++)
-		fdPool.push_back(new std::ifstream());
-
 	// Then create the catalog and get the filenumbers and size
 	DIR *dir = opendir (directory.c_str());
 	struct dirent *ent = nullptr;
@@ -78,10 +74,6 @@ DBDirectoryProcessor::DBDirectoryProcessor(uint64_t nbStreams) : filesSplitting(
 	directory=std::string(DEFAULT_DIR_NAME);
 	maxFileBytesize=0;
 
-	// Create the pool of ifstream
-	for(int i=0;i<NB_FILE_DESCRIPTORS;i++)
-		fdPool.push_back(new std::ifstream());
-
 	// Then create the catalog and get the filenumbers and size
 	DIR *dir = opendir (directory.c_str());
 	struct dirent *ent = nullptr;
@@ -129,7 +121,9 @@ DBDirectoryProcessor::DBDirectoryProcessor(uint64_t nbStreams) : filesSplitting(
 }
 
 DBDirectoryProcessor::~DBDirectoryProcessor() {
-	for (auto ifs : fdPool) delete ifs; 
+    for(auto it : fdPool) {
+        delete it.second;
+    }
 }
 
 std::string DBDirectoryProcessor::getCatalog(const bool typeOfCatalog) {
@@ -170,11 +164,13 @@ bool DBDirectoryProcessor::getErrorStatus() {
 	return error;
 }
 
-std::ifstream* DBDirectoryProcessor::openStream(uint64_t streamNb, uint64_t requested_offset) {
+bool DBDirectoryProcessor::openStream(uint64_t streamNb, uint64_t requested_offset) {
+    if(fdPool.count(streamNb)) {
+        return false;
+    }
 	std::string local_directory(DEFAULT_DIR_NAME);
 
-	std::ifstream* is = fdPool.back();
-	fdPool.pop_back();
+	std::ifstream* is = new std::ifstream();
 	// When there is no splitting, each ifstream is associated with a real file 
 	// (at least when no aggregation is done which is the case for now)
 	if(!filesSplitting) {
@@ -186,10 +182,12 @@ std::ifstream* DBDirectoryProcessor::openStream(uint64_t streamNb, uint64_t requ
 		is->open( realFileName, std::ios::binary );
 		is->seekg(splitting_offset + requested_offset);
 	}
-	return is;
+    fdPool.insert( std::pair<uint64_t, std::ifstream*>(streamNb, is));
+	return true;
 }
 
-uint64_t DBDirectoryProcessor::readStream(std::ifstream* s, char * buf, uint64_t size) {
+uint64_t DBDirectoryProcessor::readStream(uint64_t streamNb, char * buf, uint64_t size) {
+    std::ifstream *s = fdPool[streamNb];
 	uint64_t sizeRead=0;
 	//std::cout << "sizeRead = "<<sizeRead<<" size = "<<size<<std::endl;
 	while(sizeRead<size) {
@@ -205,9 +203,13 @@ uint64_t DBDirectoryProcessor::readStream(std::ifstream* s, char * buf, uint64_t
 	return size;
 }
 
-void DBDirectoryProcessor::closeStream(std::ifstream* s) {
-	s->close();
-	fdPool.push_back(s);
+void DBDirectoryProcessor::closeStream(uint64_t streamNb) {
+    if(!fdPool.count(streamNb)) {
+        return;
+    }
+    std::map<uint64_t, std::ifstream*>::iterator it = fdPool.find(streamNb);
+    it->second->close();
+    fdPool.erase(it);
 }
 
 std::streampos DBDirectoryProcessor::getFileSize( std::string filePath ){
@@ -230,12 +232,12 @@ void DBDirectoryProcessor::readAggregatedStream(uint64_t streamNb, uint64_t alph
 	{	
 		for (int i=startStream; i <= endStream; i++)
 		{
-			std::ifstream *stream = openStream(i, offset);
+			openStream(i, offset);
 
 			// Just read the file (plus padding for that file)
-			readStream(stream, rawBits + (i % alpha) * fileByteSize, fileByteSize);
+			readStream(i, rawBits + (i % alpha) * fileByteSize, fileByteSize);
 
-			closeStream(stream);
+			closeStream(i);
 		} 
 
 		if(paddingStreams !=0)

pir/dbhandlers/DBDirectoryProcessor.hpp  (+5, -4)

@@ -23,6 +23,7 @@
 #include <boost/thread.hpp>
 #include <dirent.h>
 #include <vector>
+#include <map>
 #include <string>
 #include <iostream>
 #include <fstream>
@@ -37,7 +38,7 @@ class DBDirectoryProcessor : public DBHandler
 private:
 	boost::mutex mutex;
   std::string directory;
-  std::vector<std::ifstream*> fdPool; // a pool of file descriptors
+  std::map<uint64_t, std::ifstream*> fdPool; // a pool of file descriptors
   std::vector <std::string> file_list; // the output file list
   bool filesSplitting;
   bool error = false;
@@ -55,10 +56,10 @@ public:
   uint64_t getmaxFileBytesize();
   bool getErrorStatus();
   
-  std::ifstream* openStream(uint64_t streamNb, uint64_t requested_offset);
-  uint64_t readStream(std::ifstream* s,char * buf, uint64_t size);
+  bool openStream(uint64_t streamNb, uint64_t requested_offset);
+  uint64_t readStream(uint64_t streamNb, char * buf, uint64_t size);
   void readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits);
-  void closeStream(std::ifstream* s);
+  void closeStream(uint64_t streamNb);
   
   
 	std::streampos getFileSize( std::string filePath );

pir/dbhandlers/DBGenerator.cpp  (+5, -5)

@@ -73,11 +73,11 @@ uint64_t DBGenerator::getmaxFileBytesize() {
 	return maxFileBytesize;
 }
 
-std::ifstream* DBGenerator::openStream(uint64_t streamNb, uint64_t requested_offset) {
-	return NULL;
+bool DBGenerator::openStream(uint64_t streamNb, uint64_t requested_offset) {
+	return true;
 }
 
-uint64_t DBGenerator::readStream(std::ifstream* s, char * buf, uint64_t size) {
+uint64_t DBGenerator::readStream(uint64_t streamNb, char * buf, uint64_t size) {
   //for (unsigned char i = 0xaa, j = 0; j < size; i++, j++)
   //{
   //  buf[j] = i;
@@ -94,10 +94,10 @@ uint64_t DBGenerator::readStream(std::ifstream* s, char * buf, uint64_t size) {
   return size;
 }
 
-void DBGenerator::closeStream(std::ifstream* s) {}
+void DBGenerator::closeStream(uint64_t streamNb) {}
 
 void DBGenerator::readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits){
-  readStream(NULL, NULL, 0);
+  readStream(0, 0, 0);
 	uint64_t fileByteSize = std::min(bytes_per_file, maxFileBytesize-offset);
   uint64_t startStream = streamNb*alpha;
   uint64_t endStream = std::min(streamNb*alpha + alpha - 1, getNbStream() - 1);

pir/dbhandlers/DBGenerator.hpp  (+3, -3)

@@ -36,10 +36,10 @@ public:
   uint64_t getNbStream();
   uint64_t getmaxFileBytesize();
   
-  std::ifstream* openStream(uint64_t streamNb, uint64_t requested_offset);
-  uint64_t readStream(std::ifstream* s, char * buf, uint64_t size);
+  bool openStream(uint64_t streamNb, uint64_t requested_offset);
+  uint64_t readStream(uint64_t streamNb, char * buf, uint64_t size);
   void readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits);
-  void closeStream(std::ifstream* s);
+  void closeStream(uint64_t streamNb);
   
 private:
   std::mt19937_64 random_engine; // Fixed seed of 0

pir/dbhandlers/DBHandler.hpp  (+3, -3)

@@ -33,10 +33,10 @@ public:
   virtual uint64_t getNbStream()=0;
   virtual uint64_t getmaxFileBytesize()=0;
   
-  virtual std::ifstream* openStream(uint64_t streamNb, uint64_t requested_offset)=0;
-  virtual uint64_t readStream(std::ifstream* s, char * buf, uint64_t size)=0;
+  virtual bool openStream(uint64_t streamNb, uint64_t requested_offset)=0;
+  virtual uint64_t readStream(uint64_t streamNb, char * buf, uint64_t size)=0;
   virtual void readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits)=0;
-  virtual void closeStream(std::ifstream* s)=0;
+  virtual void closeStream(uint64_t streamNb)=0;
   virtual ~DBHandler(){};
   
   

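Since the pure virtual methods no longer mention ifstream, an implementation is
free to serve entries from anywhere, not just from files on disk. As a purely
hypothetical illustration (the class name and members are invented; it only
overrides the methods visible in the hunk above, so any other pure virtuals
DBHandler declares would still need implementing), an in-memory handler could
look like this:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <map>
    #include <utility>
    #include <vector>
    #include "DBHandler.hpp"

    class DBMemoryExample : public DBHandler {
    public:
        explicit DBMemoryExample(std::vector<std::vector<char>> entries)
            : entries(std::move(entries)) {}

        uint64_t getNbStream() { return entries.size(); }

        uint64_t getmaxFileBytesize() {
            uint64_t m = 0;
            for (const auto& e : entries) m = std::max<uint64_t>(m, e.size());
            return m;
        }

        bool openStream(uint64_t streamNb, uint64_t requested_offset) {
            if (streamNb >= entries.size() || cursors.count(streamNb)) return false;
            cursors[streamNb] = requested_offset;   // just remember a read position
            return true;
        }

        uint64_t readStream(uint64_t streamNb, char* buf, uint64_t size) {
            const std::vector<char>& e = entries[streamNb];   // assumes the stream is open
            uint64_t pos = std::min<uint64_t>(cursors[streamNb], e.size());
            uint64_t n = std::min<uint64_t>(size, e.size() - pos);
            std::memcpy(buf, e.data() + pos, n);
            std::memset(buf + n, 0, size - n);                // zero-pad past the entry's end
            cursors[streamNb] = pos + n;
            return size;
        }

        void closeStream(uint64_t streamNb) { cursors.erase(streamNb); }

        void readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset,
                                  uint64_t bytes_per_file, char* rawBits) {
            uint64_t start = streamNb * alpha;
            uint64_t end = std::min(start + alpha - 1, getNbStream() - 1);
            for (uint64_t i = start; i <= end; i++) {
                openStream(i, offset);
                readStream(i, rawBits + (i - start) * bytes_per_file, bytes_per_file);
                closeStream(i);
            }
        }

    private:
        std::vector<std::vector<char>> entries;   // database kept entirely in RAM
        std::map<uint64_t, uint64_t> cursors;     // open streams: identifier -> read position
    };
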
pir/replyGenerator/PIRReplyGeneratorGMP.cpp  (+3, -3)

@@ -50,12 +50,12 @@ void PIRReplyGeneratorGMP::importData()
 	{
 		if (i % pirParam.alpha == 0) datae[i/pirParam.alpha] = new mpz_t[maxChunkSize*pirParam.alpha];
 
-    ifstream* stream=dbhandler->openStream(i, 0);
+    dbhandler->openStream(i, 0);
 
     // For each chunk of size "size" of the file
 		for (unsigned int j = 0 ; j < maxChunkSize ; j++ )
 		{
-			dbhandler->readStream(stream,rawBits, size);
+			dbhandler->readStream(i ,rawBits, size);
 			mpz_init(datae[i/pirParam.alpha][j + (i % pirParam.alpha) * maxChunkSize]);
 			mpz_import(datae[i/pirParam.alpha][j + (i % pirParam.alpha) * maxChunkSize], size, 1, sizeof(char), 0, 0, rawBits);
 		}
@@ -67,7 +67,7 @@ void PIRReplyGeneratorGMP::importData()
 			mpz_init_set_ui(datae[i/pirParam.alpha][j + ((i+1) % pirParam.alpha) * maxChunkSize], 0);
       }
     }
-		dbhandler->closeStream(stream);
+		dbhandler->closeStream(i);
 	}
 
   std::cout << "PIRReplyGeneratorGMP: " << pirParam.alpha*theoretic_nbr_elements - nbFiles << "  non-aggregated padding files need to be added ..." << std::endl;

pir/replyGenerator/PIRReplyGeneratorTrivial.cpp  (+3, -3)

@@ -68,9 +68,9 @@ void PIRReplyGeneratorTrivial::importData()
 	//pour tous les fichiers.
 	for (unsigned int i = 0 ; i < dbhandler->getNbStream() ; i++)
 	{
-    ifstream* stream = dbhandler->openStream(i, 0);
-	  dbhandler->readStream(stream, dataptr, maxFileBytesize);
-		dbhandler->closeStream(stream);
+    dbhandler->openStream(i, 0);
+	  dbhandler->readStream(i, dataptr, maxFileBytesize);
+		dbhandler->closeStream(i);
     dataptr += maxFileBytesize;
 	}