
Merge pull request #40 from Mandragorian/db-from-memory

Initialise db from memory
Carlos Aguilar Melchor, 6 years ago
commit 9bfc1cc56a

+ 43 - 7
apps/simplepir/simplePIR.cpp

@@ -211,7 +211,7 @@ int main(int argc, char * argv[]) {
 
   // Simple test
   std::cout << "======================================================================" << std::endl;
-  std::cout << "Test 1/7: database_size = 1ULL<<30; nb_files = 20;" << std::endl;
+  std::cout << "Test 1/9: database_size = 1ULL<<20; nb_files = 20;" << std::endl;
   std::cout << "params.alpha = 1; params.d = 1; crypto_params = LWE:80:2048:120;" << std::endl; 
   std::cout << "======================================================================" << std::endl;
   database_size = 1ULL<<20; nb_files = 20; maxFileBytesize = database_size/nb_files;
@@ -228,7 +228,7 @@ int main(int argc, char * argv[]) {
   // WARNING we must provide the representation of the database GIVEN recursion and aggregation
   // as here we have 100 elements and aggregate them in a unique group we have params.n[0]=1
   std::cout << "======================================================================" << std::endl;
-  std::cout << "Test 2/7: database_size = 1ULL<<25; nb_files = 100;" << std::endl;
+  std::cout << "Test 2/9: database_size = 1ULL<<25; nb_files = 100;" << std::endl;
   std::cout << "params.alpha = 100; params.d = 1; crypto_params = LWE:80:2048:120;" << std::endl; 
   std::cout << "======================================================================" << std::endl;
   database_size = 1ULL<<25; nb_files = 100; maxFileBytesize = database_size/nb_files;
@@ -240,7 +240,7 @@ int main(int argc, char * argv[]) {
 
   // Test with recursion 2
   std::cout << "======================================================================" << std::endl;
-  std::cout << "Test 3/7: database_size = 1ULL<<25; nb_files = 100;" << std::endl;
+  std::cout << "Test 3/9: database_size = 1ULL<<25; nb_files = 100;" << std::endl;
   std::cout << "params.alpha = 1; params.d = 2; crypto_params = LWE:80:2048:120;" << std::endl; 
   std::cout << "======================================================================" << std::endl;
   database_size = 1ULL<<25; nb_files = 100; maxFileBytesize = database_size/nb_files;
@@ -252,7 +252,7 @@ int main(int argc, char * argv[]) {
   
   // Test with recursion 2 and aggregation
   std::cout << "======================================================================" << std::endl;
-  std::cout << "Test 4/7: database_size = 1ULL<<25; nb_files = 100;" << std::endl;
+  std::cout << "Test 4/9: database_size = 1ULL<<25; nb_files = 100;" << std::endl;
   std::cout << "params.alpha = 2; params.d = 2; crypto_params = LWE:80:2048:120;" << std::endl; 
   std::cout << "======================================================================" << std::endl;
   database_size = 1ULL<<25; nb_files = 100; maxFileBytesize = database_size/nb_files;
@@ -264,7 +264,7 @@ int main(int argc, char * argv[]) {
   
   // Test with recursion 3
   std::cout << "======================================================================" << std::endl;
-  std::cout << "Test 5/7: database_size = 1ULL<<25; nb_files = 100;" << std::endl;
+  std::cout << "Test 5/9: database_size = 1ULL<<25; nb_files = 100;" << std::endl;
   std::cout << "params.alpha = 1; params.d = 3; crypto_params = LWE:80:2048:120;" << std::endl; 
   std::cout << "======================================================================" << std::endl;
   database_size = 1ULL<<25; nb_files = 100; maxFileBytesize = database_size/nb_files;
@@ -276,7 +276,7 @@ int main(int argc, char * argv[]) {
   
   // Test with a DBDirectoryProcessor splitting a big real file
   std::cout << "======================================================================" << std::endl;
-  std::cout << "Test 6/7: DBDirectoryProcessor with split; database_size = 1ULL<<25; nb_files = 4;" << std::endl;
+  std::cout << "Test 6/9: DBDirectoryProcessor with split; database_size = 1ULL<<25; nb_files = 4;" << std::endl;
   std::cout << "params.alpha = 1; params.d = 1; crypto_params = LWE:80:2048:120;" << std::endl; 
   std::cout << "======================================================================" << std::endl;
   database_size = 1ULL<<25; nb_files = 4; maxFileBytesize = database_size/nb_files;
@@ -292,7 +292,7 @@ int main(int argc, char * argv[]) {
   
   // Test with a DBDirectoryProcessor reading real files
   std::cout << "======================================================================" << std::endl;
-  std::cout << "Test 7/7: DBDirectoryProcessor without split;" << std::endl;
+  std::cout << "Test 7/9: DBDirectoryProcessor without split;" << std::endl;
   std::cout << "params.alpha = 1; params.d = 1; crypto_params = LWE:80:2048:120;" << std::endl; 
   std::cout << "======================================================================" << std::endl;
   DBDirectoryProcessor db7;
@@ -307,6 +307,42 @@ int main(int argc, char * argv[]) {
     tests_failed |= run(&db7, chosen_element, params);
   }
 
+  // Test with a DBVectorProcessor
+  std::cout << "======================================================================" << std::endl;
+  std::cout << "Test 8/9: DBVectorProcessor;" << std::endl;
+  std::cout << "params.alpha = 1; params.d = 1; crypto_params = LWE:80:2048:120;" << std::endl;
+  std::cout << "======================================================================" << std::endl;
+  std::vector<element_t> elements1;
+  elements1.push_back(Element("first file", 10, (char*) "first data"));
+  elements1.push_back(Element("second file", 11, (char*) "second data"));
+  elements1.push_back(Element("third file", 8, (char*) "3rd data"));
+
+  DBVectorProcessor db8(elements1);
+
+  chosen_element = 0;
+  params.alpha = 1; params.d = 1;
+  params.crypto_params = "LWE:80:2048:120";
+  tests_failed |= run(&db8, chosen_element, params);
+
+  // Test with a DBVectorProcessor and aggregation
+  std::cout << "======================================================================" << std::endl;
+  std::cout << "Test 9/9: DBVectorProcessor;" << std::endl;
+  std::cout << "params.alpha = 2; params.d = 1; crypto_params = LWE:80:2048:120;" << std::endl;
+  std::cout << "======================================================================" << std::endl;
+  std::vector<element_t> elements2;
+  elements2.push_back(Element("first file", 10, (char*) "first data"));
+  elements2.push_back(Element("second file", 11, (char*) "second data"));
+  elements2.push_back(Element("third file", 8, (char*) "3rd data"));
+  elements2.push_back(Element("fourth file", 14, (char*) "fourth db data"));
+  elements2.push_back(Element("fifth file", 13, (char*) "fifth db data"));
+
+  DBVectorProcessor db9(elements2);
+
+  chosen_element = 0;
+  params.alpha = 2; params.d = 1;
+  params.crypto_params = "LWE:80:2048:120";
+  tests_failed |= run(&db9, chosen_element, params);
+
   if (tests_failed) 
   {
    std::cout << "WARNING : at least one test failed" << std::endl;
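
For reference, Element's constructor takes an explicit byte count and copies the buffer, so the hand-counted literal lengths in Tests 8/9 can be avoided by deriving both from a std::string payload. The sketch below relies only on the Element/element_t declarations added in pir/dbhandlers/DBVectorProcessor.hpp; the makeElement and buildTestElements helpers are hypothetical.

    #include "libpir.hpp"  // exports DBVectorProcessor.hpp after this change
    #include <string>
    #include <vector>

    // Hypothetical helper: wraps a std::string payload into an Element so the
    // string provides the byte count instead of a hand-counted literal.
    static element_t makeElement(const std::string& name, const std::string& payload)
    {
        // Element copies the buffer internally (malloc + memcpy), so handing it
        // the string's internal buffer is safe here.
        return Element(name, payload.size(), const_cast<char*>(payload.data()));
    }

    // The same database as Test 8, without magic size constants.
    std::vector<element_t> buildTestElements()
    {
        std::vector<element_t> elements;
        elements.push_back(makeElement("first file", "first data"));
        elements.push_back(makeElement("second file", "second data"));
        elements.push_back(makeElement("third file", "3rd data"));
        return elements;
    }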

+ 1 - 0
libpir.hpp

@@ -11,6 +11,7 @@
 #include <crypto/HomomorphicCrypto.hpp>
 #include "pir/dbhandlers/DBGenerator.hpp"
 #include "pir/dbhandlers/DBDirectoryProcessor.hpp"
+#include "pir/dbhandlers/DBVectorProcessor.hpp"
 #include <stdint.h>
 
 

+ 1 - 1
pir/dbhandlers/CMakeLists.txt

@@ -3,4 +3,4 @@ cmake_minimum_required(VERSION 2.6.0)
 include_directories(..)
 include_directories(../..)
 
-add_library(pir_dbhandlers STATIC DBDirectoryProcessor.cpp DBGenerator.cpp)
+add_library(pir_dbhandlers STATIC DBDirectoryProcessor.cpp DBGenerator.cpp DBVectorProcessor.cpp DBHandler.cpp)

+ 19 - 42
pir/dbhandlers/DBDirectoryProcessor.cpp

@@ -27,10 +27,6 @@ DBDirectoryProcessor::DBDirectoryProcessor() : filesSplitting(false) {
 	directory=std::string(DEFAULT_DIR_NAME);
 	maxFileBytesize=0;
 
-	// Create the pool of ifstream
-	for(int i=0;i<NB_FILE_DESCRIPTORS;i++)
-		fdPool.push_back(new std::ifstream());
-
 	// Then create the catalog and get the filenumbers and size
 	DIR *dir = opendir (directory.c_str());
 	struct dirent *ent = nullptr;
@@ -78,10 +74,6 @@ DBDirectoryProcessor::DBDirectoryProcessor(uint64_t nbStreams) : filesSplitting(
 	directory=std::string(DEFAULT_DIR_NAME);
 	maxFileBytesize=0;
 
-	// Create the pool of ifstream
-	for(int i=0;i<NB_FILE_DESCRIPTORS;i++)
-		fdPool.push_back(new std::ifstream());
-
 	// Then create the catalog and get the filenumbers and size
 	DIR *dir = opendir (directory.c_str());
 	struct dirent *ent = nullptr;
@@ -129,7 +121,9 @@ DBDirectoryProcessor::DBDirectoryProcessor(uint64_t nbStreams) : filesSplitting(
 }
 
 DBDirectoryProcessor::~DBDirectoryProcessor() {
-	for (auto ifs : fdPool) delete ifs; 
+    for(auto it : fdPool) {
+        delete it.second;
+    }
 }
 
 std::string DBDirectoryProcessor::getCatalog(const bool typeOfCatalog) {
@@ -170,11 +164,13 @@ bool DBDirectoryProcessor::getErrorStatus() {
 	return error;
 }
 
-std::ifstream* DBDirectoryProcessor::openStream(uint64_t streamNb, uint64_t requested_offset) {
+bool DBDirectoryProcessor::openStream(uint64_t streamNb, uint64_t requested_offset) {
+    if(fdPool.count(streamNb)) {
+        return false;
+    }
 	std::string local_directory(DEFAULT_DIR_NAME);
 
-	std::ifstream* is = fdPool.back();
-	fdPool.pop_back();
+	std::ifstream* is = new std::ifstream();
 	// When there is no splitting, each ifstream is associated with a real file 
 	// (at least when no aggregation is done which is the case for now)
 	if(!filesSplitting) {
@@ -186,10 +182,12 @@ std::ifstream* DBDirectoryProcessor::openStream(uint64_t streamNb, uint64_t requ
 		is->open( realFileName, std::ios::binary );
 		is->seekg(splitting_offset + requested_offset);
 	}
-	return is;
+    fdPool.insert( std::pair<uint64_t, std::ifstream*>(streamNb, is));
+	return true;
 }
 
-uint64_t DBDirectoryProcessor::readStream(std::ifstream* s, char * buf, uint64_t size) {
+uint64_t DBDirectoryProcessor::readStream(uint64_t streamNb, char * buf, uint64_t size) {
+    std::ifstream *s = fdPool[streamNb];
 	uint64_t sizeRead=0;
 	//std::cout << "sizeRead = "<<sizeRead<<" size = "<<size<<std::endl;
 	while(sizeRead<size) {
@@ -205,9 +203,13 @@ uint64_t DBDirectoryProcessor::readStream(std::ifstream* s, char * buf, uint64_t
 	return size;
 }
 
-void DBDirectoryProcessor::closeStream(std::ifstream* s) {
-	s->close();
-	fdPool.push_back(s);
+void DBDirectoryProcessor::closeStream(uint64_t streamNb) {
+    if(!fdPool.count(streamNb)) {
+        return;
+    }
+    std::map<uint64_t, std::ifstream*>::iterator it = fdPool.find(streamNb);
+    it->second->close();
+    fdPool.erase(it);
 }
 
 std::streampos DBDirectoryProcessor::getFileSize( std::string filePath ){
@@ -219,28 +221,3 @@ std::streampos DBDirectoryProcessor::getFileSize( std::string filePath ){
 	file.close();
 	return fsize;
 }
-
-void DBDirectoryProcessor::readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits){
-	uint64_t fileByteSize = std::min(bytes_per_file, getmaxFileBytesize()-offset);
-	uint64_t startStream = streamNb*alpha;
-	uint64_t endStream = std::min(streamNb*alpha + alpha - 1, getNbStream() - 1);
-	uint64_t paddingStreams = (streamNb*alpha+alpha) >= getNbStream() ? (streamNb*alpha+alpha) - getNbStream() : 0;
-
-  #pragma omp critical
-	{	
-		for (int i=startStream; i <= endStream; i++)
-		{
-			std::ifstream *stream = openStream(i, offset);
-
-			// Just read the file (plus padding for that file)
-			readStream(stream, rawBits + (i % alpha) * fileByteSize, fileByteSize);
-
-			closeStream(stream);
-		} 
-
-		if(paddingStreams !=0)
-		{
-			bzero(rawBits + (endStream % alpha) * fileByteSize, fileByteSize*paddingStreams);
-		}
-	}
-}
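
With the descriptor pool now keyed by stream number, the whole open/read/close cycle is driven by indices rather than ifstream pointers. A minimal caller-side sketch, assuming a populated db/ directory (the DEFAULT_DIR_NAME that DBDirectoryProcessor scans) with at least one file; dumpFirstStream is a hypothetical example function:

    #include "pir/dbhandlers/DBDirectoryProcessor.hpp"
    #include <iostream>
    #include <vector>

    int dumpFirstStream()
    {
        DBDirectoryProcessor db;                    // scans DEFAULT_DIR_NAME as before
        if (db.getErrorStatus() || db.getNbStream() == 0) return 1;

        std::vector<char> buf(db.getmaxFileBytesize());

        // openStream now returns a bool and refuses to reopen an already-open stream.
        if (!db.openStream(0, 0)) return 1;
        db.readStream(0, buf.data(), buf.size());   // reads are addressed by the same index
        db.closeStream(0);                          // closing erases the pool entry

        std::cout << "read " << buf.size() << " bytes from stream 0" << std::endl;
        return 0;
    }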

+ 5 - 5
pir/dbhandlers/DBDirectoryProcessor.hpp

@@ -23,6 +23,7 @@
 #include <boost/thread.hpp>
 #include <dirent.h>
 #include <vector>
+#include <map>
 #include <string>
 #include <iostream>
 #include <fstream>
@@ -37,7 +38,7 @@ class DBDirectoryProcessor : public DBHandler
 private:
 	boost::mutex mutex;
   std::string directory;
-  std::vector<std::ifstream*> fdPool; // a pool of file descriptors
+  std::map<uint64_t, std::ifstream*> fdPool; // a pool of file descriptors
   std::vector <std::string> file_list; // the output file list
   bool filesSplitting;
   bool error = false;
@@ -55,10 +56,9 @@ public:
   uint64_t getmaxFileBytesize();
   bool getErrorStatus();
   
-  std::ifstream* openStream(uint64_t streamNb, uint64_t requested_offset);
-  uint64_t readStream(std::ifstream* s,char * buf, uint64_t size);
-  void readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits);
-  void closeStream(std::ifstream* s);
+  bool openStream(uint64_t streamNb, uint64_t requested_offset);
+  uint64_t readStream(uint64_t streamNb, char * buf, uint64_t size);
+  void closeStream(uint64_t streamNb);
   
   
 	std::streampos getFileSize( std::string filePath );

+ 5 - 5
pir/dbhandlers/DBGenerator.cpp

@@ -73,11 +73,11 @@ uint64_t DBGenerator::getmaxFileBytesize() {
 	return maxFileBytesize;
 }
 
-std::ifstream* DBGenerator::openStream(uint64_t streamNb, uint64_t requested_offset) {
-	return NULL;
+bool DBGenerator::openStream(uint64_t streamNb, uint64_t requested_offset) {
+	return true;
 }
 
-uint64_t DBGenerator::readStream(std::ifstream* s, char * buf, uint64_t size) {
+uint64_t DBGenerator::readStream(uint64_t streamNb, char * buf, uint64_t size) {
   //for (unsigned char i = 0xaa, j = 0; j < size; i++, j++)
   //{
   //  buf[j] = i;
@@ -94,10 +94,10 @@ uint64_t DBGenerator::readStream(std::ifstream* s, char * buf, uint64_t size) {
   return size;
 }
 
-void DBGenerator::closeStream(std::ifstream* s) {}
+void DBGenerator::closeStream(uint64_t streamNb) {}
 
 void DBGenerator::readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits){
-  readStream(NULL, NULL, 0);
+  readStream(0, 0, 0);
 	uint64_t fileByteSize = std::min(bytes_per_file, maxFileBytesize-offset);
   uint64_t startStream = streamNb*alpha;
   uint64_t endStream = std::min(streamNb*alpha + alpha - 1, getNbStream() - 1);

+ 3 - 3
pir/dbhandlers/DBGenerator.hpp

@@ -36,10 +36,10 @@ public:
   uint64_t getNbStream();
   uint64_t getmaxFileBytesize();
   
-  std::ifstream* openStream(uint64_t streamNb, uint64_t requested_offset);
-  uint64_t readStream(std::ifstream* s, char * buf, uint64_t size);
+  bool openStream(uint64_t streamNb, uint64_t requested_offset);
+  uint64_t readStream(uint64_t streamNb, char * buf, uint64_t size);
   void readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits);
-  void closeStream(std::ifstream* s);
+  void closeStream(uint64_t streamNb);
   
 private:
   std::mt19937_64 random_engine; // Fixed seed of 0

+ 26 - 0
pir/dbhandlers/DBHandler.cpp

@@ -0,0 +1,26 @@
+#include "DBHandler.hpp"
+
+void DBHandler::readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits) {
+    uint64_t fileByteSize = std::min(bytes_per_file, getmaxFileBytesize()-offset);
+    uint64_t startStream = streamNb*alpha;
+    uint64_t endStream = std::min(streamNb*alpha + alpha - 1, getNbStream() - 1);
+    uint64_t paddingStreams = (streamNb*alpha+alpha) >= getNbStream() ? (streamNb*alpha+alpha) - getNbStream() : 0;
+
+  #pragma omp critical
+    {
+        for (uint64_t i=startStream; i <= endStream; i++)
+        {
+            openStream(i, offset);
+
+            // Just read the file (plus padding for that file)
+            readStream(i, rawBits + (i % alpha) * fileByteSize, fileByteSize);
+
+            closeStream(i);
+        }
+
+        if(paddingStreams !=0)
+        {
+            bzero(rawBits + (endStream % alpha) * fileByteSize, fileByteSize*paddingStreams);
+        }
+    }
+}
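
Hoisting readAggregatedStream into DBHandler means a handler only has to implement the three per-stream virtuals; the aggregated path then works through a plain DBHandler pointer. A small sketch exercising it against the new DBVectorProcessor (aggregatedRead is illustrative only; element names and payloads are arbitrary placeholders):

    #include "libpir.hpp"
    #include <vector>

    void aggregatedRead()
    {
        std::vector<element_t> elements;
        elements.push_back(Element("a", 3, (char*) "aaa"));
        elements.push_back(Element("b", 3, (char*) "bbb"));
        elements.push_back(Element("c", 3, (char*) "ccc"));

        DBVectorProcessor vdb(elements);
        DBHandler* db = &vdb;                        // base-class interface is enough

        // alpha = 2: streams are consumed in groups of two per aggregated read.
        uint64_t alpha = 2;
        std::vector<char> raw(alpha * db->getmaxFileBytesize());
        db->readAggregatedStream(0, alpha, 0, db->getmaxFileBytesize(), raw.data());
        // raw now holds the payloads of streams 0 and 1 back to back.
    }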

+ 5 - 4
pir/dbhandlers/DBHandler.hpp

@@ -21,6 +21,7 @@
 #include <vector>
 #include <string>
 #include <iostream>
+#include <string.h>
 
 class DBHandler
 {
@@ -33,10 +34,10 @@ public:
   virtual uint64_t getNbStream()=0;
   virtual uint64_t getmaxFileBytesize()=0;
   
-  virtual std::ifstream* openStream(uint64_t streamNb, uint64_t requested_offset)=0;
-  virtual uint64_t readStream(std::ifstream* s, char * buf, uint64_t size)=0;
-  virtual void readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits)=0;
-  virtual void closeStream(std::ifstream* s)=0;
+  virtual bool openStream(uint64_t streamNb, uint64_t requested_offset)=0;
+  virtual uint64_t readStream(uint64_t streamNb, char * buf, uint64_t size)=0;
+  virtual void readAggregatedStream(uint64_t streamNb, uint64_t alpha, uint64_t offset, uint64_t bytes_per_file, char* rawBits);
+  virtual void closeStream(uint64_t streamNb)=0;
   virtual ~DBHandler(){};
   
   

+ 110 - 0
pir/dbhandlers/DBVectorProcessor.cpp

@@ -0,0 +1,110 @@
+/* Copyright (C) 2017 Carlos Aguilar Melchor, Joris Barrier, Marc-Olivier Killijian
+ *
+ * This file is written by Konstantinos Andrikopoulos
+ *
+ * This file is part of XPIR.
+ *
+ * XPIR is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * XPIR is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with XPIR.  If not, see <http://www.gnu.org/licenses/>.
+ *
+*/
+
+#include "DBVectorProcessor.hpp"
+#include <stdlib.h> // for malloc
+
+Element::Element(std::string e_name, uint64_t e_size, char* e_data) :
+name(e_name),
+data_size(e_size),
+data(NULL)
+{
+    data = (char*) malloc(data_size);
+    memcpy(data, e_data, data_size);
+}
+
+
+DBVectorProcessor::DBVectorProcessor(std::vector<element_t>& vector_db) :
+elements(vector_db)
+{
+   maxFileByteSize = 0;
+
+   for (auto e : elements)
+   {
+       if (e.data_size > maxFileByteSize)
+       {
+           maxFileByteSize = e.data_size;
+       }
+   }
+}
+
+DBVectorProcessor::~DBVectorProcessor() {}
+
+std::string DBVectorProcessor::getCatalog(const bool typeOfCatalog) {
+    std::string buf;
+    if(typeOfCatalog) {
+        buf = std::to_string((unsigned int)0) + "\n";
+        buf += std::to_string(getNbStream()) + "\n";
+        for (auto e : elements)
+        {
+            //auto e = elements[i];
+            buf += e.name + "\n" + std::to_string(e.data_size) + "\n";
+        }
+        return buf;
+    }
+    else {
+        buf = std::to_string((unsigned int)1) + "\n";
+        buf += std::to_string(getNbStream()) + "\n";
+        buf += std::to_string(getmaxFileBytesize()) + "\n";
+        return buf;
+    }
+}
+
+uint64_t DBVectorProcessor::getNbStream() {
+    return elements.size();
+}
+
+uint64_t DBVectorProcessor::getmaxFileBytesize() {
+    return maxFileByteSize;
+}
+
+bool DBVectorProcessor::openStream(uint64_t streamNb, uint64_t requested_offset) {
+    if(openStreamOffsets.count(streamNb)) {
+        return false;
+    }
+
+    char* stream = elements[streamNb].data + requested_offset;
+    openStreamOffsets.insert( std::pair<uint64_t, char*>(streamNb, stream));
+    return true;
+}
+
+uint64_t DBVectorProcessor::readStream(uint64_t streamNb, char * buf, uint64_t size) {
+    element_t e = elements[streamNb];
+    char* stream = openStreamOffsets[streamNb];
+    uint64_t sizeRead = stream - e.data;
+    uint64_t sizeRemaining = e.data_size - sizeRead;
+
+    if(sizeRemaining >= size) {
+        memcpy(buf, stream, size);
+        stream += size;
+    }
+    else {
+        memcpy(buf, stream, sizeRemaining);
+        bzero(buf + sizeRemaining, size - sizeRemaining);
+        stream += sizeRemaining;
+    }
+
+    openStreamOffsets[streamNb] = stream;
+    return size;
+}
+
+void DBVectorProcessor::closeStream(uint64_t streamNb) {
+    openStreamOffsets.erase(streamNb);
+}

+ 62 - 0
pir/dbhandlers/DBVectorProcessor.hpp

@@ -0,0 +1,62 @@
+/* Copyright (C) 2017 Carlos Aguilar Melchor, Joris Barrier, Marc-Olivier Killijian
+ *
+ * This file is written by Konstantinos Andrikopoulos
+ *
+ * This file is part of XPIR.
+ *
+ *  XPIR is free software: you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation, either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  XPIR is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with XPIR.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef DEF_DBVECPROC
+#define DEF_DBVECPROC
+
+#include "DBHandler.hpp"
+
+#include <vector>
+#include <map>
+#include <tuple>
+#include <string.h>
+
+
+struct Element {
+    Element(std::string, uint64_t, char*);
+    std::string name;
+    size_t data_size;
+    char *data;
+};
+
+typedef Element element_t;
+
+class DBVectorProcessor : public DBHandler
+{
+private:
+    std::vector<element_t>& elements;
+    std::map<uint64_t, char*> openStreamOffsets;
+    uint64_t maxFileByteSize;
+
+public:
+    DBVectorProcessor(std::vector<element_t>& vector_db);
+    virtual ~DBVectorProcessor();
+
+    std::string getCatalog(const bool typeOfCatalog);
+
+    uint64_t getNbStream();
+    uint64_t getmaxFileBytesize();
+
+    bool openStream(uint64_t streamNb, uint64_t requested_offset);
+    uint64_t readStream(uint64_t streamNb, char * buf, uint64_t size);
+    void closeStream(uint64_t streamNb);
+};
+
+#endif /* DEF_DBVECPROC */
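
Taken together, this header is all a caller needs to serve a database assembled in memory instead of from a directory; anything that accepts a DBHandler (such as the run() driver in apps/simplepir/simplePIR.cpp) can consume it unchanged. A minimal sketch, with placeholder names and payloads:

    #include "libpir.hpp"  // now exports DBVectorProcessor.hpp as well
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        // Build the database entirely in memory: one Element per retrievable item.
        std::vector<element_t> elements;
        elements.push_back(Element("greeting", 5, (char*) "hello"));
        elements.push_back(Element("answer", 2, (char*) "42"));

        DBVectorProcessor db(elements);

        // Catalog and sizing queries behave like the other DBHandler back ends.
        std::cout << db.getCatalog(true);
        std::cout << "streams: " << db.getNbStream()
                  << ", max element size: " << db.getmaxFileBytesize() << std::endl;

        // From here, &db can be handed to the PIR machinery wherever a
        // DBHandler* is expected, e.g. the run(...) test driver used above.
        return 0;
    }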

+ 3 - 3
pir/replyGenerator/PIRReplyGeneratorGMP.cpp

@@ -50,12 +50,12 @@ void PIRReplyGeneratorGMP::importData()
 	{
 		if (i % pirParam.alpha == 0) datae[i/pirParam.alpha] = new mpz_t[maxChunkSize*pirParam.alpha];
 
-    ifstream* stream=dbhandler->openStream(i, 0);
+    dbhandler->openStream(i, 0);
 
     // For each chunk of size "size" of the file
 		for (unsigned int j = 0 ; j < maxChunkSize ; j++ )
 		{
-			dbhandler->readStream(stream,rawBits, size);
+			dbhandler->readStream(i ,rawBits, size);
 			mpz_init(datae[i/pirParam.alpha][j + (i % pirParam.alpha) * maxChunkSize]);
 			mpz_import(datae[i/pirParam.alpha][j + (i % pirParam.alpha) * maxChunkSize], size, 1, sizeof(char), 0, 0, rawBits);
 		}
@@ -67,7 +67,7 @@ void PIRReplyGeneratorGMP::importData()
 			mpz_init_set_ui(datae[i/pirParam.alpha][j + ((i+1) % pirParam.alpha) * maxChunkSize], 0);
       }
     }
-		dbhandler->closeStream(stream);
+		dbhandler->closeStream(i);
 	}
 
   std::cout << "PIRReplyGeneratorGMP: " << pirParam.alpha*theoretic_nbr_elements - nbFiles << "  non-aggregated padding files need to be added ..." << std::endl;

+ 3 - 3
pir/replyGenerator/PIRReplyGeneratorTrivial.cpp

@@ -68,9 +68,9 @@ void PIRReplyGeneratorTrivial::importData()
 	//pour tous les fichiers.
 	for (unsigned int i = 0 ; i < dbhandler->getNbStream() ; i++)
 	{
-    ifstream* stream = dbhandler->openStream(i, 0);
-	  dbhandler->readStream(stream, dataptr, maxFileBytesize);
-		dbhandler->closeStream(stream);
+    dbhandler->openStream(i, 0);
+	  dbhandler->readStream(i, dataptr, maxFileBytesize);
+		dbhandler->closeStream(i);
     dataptr += maxFileBytesize;
 	}