123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124 |
- from dht_common import SIZE_OF_HASH, SIZE_OF_IP_ADDRESS, SIZE_OF_OT_VALUE, SIZE_OF_KEY, SIZE_OF_SIGNATURE, SIZE_OF_TIMESTAMP
- from qp_node import QP_Quorum
- from math import sqrt, ceil
- from collections import defaultdict
class DHTPIR_Quorum(QP_Quorum):
    """A quorum that serves documents through PIR queries.

    Extends QP_Quorum with cost accounting for perfect-hash-function (PHF)
    generation and private-information-retrieval (PIR) lookups, tracking
    per-node round/message/byte counters for each simulated operation.
    """

    def __init__(self, quorumID, documentSize, numNodes, numItems=0, table=None):
        # BUG FIX: the original declared `table=[]`, a mutable default shared
        # across every instance; substitute a fresh list per call instead.
        QP_Quorum.__init__(self, quorumID, documentSize, numNodes, numItems,
                           [] if table is None else table)
        # numRecords -> how many times a PHF of that size was generated.
        self.PHFGenerations = defaultdict(int)
        # One histogram per node: numRecords -> PIR retrievals of that size.
        self.PIRRetrievals = [defaultdict(int) for _ in range(self.numNodes)]

    def get_PHF_generations(self, whichNode):
        # NOTE(review): whichNode is unused — PHF generation counts are
        # quorum-wide. Parameter kept for interface compatibility.
        return self.PHFGenerations

    def get_PIR_retrievals(self, whichNode):
        """Return the PIR retrieval histogram for one node."""
        return self.PIRRetrievals[whichNode]

    def _request_header_size(self, numKeys, numSignatures):
        """Bytes common to every request: the asker's ID hash, a timestamp,
        then numKeys keys and numSignatures signatures."""
        return (SIZE_OF_HASH
                + SIZE_OF_TIMESTAMP
                + SIZE_OF_KEY * numKeys
                + SIZE_OF_SIGNATURE * numSignatures)

    def _blocking_params(self):
        """Return (blockingFactor, numRecords) for the current table size.

        This should guarantee an optimal request/response based on whether
        it's appropriate to block responses. NOTE: the divisions work
        because Python 3 promotes ints to floats for "/". The factor is
        clamped to >= 1 so an empty quorum does not divide by zero.
        """
        blockingFactor = max(1, ceil(sqrt(self.numItems / self.documentSize)))
        numRecords = ceil(self.numItems / blockingFactor)
        return blockingFactor, numRecords

    def insert(self, numKeys, numSignatures):
        """Account for one document insertion, replicated to every node."""
        self.numItems += 1
        # Request: standard header + the actual document sent + a signature
        # over the whole thing.
        sizeOfRequest = (self._request_header_size(numKeys, numSignatures)
                         + self.documentSize
                         + SIZE_OF_SIGNATURE)
        sizeOfResponse = SIZE_OF_HASH + SIZE_OF_SIGNATURE
        # BUG FIX: the original called map(lambda x: x+1, ...) and discarded
        # the result; map is lazy in Python 3 and the lambdas could not have
        # mutated the lists anyway, so no counter was ever updated.
        # Increment the per-node counters in place instead.
        for node in range(self.numNodes):
            self.nodeNumRounds[node] += 1
            self.nodeNumMessagesSent[node] += 1
            self.nodeNumMessagesRecv[node] += 1
            self.nodeNumBytesSent[node] += sizeOfResponse
            self.nodeNumBytesRecv[node] += sizeOfRequest
        _, numRecords = self._blocking_params()
        self.PHFGenerations[numRecords] += 1

    def get_hash_function(self, whichNode, numKeys, numSignatures):
        """Account for fetching the PHF from one node.

        Returns the size of the response in bytes.
        """
        sizeOfRequest = self._request_header_size(numKeys, numSignatures)
        _, numRecords = self._blocking_params()
        # From "Practical Perfect Hashing in Nearly Optimal Space" by
        # Botelho et al.: a conservative but practical estimate of the PHF
        # size is 2.7n *bits*, for n = the number of valid keys in the PHF.
        # Rounded up to whole bytes for hopefully obvious reasons.
        sizeOfResponse = ceil((2.7 * numRecords) / 8.0) + SIZE_OF_SIGNATURE
        self.nodeNumRounds[whichNode] += 1
        self.nodeNumMessagesSent[whichNode] += 1
        self.nodeNumMessagesRecv[whichNode] += 1
        self.nodeNumBytesSent[whichNode] += sizeOfResponse
        self.nodeNumBytesRecv[whichNode] += sizeOfRequest
        return sizeOfResponse

    # This shouldn't be used; it exists only to make sure you don't try the
    # RCP_Quorum function it overrides.
    def retrieve(self):
        return None

    def PIR_retrieve(self, whichNode, numKeys, numSignatures):
        """Account for one PIR query against a node.

        Returns (numRecords, recordSize) for the blocked database.
        """
        sizeOfRequest = self._request_header_size(numKeys, numSignatures)
        blockingFactor, numRecords = self._blocking_params()
        recordSize = blockingFactor * self.documentSize
        self.nodeNumRounds[whichNode] += 1
        self.nodeNumMessagesSent[whichNode] += 1
        self.nodeNumMessagesRecv[whichNode] += 1
        self.nodeNumBytesSent[whichNode] += recordSize
        # This is specifically because in Goldberg PIR, you need 1B per
        # record in the query vector.
        self.nodeNumBytesRecv[whichNode] += numRecords + sizeOfRequest
        self.PIRRetrievals[whichNode][numRecords] += 1
        return (numRecords, recordSize)
# TODO: Add unit tests for size calculations
if __name__ == "__main__":
    SIZE_OF_DOCUMENTS_IN_TEST = 1024
    NUM_NODES_PER_QUORUM_IN_TEST = 10
    # BUG FIX: insert/get_hash_function/PIR_retrieve all require
    # numKeys and numSignatures; the original calls omitted them and
    # raised TypeError on every invocation.
    NUM_KEYS_IN_TEST = 1
    NUM_SIGNATURES_IN_TEST = 1
    test = DHTPIR_Quorum(0, SIZE_OF_DOCUMENTS_IN_TEST, NUM_NODES_PER_QUORUM_IN_TEST)

    # Plain for-loops instead of list comprehensions run only for their
    # side effects.
    for _ in range(NUM_NODES_PER_QUORUM_IN_TEST):
        test.insert(NUM_KEYS_IN_TEST, NUM_SIGNATURES_IN_TEST)

    for node in range(NUM_NODES_PER_QUORUM_IN_TEST):
        test.get_hash_function(node, NUM_KEYS_IN_TEST, NUM_SIGNATURES_IN_TEST)
    print("Getting PHFs fires on all nodes correctly.")
    for node in range(NUM_NODES_PER_QUORUM_IN_TEST):
        test.PIR_retrieve(node, NUM_KEYS_IN_TEST, NUM_SIGNATURES_IN_TEST)
    print("PIR retrieval fires on all nodes correctly.")
|