def open(self, path, purge=0):
    self.path = path
    if purge:
        self.purge_database()
    print('DB3OPEN: path=%s contains %s' % (path, os.listdir(path)))
    # One Berkeley DB B-tree table per record index, plus a hash table
    # for configuration counters.
    self._pdb = self.db3open('PDB01', db.DB_BTREE)
    self._adb = self.db3open('AUT01', db.DB_BTREE)
    self._kdb = self.db3open('KEY01', db.DB_BTREE)
    self._ndb = self.db3open('NUM01', db.DB_BTREE)
    self._rdb = self.db3open('REF01', db.DB_BTREE)
    self._tdb = self.db3open('TIT01', db.DB_BTREE)
    self._cnf = self.db3open('CNF01', db.DB_HASH)
    self.sysid = Counter(self._cnf, 'SYS', 1)
    self.tmpid = Counter(self._cnf, 'TMP', 999000000)
    self.syscnt = Counter(self._cnf, 'SZS', 0)
    self.tmpcnt = Counter(self._cnf, 'SZT', 0)
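# The snippet above relies on a db3open() helper that is not shown here.
# A minimal sketch of what it presumably does, assuming each table name
# ('PDB01', 'AUT01', ...) maps to a file under self.path and that the
# standard bsddb3 bindings are in use; the helper body below is an
# assumption, not the project's actual code.
import os
from bsddb3 import db

def db3open(self, name, dbtype):
    handle = db.DB()
    # db.DB_CREATE creates the file on first open.
    handle.open(os.path.join(self.path, name), None, dbtype, db.DB_CREATE)
    return handle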
def __init__(self, db_env, node_pickler):
    self.__db_env = db_env
    self.__dbp = db.DB(db_env)
    self.__dbp.open("IDMap_hash.db", None, db.DB_HASH,
                    db.DB_CREATE | db.DB_AUTO_COMMIT)
    self.__dbs = db.DB(db_env)
    self.__dbs.open("IDMap_recno.db", None, db.DB_RECNO,
                    db.DB_CREATE | db.DB_AUTO_COMMIT)
    # pickling and un-pickling the data
    self.__node_pickler = node_pickler
    self.__loads = self.__node_pickler.loads
    self.__dumps = self.__node_pickler.dumps
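# Hedged sketch of how such a hash/recno pair is typically used: the
# DB_RECNO table hands out dense integer record ids via append(), while
# the DB_HASH table maps the pickled node back to its id. The method
# name and key layout here are illustrative assumptions, meant to live
# inside the same class.
def _add_node(self, node):
    data = self.__dumps(node)         # pickle the node to bytes
    new_id = self.__dbs.append(data)  # DB_RECNO assigns the next record id
    self.__dbp.put(data, str(new_id).encode())  # reverse lookup: node -> id
    return new_id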
def __init__(self, storage_config=None):
    if storage_config is None:
        storage_config = DEFAULT_BERKELEY_DB_STORAGE_CONFIG
    self.storage_config = storage_config
    self.storage = db.DB()
    GB = 1024 * 1024 * 1024
    # set_cachesize() takes separate gigabyte and byte parts, so split
    # the configured cache size (in bytes) accordingly.
    cache_size = storage_config.get("hashsize", 204800)
    self.storage.set_cachesize(cache_size // GB, cache_size % GB)
    self.storage.open(storage_config["filename"], None, db.DB_HASH, db.DB_CREATE)
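# Worked example of the split above (values are illustrative): a request
# for 2 GiB + 512 MiB ends up as gbytes=2, bytes=536870912, matching the
# (gbytes, bytes) pair that set_cachesize() expects. With the default
# "hashsize" of 204800 bytes the gigabyte part is simply 0.
GB = 1024 * 1024 * 1024
requested = 2 * GB + 512 * 1024 * 1024
gbytes, remainder = divmod(requested, GB)
assert (gbytes, remainder) == (2, 536870912)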
def __init__(self, myinfo=None, db_dir=''):
    if MyDB.__single:
        raise RuntimeError("PeerDB is singleton")
    self.db_name = 'mydata.bsd'
    self._data = open_db(self.db_name, db_dir, filetype=db.DB_HASH)  # dbshelve object
    self.initData(myinfo)
    MyDB.__single = self
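# Hypothetical companion accessor for the singleton pattern above; the
# real project may expose it under a different name. __init__ records
# the instance in MyDB.__single (assumed to default to None at class
# level), so the accessor only constructs once.
@classmethod
def get_instance(cls, *args, **kwargs):
    if cls.__single is None:
        cls(*args, **kwargs)  # __init__ stores itself in cls.__single
    return cls.__single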
("note_map", NOTE_TBL, db.DB_HASH),
("tag_map", TAG_TBL, db.DB_HASH),
("reference_map", REF_MAP, db.DB_BTREE),
]
dbflags = DBFLAGS_R if self.readonly else DBFLAGS_O
for (dbmap, dbname, dbtype) in db_maps:
_db = self.__open_shelf(self.full_name, dbname, dbtype)
setattr(self, dbmap, _db)
if callback:
callback(37)
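# The __open_shelf() helper is not shown; a minimal sketch under the
# assumption that a "shelf" is a Berkeley DB table whose values are
# pickled Python objects, as provided by bsddb3's dbshelve module.
# The environment handle and flag handling are simplified guesses.
from bsddb3 import db, dbshelve

def __open_shelf(self, fname, dbname, dbtype=db.DB_HASH):
    shelf = dbshelve.DBShelf(self.env)  # pickles/unpickles values transparently
    shelf.open(fname, dbname, dbtype, db.DB_CREATE)
    return shelf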
# Open name grouping database
self.name_group = self.__open_db(self.full_name, NAME_GROUP,
                                 db.DB_HASH, db.DB_DUP)
# We have now successfully opened the database, so if the BSDDB version
# has changed, we update the BSDDB version file.
if self.update_env_version:
    versionpath = os.path.join(name, BDBVERSFN)
    with open(versionpath, "w") as version_file:
        version = str(db.version())
        version_file.write(version)
    _LOG.debug("Updated bsddb version file to %s" % version)
if self.update_python_version:
    versionpath = os.path.join(name, "pythonversion.txt")
    version = str(version_info[0])
    _LOG.debug("Updated python version file to %s" % version)
    with open(versionpath, "w") as version_file:
        version_file.write(version)
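# Hedged sketch of the other side of this bookkeeping: at startup the
# stored version can be compared against the running bsddb to decide
# whether update_env_version should be set. The helper name is an
# assumption; the file name reuses BDBVERSFN from above.
def _bsddb_version_changed(name):
    versionpath = os.path.join(name, BDBVERSFN)
    try:
        with open(versionpath) as version_file:
            return version_file.read().strip() != str(db.version())
    except OSError:
        return True  # no version file yet, treat as changed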
or remove invalid secondary index tables.
"""
# index tables used just for speeding up searches
self.surnames = self.__open_db(self.full_name, SURNAMES, db.DB_BTREE,
                               db.DB_DUP | db.DB_DUPSORT)
db_maps = [
    ("id_trans", IDTRANS, db.DB_HASH, 0),
    ("fid_trans", FIDTRANS, db.DB_HASH, 0),
    ("eid_trans", EIDTRANS, db.DB_HASH, 0),
    ("pid_trans", PIDTRANS, db.DB_HASH, 0),
    ("sid_trans", SIDTRANS, db.DB_HASH, 0),
    ("cid_trans", CIDTRANS, db.DB_HASH, 0),
    ("oid_trans", OIDTRANS, db.DB_HASH, 0),
    ("rid_trans", RIDTRANS, db.DB_HASH, 0),
    ("nid_trans", NIDTRANS, db.DB_HASH, 0),
    ("tag_trans", TAGTRANS, db.DB_HASH, 0),
    ("parents", PPARENT, db.DB_HASH, 0),
    ("reference_map_primary_map", REF_PRI, db.DB_BTREE, 0),
    ("reference_map_referenced_map", REF_REF, db.DB_BTREE, db.DB_DUPSORT),
]

for (dbmap, dbname, dbtype, dbflags) in db_maps:
    _db = self.__open_db(self.full_name, dbname, dbtype,
                         db.DB_DUP | dbflags)
    setattr(self, dbmap, _db)

if not self.readonly:
    assoc = [
        (self.person_map, self.surnames, find_byte_surname),
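# The assoc list is cut off above; presumably each (primary, secondary,
# key_func) triple is wired together with DB.associate(), so Berkeley DB
# maintains the secondary index automatically. A minimal sketch under
# that assumption:
for (primary, secondary, key_func) in assoc:
    # key_func extracts the secondary key (e.g. the surname bytes)
    # from each primary record.
    primary.associate(secondary, key_func, db.DB_CREATE)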