How to use the bsddb3.db.DB_INIT_TXN flag in bsddb3

To help you get started, we’ve selected a few bsddb3 examples based on popular ways it is used in public projects.

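DB_INIT_TXN is an environment flag rather than a function: OR-ing it into the flags passed to DBEnv.open() enables Berkeley DB's transaction subsystem, which also needs the memory pool and, for durability, the logging subsystem. Before the project examples, here is a minimal, self-contained sketch of the idea; the home directory, database name and keys are hypothetical:

import os
from bsddb3 import db

home = "/tmp/bsddb3-txn-demo"  # hypothetical environment directory
os.makedirs(home, exist_ok=True)

env = db.DBEnv()
env.open(home,
         db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
         db.DB_INIT_LOG | db.DB_INIT_TXN)

d = db.DB(env)
txn = env.txn_begin()                # start a transaction
d.open("demo.db", dbtype=db.DB_HASH, flags=db.DB_CREATE, txn=txn)
d.put(b"key", b"value", txn=txn)     # write under the transaction
txn.commit()                         # atomically make the write durable

d.close()
env.close()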

github RDFLib / rdflib / rdflib / store / BerkeleyDB.py
def _init_db_environment(self, homeDir, create=True):
        #NOTE: The identifier is appended to the path as the location for the db
        #This provides proper isolation for stores which have the same path but different identifiers
        
        if SUPPORT_MULTIPLE_STORE_ENVIRON:
            fullDir = join(homeDir,self.identifier)
        else:
            fullDir = homeDir
        envsetflags  = db.DB_CDB_ALLDB
        envflags = db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_THREAD | db.DB_INIT_TXN | db.DB_RECOVER
        if not exists(fullDir):
            if create:
                makedirs(fullDir)
                self.create(fullDir)
            else:                
                return NO_STORE

        db_env = db.DBEnv()
        db_env.set_cachesize(0, 1024*1024*50) # TODO
        
        # enable deadlock-detection
        db_env.set_lk_detect(db.DB_LOCK_MAXLOCKS)
        
        # increase the number of locks, this is correlated to the size (num triples) that 
        # can be added/removed with a single transaction
        db_env.set_lk_max_locks(self.__locks)
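
The excerpt ends before envsetflags and envflags are used; a plausible continuation, mirroring how such environments are typically opened (adding DB_CREATE here is an assumption):

        db_env.set_flags(envsetflags, 1)
        db_env.open(fullDir, envflags | db.DB_CREATE)
        return db_env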

github RDFLib / rdfextras / rdfextras / store / BDBOptimized.py
def _init_db_environment(self, homeDir, create=True):
        """
        NOTE: The identifier is appended to the path as the location for the 
        db. This provides proper isolation for stores which have the same 
        path but different identifiers
        """
        
        if SUPPORT_MULTIPLE_STORE_ENVIRON:
            fullDir = join(homeDir,self.identifier)
        else:
            fullDir = homeDir
        envsetflags  = db.DB_CDB_ALLDB
        envflags = db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_THREAD | db.DB_INIT_TXN | db.DB_RECOVER
        if not exists(fullDir):
            if create:
                makedirs(fullDir)
                self.create(fullDir)
            else:
                return NO_STORE
        
        db_env = db.DBEnv()
        db_env.set_cachesize(0, 1024*1024*100) # TODO
        
        # enable deadlock-detection
        db_env.set_lk_detect(db.DB_LOCK_MAXLOCKS)
        
        # increase the number of locks, this is correlated to the size (num triples) that
        # can be added/removed with a single transaction
        db_env.set_lk_max_locks(self.__locks)
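
The lock comment deserves an illustration: locks taken by a transaction are held until it commits, so a bulk update needs a lock budget proportional to its size. A sketch under that assumption, with a hypothetical database name:

        txn = db_env.txn_begin()
        store = db.DB(db_env)
        store.open("triples.db", dbtype=db.DB_HASH,
                   flags=db.DB_CREATE, txn=txn)
        for i in range(1000):
            # each put pins page locks that are only released at commit
            store.put(("spo%d" % i).encode(), b"...", txn=txn)
        txn.commit()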

github tdhock / SegAnnDB / plotter / db.py
# AVOID out of memory Locker errors using apt-get install db-util. We
# can run db_recover -h env from the command line to reset locks and
# lockers to zero. NOTE: we don't do this here, but instead do this
# before the server and daemons are started.

#os.system("db_recover -h %s"%DB_HOME)

env = bsddb3.db.DBEnv()
env.open(
    DB_HOME,
    bsddb3.db.DB_INIT_MPOOL |
    # bsddb3.db.DB_INIT_CDB|
    bsddb3.db.DB_INIT_LOCK |
    bsddb3.db.DB_INIT_TXN |
    bsddb3.db.DB_INIT_LOG |
    bsddb3.db.DB_CREATE)
# print "nlockers=%(nlockers)s"%env.lock_stat()

CLOSE_ON_EXIT = []

# this prevents lockers/locks from accumulating when python is closed
# normally, but does not prevent this when we C-c out of the server.


def close_db():
    for db in CLOSE_ON_EXIT:
        db.close()
    env.close()
atexit.register(close_db)
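
The commented-out print above uses Python 2 syntax; DBEnv.lock_stat() returns a dict, so the equivalent check in Python 3 would be:

print("nlockers=%(nlockers)s" % env.lock_stat())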

github dokipen / bitten / bitten / upgrades.py
    from bitten.model import Report
    from bitten.util import xmlio
    try:
        from bsddb3 import db as bdb
        import dbxml
    except ImportError:
        return

    dbfile = os.path.join(env.path, 'db', 'bitten.dbxml')
    if not os.path.isfile(dbfile):
        return

    dbenv = bdb.DBEnv()
    dbenv.open(os.path.dirname(dbfile),
               bdb.DB_CREATE | bdb.DB_INIT_LOCK | bdb.DB_INIT_LOG |
               bdb.DB_INIT_MPOOL | bdb.DB_INIT_TXN, 0)

    mgr = dbxml.XmlManager(dbenv, 0)
    xtn = mgr.createTransaction()
    container = mgr.openContainer(dbfile, dbxml.DBXML_TRANSACTIONAL)

    def get_pylint_items(xml):
        for problems_elem in xml.children('problems'):
            for problem_elem in problems_elem.children('problem'):
                item = {'type': 'problem'}
                item.update(problem_elem.attr)
                yield item

    def get_trace_items(xml):
        for cov_elem in xml.children('coverage'):
            item = {'type': 'coverage', 'name': cov_elem.attr['module'],
                    'file': cov_elem.attr['file'],

github freenas / freenas / gui / common / freenascache.py
def __init__(self, cachedir=FREENAS_CACHEDIR):
        log.debug("FreeNAS_BaseCache._init__: enter")

        self.cachedir = cachedir
        self.__cachefile = os.path.join(self.cachedir, ".cache.db")

        if not self.__dir_exists(self.cachedir):
            os.makedirs(self.cachedir)

        flags = db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_LOG | \
            db.DB_INIT_MPOOL | db.DB_INIT_TXN

        self.__dbenv = db.DBEnv()
        self.__dbenv.open(
            self.cachedir,
            flags,
            0o700
        )

        self.__cache = db.DB(self.__dbenv)
        self.__cache.open(self.__cachefile, None, db.DB_HASH, db.DB_CREATE)

        log.debug("FreeNAS_BaseCache._init__: cachedir = %s", self.cachedir)
        log.debug(
            "FreeNAS_BaseCache._init__: cachefile = %s",
            self.__cachefile
        )
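
Nothing in the excerpt releases the handles; a minimal teardown sketch (the close method is hypothetical), closing the database before its environment as Berkeley DB requires:

def close(self):
        # close the cache DB handle first, then the environment
        self.__cache.close()
        self.__dbenv.close()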

github zopefoundation / Zope / lib / python / BDBStorage / base.py
def envFromString(name):
    try:
        if not os.path.exists(name):
            os.mkdir(name)
    except OSError:
        raise BerkeleyDBError(
            "Error creating BerkeleyDB environment dir: %s" % name)
    e = DBEnv()
    e.set_lk_max(10000)  # this can be overridden in the DB_CONFIG file
    try:
        e.open(name,
               db.DB_CREATE | db.DB_RECOVER
               | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_TXN
               )
    except DBError as msg:
        raise BerkeleyDBError("%s (%s)" % (BerkeleyDBError.__doc__, msg))
    return e
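
A caller then gets a fully transactional environment from a directory path. Because the flags include DB_RECOVER, recovery runs on every call, so no other process may have the environment open at that moment. Hypothetical usage:

env = envFromString('/var/tmp/zodb-bdb')  # hypothetical path
txn = env.txn_begin()
# ... transactional work against databases opened in env ...
txn.commit()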

github examachine / pisi / pisi / db / lockeddbshelve.py
def init_dbenv(write=False, writeversion=False):
    if os.access(pisi.context.config.db_dir(), os.R_OK):
        # try to read version
        check_dbversion('dbversion', pisi.__dbversion__, write=write, update=writeversion)
        check_dbversion('filesdbversion', pisi.__filesdbversion__, write=write, update=writeversion)
    else:
        raise Error(_('Cannot attain read access to database environment'))
    if write:
        if os.access(pisi.context.config.db_dir(), os.W_OK):
            lock_dbenv()
            ctx.dbenv = dbobj.DBEnv()
            flags =  (db.DB_INIT_MPOOL |      # cache
                      db.DB_INIT_TXN |        # transaction subsystem
                      db.DB_INIT_LOG |        # logging subsystem
                      #db.DB_INIT_LOCK |       # locking subsystem
                      db.DB_RECOVER |         # run normal recovery
                      db.DB_CREATE)           # allow db to create files
            # TODO: consider DB_THREAD
            ctx.dbenv.set_cachesize(0, 4*1024*1024)
            ctx.dbenv.open(pisi.context.config.db_dir(), flags)
        else:
            raise Error(_("Cannot write to PISI database."))
    else:
        ctx.dbenv = None # read-only access to database
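
Note that DB_INIT_LOCK is commented out, so this environment is not safe for concurrent writers; databases opened in it can still get per-operation transactions with DB_AUTO_COMMIT. A sketch with a hypothetical database name:

pkg_db = dbobj.DB(ctx.dbenv)
# DB_AUTO_COMMIT wraps each individual operation in its own transaction
pkg_db.open('packages.bdb', dbtype=db.DB_BTREE,
            flags=db.DB_CREATE | db.DB_AUTO_COMMIT)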

github gramps-project / gramps / gramps / plugins / db / bsddb / summary.py
    bdbversion_file = os.path.join(dirpath, BDBVERSFN)
    if os.path.isfile(bdbversion_file):
        with open(bdbversion_file) as vers_file:
            bsddb_version = vers_file.readline().strip()
    else:
        return "Unknown", "Unknown", "Unknown"

    current_bsddb_version = str(db.version())
    if bsddb_version != current_bsddb_version:
        return "Unknown", bsddb_version, "Unknown"

    env = db.DBEnv()
    flags = db.DB_CREATE | db.DB_PRIVATE |\
        db.DB_INIT_MPOOL |\
        db.DB_INIT_LOG | db.DB_INIT_TXN
    try:
        env.open(dirpath, flags)
    except Exception as msg:
        LOG.warning("Error opening db environment for '%s': %s" %
                    (name, str(msg)))
        try:
            env.close()
        except Exception as msg:
            LOG.warning("Error closing db environment for '%s': %s" %
                    (name, str(msg)))
        return "Unknown", bsddb_version, "Unknown"
    dbmap1 = dbshelve.DBShelf(env)
    fname = os.path.join(dirpath, META + ".db")
    try:
        dbmap1.open(fname, META, db.DB_HASH, db.DB_RDONLY)
    except:
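
The excerpt breaks off inside the final try/except; a plausible completion, mirroring the error handling used just above for the environment open:

        LOG.warning("Error opening metadata for '%s'" % name)
        env.close()
        return "Unknown", bsddb_version, "Unknown"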

github RDFLib / rdflib / rdflib / store / BDBOptimized.py
def _init_db_environment(self, homeDir, create=True):
        #NOTE: The identifier is appended to the path as the location for the db
        #This provides proper isolation for stores which have the same path but different identifiers
        
        if SUPPORT_MULTIPLE_STORE_ENVIRON:
            fullDir = join(homeDir,self.identifier)
        else:
            fullDir = homeDir
        envsetflags  = db.DB_CDB_ALLDB
        envflags = db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_THREAD | db.DB_INIT_TXN | db.DB_RECOVER
        if not exists(fullDir):
            if create:
                makedirs(fullDir)
                self.create(fullDir)
            else:                
                return NO_STORE

        db_env = db.DBEnv()
        db_env.set_cachesize(0, 1024*1024*50) # TODO
        
        # enable deadlock-detection
        db_env.set_lk_detect(db.DB_LOCK_MAXLOCKS)
        
        # increase the number of locks, this is correlated to the size (num triples) that 
        # can be added/removed with a single transaction
        db_env.set_lk_max_locks(self.__locks)