import transaction
import BTrees.OOBTree

def _create_btree(self):
    conn = self._db.open()
    transaction.begin()
    root = conn.root
    if not hasattr(root, "data"):
        # First use: store a new OOBTree at the database root.
        conn.root.data = BTrees.OOBTree.OOBTree()
        transaction.commit()
    conn.close()
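For context, the same pattern works against a throwaway in-memory database. A minimal self-contained sketch (the `ZODB.DB(None)` in-memory storage is an assumption for illustration, not part of the snippet above):

import transaction
import ZODB
from BTrees.OOBTree import OOBTree

db = ZODB.DB(None)            # None -> in-memory MappingStorage
conn = db.open()
transaction.begin()
if not hasattr(conn.root, "data"):
    conn.root.data = OOBTree()
transaction.commit()
conn.close()
db.close()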
# Fragment of a graph-store update method; the opening condition is elided
# in the snippet, so a hypothetical "use_sparql_update" stands in for it.
if use_sparql_update:
    # SPARQL endpoint: issue a textual INSERT DATA update.
    if graph_name:
        s = " INSERT DATA { GRAPH " + graph_name.n3() + " {" + gs + " } } "
    else:
        s = " INSERT DATA { " + gs + " } "
    L.debug("update query = " + s)
    self.conf['rdf.graph'].update(s)
else:
    # Otherwise add the triples directly; for a ZODB-backed store, bracket
    # the writes with commit()/begin() pairs.
    gr = self.conf['rdf.graph']
    if self.conf['rdf.source'] == 'ZODB':
        transaction.commit()
        transaction.begin()
    for x in g:
        gr.add(x)
    if self.conf['rdf.source'] == 'ZODB':
        transaction.commit()
        transaction.begin()
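Reduced to just the transaction handling, the bulk-add branch follows this shape (a sketch; the graph API is illustrative):

import transaction

def add_triples(graph, triples):
    # Flush anything pending, then do the bulk add in its own transaction.
    transaction.commit()
    transaction.begin()
    for t in triples:
        graph.add(t)
    transaction.commit()
    transaction.begin()   # leave a fresh transaction open for the caller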
def beforeSetUp(self):
    '''Called before the ZODB connection is opened,
    at the start of setUp(). By default begins
    a new transaction.
    '''
    transaction.begin()
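A hook like this pairs naturally with an abort in teardown, so each test runs in its own throwaway transaction. A minimal sketch (the class and in-memory storage are assumptions for illustration):

import unittest
import transaction
import ZODB

class TransactionalTestCase(unittest.TestCase):
    def setUp(self):
        self.db = ZODB.DB(None)   # in-memory storage, an assumption for the sketch
        self.conn = self.db.open()
        transaction.begin()       # each test gets a fresh transaction

    def tearDown(self):
        transaction.abort()       # discard whatever the test changed
        self.conn.close()
        self.db.close()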
# Reindexing fragment: the enclosing loop is elided, so a hypothetical
# "catalog_brains" iterable stands in; "ob", "count", "ids", "site",
# "catalog" and "es" come from the elided surrounding code.
for brain in catalog_brains:
    try:
        uid = IUUID(ob)
        index[uid] = ob
    except TypeError:
        print('Could not get UID of %s' % brain.getPath())
        continue
    if uid in ids:
        # remove from uids... When all said and done,
        # we'll make sure the uids left are in fact no longer on the
        # system and remove them from es
        ids.remove(uid)
    if len(index) > 300:
        print('finished indexing %i' % count)
        index_batch([], index, [], es)
        site._p_jar.invalidateCache()  # noqa
        transaction.begin()
        site._p_jar.sync()  # noqa
        index = {}
index_batch([], index, [], es)
remove = []
for uid in ids:
    brains = catalog(UID=uid)
    if len(brains) == 0:
        remove.append(uid)
index_batch(remove, {}, [], es)
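The flush-every-300 step above is what keeps memory bounded on large sites. Condensed to just the cache-management moves (index_batch is the snippet's own helper; the rest is the stock ZODB connection API):

import transaction

def flush_and_reset(site, index, es):
    index_batch([], index, [], es)    # push the pending batch (snippet's helper)
    site._p_jar.invalidateCache()     # evict loaded objects from the pickle cache
    transaction.begin()               # start a clean transaction
    site._p_jar.sync()                # pick up commits from other processes
    return {}                         # caller replaces its batch dict with this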
def group_put(self, group_id, data):
    """Put a single group's data back to the Vault"""
    transaction.begin()
    try:
        grp = query(Group).filter_by(id=group_id).one()
    except InvalidReq as e:
        return vaultMsg(False, "Group not found: %s" % str(e))

    ug = query(UserGroup).filter_by(user_id=self.myself_id,
                                    group_id=group_id).first()
    if not ug:
        return vaultMsg(False, "Cannot write to group: not member of group")

    if 'name' in data:
        grp.name = data['name']
    if 'hidden' in data:
        newhidden = bool(data['hidden'])
        # TODO: these checks must go in the XML-RPC controller.
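The snippet is truncated before the matching commit; the usual shape of such a begin/commit pairing looks like this (a sketch of the pattern, not SFLvault's actual code):

import transaction

def update_record(record, new_name):
    transaction.begin()
    try:
        record.name = new_name
        transaction.commit()      # persist the change
    except Exception:
        transaction.abort()       # roll back on any failure
        raise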
def clear_object_cache(ob):
    """Invalidate the ZODB cache for ob's connection and resync it."""
    ob._p_jar.invalidateCache()
    transaction.begin()
    ob._p_jar.sync()
# CLDF-export fragment (from inside a writer method): "req", "outfile",
# "ds", "cldf_cfg", "tmpd" and "verbose" are defined by the elided
# surrounding code, which also imports shutil and collections.defaultdict.
fname = outfile or self.abspath(req)
transaction.abort()
tabledata = defaultdict(list)
for table, model in [
    ('ParameterTable', Parameter),
    ('CodeTable', DomainElement),
    ('LanguageTable', Language),
    ('ExampleTable', Sentence),
    ('contributions.csv', Contribution),
    (ds.primary_table, Value),
]:
    if verbose:
        print('exporting {0} ...'.format(model))
    transaction.begin()   # fresh transaction per table query
    for item in cldf_cfg.query(model):
        tabledata[table].append(cldf_cfg.convert(model, item, req))
    transaction.abort()   # read-only: nothing to persist
    if verbose:
        print('... done')
transaction.begin()
ds.write(**cldf_cfg.custom_tabledata(req, tabledata))
ds.validate()
shutil.make_archive(str(fname.parent / fname.stem), 'zip', str(tmpd))
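Each table is read inside its own begin()/abort() pair, so every query sees a fresh view and nothing is accidentally persisted. The core of that pattern, reduced to a sketch (function names are illustrative):

import transaction

def read_only_batch(query_items, convert):
    transaction.begin()            # fresh view of the database
    try:
        return [convert(item) for item in query_items()]
    finally:
        transaction.abort()        # read-only: nothing to commit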
def do(db, f, args):
    """Do something in a transaction, retrying if necessary.

    Measure the speed of both the computation and the commit.
    """
    from ZODB.POSException import ConflictError
    wcomp = ccomp = wcommit = ccommit = 0.0
    rconflicts = wconflicts = 0
    start = time.time()
    while 1:
        connection = db.open()
        try:
            transaction.begin()
            t = time.time()
            c = time.process_time()   # time.clock() was removed in Python 3.8
            try:
                try:
                    r = f(connection, *args)
                except ConflictError:
                    # Read conflict: abort and retry the whole computation.
                    rconflicts += 1
                    transaction.abort()
                    continue
            finally:
                wcomp += time.time() - t
                ccomp += time.process_time() - c
            t = time.time()
            c = time.process_time()
            try:
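Stripped of the timing instrumentation, the retry loop reduces to this pattern (a sketch of the technique, not the benchmark's exact code):

import transaction
from ZODB.POSException import ConflictError

def run_with_retry(db, f, *args):
    while True:
        connection = db.open()
        try:
            transaction.begin()
            result = f(connection, *args)
            transaction.commit()
            return result
        except ConflictError:
            transaction.abort()    # a concurrent commit won; try again
        finally:
            connection.close()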