How to use filelock - 10 common examples

To help you get started, we've selected ten filelock examples drawn from popular ways the library is used in public projects.

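If you are new to the library, the core API is small: construct a lock around a path, then use it as a context manager. A minimal sketch (the lock file name and timeout are placeholders):

from filelock import FileLock, Timeout

lock = FileLock("results.json.lock", timeout=10)  # placeholder path; wait up to 10 s

try:
    with lock:  # acquired on entry, released on exit
        print("lock acquired; safe to touch the shared resource")
except Timeout:
    print("another process held the lock for more than 10 s")

The examples below show how real projects build on this pattern.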

github dbader / pytest-mypy / src / pytest_mypy.py
def _cached_json_results(results_path, results_factory=None):
    """
    Read results from results_path if it exists;
    otherwise, produce them with results_factory,
    and write them to results_path.
    """
    with FileLock(results_path + '.lock'):
        try:
            with open(results_path, mode='r') as results_f:
                results = json.load(results_f)
        except FileNotFoundError:
            if not results_factory:
                raise
            results = results_factory()
            with open(results_path, mode='w') as results_f:
                json.dump(results, results_f)
    return results
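The lock deliberately spans both the read and the write: if two processes race past a missing cache file at the same time, only the first to take the lock runs results_factory() and writes the results; when the second gets its turn, the file exists and it simply reads them.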

github Digital-Sapphire / PyUpdater / tests / test_pyupdater.py
            if sys.platform == 'darwin' and windowed:
                app_run_command = './{}.app/Contents/MacOS/{}'.format(app_name,
                                                                      app_name)
                app_name = '{}.app'.format(app_name)

            if custom_dir:
                # update with custom_dir is multiprocessing-safe
                lock_path = 'pyu.lock'
            else:
                if not os.path.exists(appdirs.user_data_dir(APP_NAME)):
                    os.makedirs(appdirs.user_data_dir(APP_NAME))
                lock_path = os.path.join(appdirs.user_data_dir(APP_NAME),
                                         'pyu.lock')

            update_lock = filelock.FileLock(lock_path, LOCK_TIMEOUT)

            version_file = 'version2.txt'
            with update_lock.acquire(LOCK_TIMEOUT, 5):
                count = 0
                while count < 5:
                    # Call the binary to self update
                    subprocess.call(app_run_command, shell=True)
                    if os.path.exists(version_file):
                        break
                    count += 1
                    print("Retrying app launch!")
                    # Allow enough time for update process to complete.
                    time.sleep(AUTO_UPDATE_PAUSE)

            simpleserver.stop()
            # Detect if it was an overwrite error
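
Here both the constructor and acquire() take the timeout positionally, and acquire()'s second positional argument is the poll interval (how often the lock is re-checked while waiting; the keyword spelling has varied across filelock releases). A standalone sketch of the same idea with keywords spelled out (names and values are placeholders):

import filelock

LOCK_TIMEOUT = 30  # placeholder value

lock = filelock.FileLock("pyu.lock", timeout=LOCK_TIMEOUT)
try:
    # acquire() returns a context manager, so the lock is still
    # released cleanly when the block exits or raises
    with lock.acquire(timeout=LOCK_TIMEOUT):
        pass  # critical section, e.g. the self-update loop above
except filelock.Timeout:
    print("could not acquire pyu.lock within %s seconds" % LOCK_TIMEOUT)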

github conda / conda-build / tests / test_utils.py
def test_try_acquire_locks(testing_workdir):
    # Acquiring two unlocked locks should succeed.
    lock1 = filelock.FileLock(os.path.join(testing_workdir, 'lock1'))
    lock2 = filelock.FileLock(os.path.join(testing_workdir, 'lock2'))
    with utils.try_acquire_locks([lock1, lock2], timeout=1):
        pass

    # Acquiring the same lock twice should fail.
    lock1_copy = filelock.FileLock(os.path.join(testing_workdir, 'lock1'))
    # Also verify that the error message contains the word "lock", since we rely
    # on this elsewhere.
    with pytest.raises(BuildLockError, match='Failed to acquire all locks'):
        with utils.try_acquire_locks([lock1, lock1_copy], timeout=1):
            pass
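
try_acquire_locks is conda-build's own helper, not part of filelock. Its all-or-nothing idea can be sketched with the standard library's ExitStack (a hypothetical reimplementation for illustration, not conda-build's actual code):

from contextlib import ExitStack, contextmanager
import filelock

@contextmanager
def try_acquire_locks(locks, timeout):
    # hypothetical sketch: acquire every lock or raise filelock.Timeout,
    # releasing any locks already taken as the stack unwinds
    with ExitStack() as stack:
        for lock in locks:
            stack.enter_context(lock.acquire(timeout=timeout))
        yield

Acquiring the locks in a fixed order also keeps two processes that need the same set of locks from deadlocking.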

github mozilla / relman-auto-nag / auto_nag / db.py
def add(tool, mails, extra, result, ts=lmdutils.get_timestamp("now")):
    check(Email.__tablename__)
    with FileLock(lock_path):
        tool = Tool.get_or_create(tool)
        for mail in mails:
            session.add(Email(tool, ts, mail, extra, result))
        session.commit()
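
No timeout is passed here, and filelock's default of timeout=-1 means the call blocks until the lock becomes free, which is reasonable for a quick batch of database writes. If waiting indefinitely is not acceptable, pass a timeout as in the other examples.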

github WeblateOrg / weblate / weblate / trans / vcs.py
def __init__(self, path, branch=None, component=None, local=False):
    self.path = path
    if branch is None:
        self.branch = self.default_branch
    else:
        self.branch = branch
    self.component = component
    self.last_output = ''
    self.lock = FileLock(
        self.path.rstrip('/').rstrip('\\') + '.lock',
        timeout=120
    )
    if not local:
        # Create ssh wrapper for possible use
        create_ssh_wrapper()
    if not self.is_valid():
        self.init()
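
Setting timeout=120 on the constructor makes two minutes the default for every later acquisition, so code elsewhere in the class can simply write "with self.lock:". A minimal sketch of the same pattern (the path is a placeholder):

from filelock import FileLock, Timeout

lock = FileLock("/repos/project.lock", timeout=120)  # placeholder path

try:
    with lock:  # waits up to the constructor's 120 s, then raises Timeout
        pass  # repository operation
except Timeout:
    print("repository is locked by another process")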

github mechboxes / mech / mech / utils.py
                        del instances[k]
                        updated = True
            else:
                instances = {}
            instance_data = instances.get(instance_name)
            if not instance_data or force:
                if obj:
                    instance_data = instances[instance_name] = obj
                    updated = True
                else:
                    instance_data = {}
            if updated:
                with open(index_path, 'w') as fp:
                    json.dump(instances, fp, sort_keys=True, indent=2, separators=(',', ': '))
            return instance_data
    except Timeout:
        puts_err(colored.red(textwrap.fill("Couldn't access index, it seems locked.")))
        sys.exit(1)
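
The Timeout caught here is filelock's exception, raised by an acquire with a finite timeout earlier in the (truncated) function; catching it lets the CLI exit with a readable message instead of a traceback.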

github raiden-network / raiden / raiden / ui / cli.py
        sys.exit(ReturnCode.GENERIC_COMMUNICATION_ERROR)
    except EthNodeInterfaceError as e:
        click.secho(str(e), fg="red")
        sys.exit(ReturnCode.ETH_INTERFACE_ERROR)
    except RaidenUnrecoverableError as ex:
        click.secho(f"FATAL: An un-recoverable error happen, Raiden is bailing {ex}", fg="red")
        write_stack_trace(ex)
        sys.exit(ReturnCode.FATAL)
    except APIServerPortInUseError as ex:
        click.secho(
            f"ERROR: API Address {ex} is in use. Use --api-address  "
            f"to specify a different port.",
            fg="red",
        )
        sys.exit(ReturnCode.PORT_ALREADY_IN_USE)
    except filelock.Timeout:
        name_or_id = ID_TO_NETWORKNAME.get(kwargs["network_id"], kwargs["network_id"])
        click.secho(
            f"FATAL: Another Raiden instance already running for account "
            f"{to_checksum_address(address)} on network id {name_or_id}",
            fg="red",
        )
        sys.exit(1)
    except Exception as ex:
        write_stack_trace(ex)
        sys.exit(1)
    finally:
        # teardown order is important because of side-effects, both the
        # switch_monitor and profiler could use the tracing api, for the
        # teardown code to work correctly the teardown has to be done in the
        # reverse order of the initialization.
        if switch_monitor is not None:
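
Here the lock is not protecting a short critical section but enforcing a single Raiden instance per account: it is taken at startup and filelock.Timeout is translated into a clean exit. A condensed sketch of that single-instance pattern (path and message are placeholders):

import sys
import filelock

instance_lock = filelock.FileLock("account.lock")  # placeholder path

try:
    # timeout=0 makes a single attempt instead of waiting, so a
    # second instance fails immediately rather than hanging
    instance_lock.acquire(timeout=0)
except filelock.Timeout:
    print("FATAL: another instance is already running", file=sys.stderr)
    sys.exit(1)

# ... run the application; the lock is held until the process exits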

github izderadicka / mybookshelf2 / app / logic.py
def create_new_location(source, upload, move=False):
    base_dir = current_app.config['BOOKS_BASE_DIR']
    if isinstance(upload, model.Upload):
        new_file = os.path.join(current_app.config['UPLOAD_DIR'], upload.file)
    else:
        new_file = upload
    new_location = os.path.join(source.ebook.base_dir, os.path.basename(norm_file_name(source)))
    #if source.ebook.base_dir else norm_file_name(source) #TODO: Remove this WA
    ebook_dir = os.path.join(base_dir, os.path.split(new_location)[0])
    if not os.path.exists(ebook_dir):
        os.makedirs(ebook_dir, exist_ok=True)
    lock_file = os.path.join(ebook_dir, '.lock_this_dir')
    index = 1
    with filelock.SoftFileLock(lock_file, timeout=5):
        while os.path.exists(os.path.join(base_dir, new_location)):
            name, ext = os.path.splitext(new_location)
            new_location = name + '(%d)' % index + ext
            index += 1
        if move:
            shutil.move(new_file, os.path.join(base_dir, new_location))
        else:
            shutil.copy(new_file, os.path.join(base_dir, new_location))

    return new_location
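
SoftFileLock is the variant to reach for here: instead of OS-level locking it only checks whether the lock file exists, which works on filesystems without fcntl or Windows locking support (for example some network shares), at the cost that a crashed process can leave a stale lock file behind. A minimal sketch (the directory is a placeholder):

import filelock

# lock a directory by convention: every writer agrees to take the
# same soft lock before renaming or copying files inside it
with filelock.SoftFileLock("/data/books/.lock_this_dir", timeout=5):
    pass  # move or copy the ebook into its final location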

github nipype / pydra / pydra / engine / core.py
            if result is not None:
                return result
        # creating connections that were defined after adding tasks to the wf
        for task in self.graph.nodes:
            # if workflow has task_rerun=True and propagate_rerun=True,
            # it should be passed to the tasks
            if self.task_rerun and self.propagate_rerun:
                task.task_rerun = self.task_rerun
                # if the task is a wf, then the propagate_rerun should also be set
                if is_workflow(task):
                    task.propagate_rerun = self.propagate_rerun
            task.cache_locations = task._cache_locations + self.cache_locations
            self.create_connections(task)
        # TODO add signal handler for processes killed after lock acquisition
        self.hooks.pre_run(self)
        with SoftFileLock(lockfile):
            # # Let only one equivalent process run
            odir = self.output_dir
            if not self.can_resume and odir.exists():
                shutil.rmtree(odir)
            cwd = os.getcwd()
            odir.mkdir(parents=False, exist_ok=True if self.can_resume else False)
            self.audit.start_audit(odir=odir)
            result = Result(output=None, runtime=None, errored=False)
            self.hooks.pre_run_task(self)
            try:
                self.audit.monitor()
                await self._run_task(submitter, rerun=rerun)
                result.output = self._collect_outputs()
            except Exception as e:
                record_error(self.output_dir, e)
                result.errored = True
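
The TODO about processes killed after lock acquisition points at SoftFileLock's main caveat: since the lock is just a file's existence, a process killed inside the with block leaves the lock file behind, and later runs will wait on it until the stale file is removed.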

github nipype / pydra / pydra / engine / core.py
def _run(self, rerun=False, **kwargs):
    self.inputs = attr.evolve(self.inputs, **kwargs)
    self.inputs.check_fields_input_spec()
    checksum = self.checksum
    lockfile = self.cache_dir / (checksum + ".lock")
    # Eagerly retrieve cached - see scenarios in __init__()
    self.hooks.pre_run(self)
    # TODO add signal handler for processes killed after lock acquisition
    with SoftFileLock(lockfile):
        if not (rerun or self.task_rerun):
            result = self.result()
            if result is not None:
                return result
        # Let only one equivalent process run
        odir = self.output_dir
        if not self.can_resume and odir.exists():
            shutil.rmtree(odir)
        cwd = os.getcwd()
        odir.mkdir(parents=False, exist_ok=True if self.can_resume else False)
        orig_inputs = attr.asdict(self.inputs)
        map_copyfiles = copyfile_input(self.inputs, self.output_dir)
        modified_inputs = template_update(self.inputs, map_copyfiles)
        if modified_inputs:
            self.inputs = attr.evolve(self.inputs, **modified_inputs)
        self.audit.start_audit(odir)
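
Deriving the lock file name from the task's checksum means two processes computing an identical task serialize on the same lock, while unrelated tasks run in parallel. A reduced sketch of the naming scheme (cache directory and checksum are placeholders; recent filelock releases accept pathlib.Path objects):

from pathlib import Path
from filelock import SoftFileLock

cache_dir = Path("/tmp/task-cache")            # placeholder
checksum = "d41d8cd98f00b204e9800998ecf84271"  # placeholder content hash
cache_dir.mkdir(parents=True, exist_ok=True)

with SoftFileLock(cache_dir / (checksum + ".lock")):
    pass  # only one equivalent task runs; later runs reuse its cached result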

filelock

A platform-independent file lock.

License: Unlicense