How to use the pyodm.utils.AtomicCounter class in pyodm

To help you get started, we’ve selected a few pyodm examples based on popular ways AtomicCounter is used in public projects.
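AtomicCounter is a small thread-safe counter: increment() takes an internal lock, adds to the value, and returns the new total, so threads can tally shared progress without losing updates. The sketch below shows the general shape of such a counter and a quick usage check; the increment(num=1) signature matches how the examples on this page use the class, but treat the body as an illustration rather than the exact pyodm.utils source.

import threading

class AtomicCounter:
    """A thread-safe counter (a sketch of what pyodm.utils provides)."""
    def __init__(self, initial=0):
        self.value = initial
        self._lock = threading.Lock()

    def increment(self, num=1):
        # Hold the lock so concurrent increments never lose an update
        with self._lock:
            self.value += num
            return self.value

# Usage: ten threads each increment the shared counter 1000 times
counter = AtomicCounter(0)
threads = [threading.Thread(target=lambda: [counter.increment() for _ in range(1000)])
           for _ in range(10)]
for t in threads: t.start()
for t in threads: t.join()
print(counter.value)  # always 10000, never a lost update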


github OpenDroneMap / PyODM / pyodm / api.py
        if outputs:
            fields['outputs'] = json.dumps(outputs)

        e = MultipartEncoder(fields=fields)

        result = self.post('/task/new/init', data=e, headers={'Content-Type': e.content_type})
        if isinstance(result, dict) and 'error' in result:
            raise NodeResponseError(result['error'])
        
        if isinstance(result, dict) and 'uuid' in result:
            uuid = result['uuid']
            progress_event = None

            class nonloc:
                uploaded_files = AtomicCounter(0)
                error = None

            # Equivalent to passing the open file descriptor, since requests
            # eventually calls read(), but this way we make sure to close
            # each file before opening the next, so we don't hit the OS limit
            # on open files
            def read_file(file_path):
                with open(file_path, 'rb') as f:
                    return f.read()

            # Upload
            def worker():
                while True:
                    task = q.get()
                    if task is None or nonloc.error is not None:
                        q.task_done()
                        break
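In this first example, AtomicCounter backs the upload progress reporting: file paths are pushed onto a queue.Queue, worker threads pull them and POST the contents, and each successful upload bumps nonloc.uploaded_files so the progress callback sees a consistent count. A condensed, self-contained sketch of that worker pattern follows; upload_file is a hypothetical stand-in for the real request to the node.

import queue
import threading

from pyodm.utils import AtomicCounter

def upload_all(file_paths, num_workers=4):
    q = queue.Queue()
    uploaded = AtomicCounter(0)  # shared progress counter
    errors = []

    def upload_file(path):
        pass  # hypothetical: read the file and POST it to the node

    def worker():
        while True:
            path = q.get()
            if path is None or errors:  # None is the shutdown sentinel
                q.task_done()
                break
            try:
                upload_file(path)
                uploaded.increment()
            except Exception as e:
                errors.append(e)
            finally:
                q.task_done()

    threads = [threading.Thread(target=worker) for _ in range(num_workers)]
    for t in threads: t.start()
    for path in file_paths: q.put(path)
    for _ in threads: q.put(None)  # one sentinel per worker
    for t in threads: t.join()
    return uploaded.value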
github OpenDroneMap / ODM / opendm / remote.py
    def run(self, taskClass):
        if not self.project_paths:
            return

        # Shared variables across threads
        class nonloc:
            error = None
            local_processing = False
            max_remote_tasks = None
        
        calculate_task_limit_lock = threading.Lock()
        finished_tasks = AtomicCounter(0)
        remote_running_tasks = AtomicCounter(0)

        # Create queue
        q = queue.Queue()
        for pp in self.project_paths:
            log.ODM_INFO("LRE: Adding to queue %s" % pp)
            q.put(taskClass(pp, self.node, self.params))

        def remove_task_safe(task):
            try:
                removed = task.remove()
            except exceptions.OdmError:
                removed = False
            return removed
        
        def cleanup_remote_tasks():
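This example shares state between the main thread and its workers in three ways: a class nonloc namespace that nested functions can assign to (a trick that works even without Python 3's nonlocal), AtomicCounter for simple counts such as finished_tasks, and a plain threading.Lock for the multi-step task-limit calculation, which a single atomic increment cannot cover. Below is a minimal sketch of that idiom, assuming the increment(num) interface shown earlier; the worker body is a hypothetical placeholder.

import threading

from pyodm.utils import AtomicCounter

def run_all(task_ids):
    # Mutable namespace shared with the closures below; assigning to
    # nonloc.error works from nested functions without 'nonlocal'
    class nonloc:
        error = None

    finished = AtomicCounter(0)
    running = AtomicCounter(0)
    limit_lock = threading.Lock()

    def worker(task_id):
        running.increment()
        try:
            pass  # hypothetical: process the task here
        except Exception as e:
            nonloc.error = e
        finally:
            running.increment(-1)
            finished.increment()
            with limit_lock:
                # A read-compute-write section over several values still
                # needs a real lock; AtomicCounter only protects one update
                print("finished %d, running %d" % (finished.value, running.value))

    threads = [threading.Thread(target=worker, args=(t,)) for t in task_ids]
    for t in threads: t.start()
    for t in threads: t.join()
    return nonloc.error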
github OpenDroneMap / PyODM / pyodm / api.py
            # Keep track of download progress (if possible)
            content_length = download_stream.headers.get('content-length')
            total_length = int(content_length) if content_length is not None else None
            downloaded = 0
            chunk_size = int(parallel_chunks_size * 1024 * 1024)
            use_fallback = False
            accept_ranges = headers.get('accept-ranges')

            # Can we do parallel downloads?
            if accept_ranges is not None and accept_ranges.lower() == 'bytes' and total_length is not None and total_length > chunk_size and parallel_downloads > 1:
                num_chunks = int(math.ceil(total_length / float(chunk_size)))
                num_workers = parallel_downloads

                class nonloc:
                    completed_chunks = AtomicCounter(0)
                    merge_chunks = [False] * num_chunks
                    error = None

                def merge():
                    current_chunk = 0

                    with open(zip_path, "wb") as out_file:
                        while current_chunk < num_chunks and nonloc.error is None:
                            if nonloc.merge_chunks[current_chunk]:
                                chunk_file = "%s.part%s" % (zip_path, current_chunk)
                                with open(chunk_file, "rb") as fd:
                                    out_file.write(fd.read())

                                os.unlink(chunk_file)
                                
                                current_chunk += 1
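In this last example, AtomicCounter tracks how many download chunks have finished. When the server advertises Accept-Ranges: bytes and the payload is larger than one chunk, the code fetches byte ranges on parallel workers, writes each to a .partN file, and the merge() loop appends parts to the final zip as their merge_chunks flags flip to True. Below is a simplified, self-contained version of the idea; parallel_download and its parameters are illustrative rather than the PyODM API, and it joins all workers before merging instead of streaming parts as they complete.

import math
import os
import threading

import requests
from pyodm.utils import AtomicCounter

def parallel_download(url, out_path, total_length, chunk_size=16 * 1024 * 1024):
    num_chunks = int(math.ceil(total_length / float(chunk_size)))
    completed = AtomicCounter(0)

    def fetch(chunk_num):
        start = chunk_num * chunk_size
        end = min(start + chunk_size, total_length) - 1
        # The Range header asks the server for just this byte window
        res = requests.get(url, headers={'Range': 'bytes=%d-%d' % (start, end)})
        with open("%s.part%s" % (out_path, chunk_num), 'wb') as f:
            f.write(res.content)
        completed.increment()

    threads = [threading.Thread(target=fetch, args=(i,)) for i in range(num_chunks)]
    for t in threads: t.start()
    for t in threads: t.join()

    # Every chunk is on disk; concatenate the parts in order
    assert completed.value == num_chunks
    with open(out_path, 'wb') as out_file:
        for i in range(num_chunks):
            part = "%s.part%s" % (out_path, i)
            with open(part, 'rb') as fd:
                out_file.write(fd.read())
            os.unlink(part)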