How to use the protobuf.server_pb2.Response message class in protobuf

To help you get started, we've selected a few protobuf examples based on popular ways it's used in public projects. All of the snippets below come from the HyperionGray/starbelly project, which builds Response messages in the handlers of its WebSocket API server.

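Every example below follows the same basic lifecycle for a generated message class like Response: construct it, assign fields, and serialize it to bytes for the wire. Here is a minimal sketch, assuming starbelly's generated server_pb2 module is importable as in the project and that request_id is the message's only required field; the concrete values are made up for illustration.

from protobuf import server_pb2

response = server_pb2.Response()
response.request_id = 1          # hypothetical value; the field appears in the snippets below
response.is_success = True

data = response.SerializeToString()    # wire-format bytes

parsed = server_pb2.Response()
parsed.ParseFromString(data)           # round-trip back into a message object
assert parsed.is_success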

From HyperionGray/starbelly, starbelly/server.py (view on GitHub):
async def _list_policies(self, command, socket):
    ''' Get a list of policies. '''
    limit = command.page.limit
    offset = command.page.offset
    count, policies = await self._policy_manager.list_policies(
        limit, offset)

    response = Response()
    response.list_policies.total = count

    for policy_doc in policies:
        policy = response.list_policies.policies.add()
        policy.policy_id = UUID(policy_doc['id']).bytes
        policy.name = policy_doc['name']
        policy.created_at = policy_doc['created_at'].isoformat()
        policy.updated_at = policy_doc['updated_at'].isoformat()

    return response
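Two details worth noting in this handler: repeated message fields grow with add(), which appends a new sub-message and returns it for field assignment, and string UUIDs are packed into a proto bytes field through the standard library's uuid module. The round-trip looks like this (the UUID value is made up):

from uuid import UUID

raw = UUID('0dd1b0d0-2c07-4c05-bcbb-ef2b66e38577').bytes   # 16 raw bytes for the proto bytes field
assert len(raw) == 16
assert str(UUID(bytes=raw)) == '0dd1b0d0-2c07-4c05-bcbb-ef2b66e38577'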
From HyperionGray/starbelly, starbelly/server.py (view on GitHub):
async def _set_captcha_solver(self, command, socket):
    ''' Create or update CAPTCHA solver. '''
    now = datetime.now(tzlocal())
    doc = captcha_pb_to_doc(command.solver)
    doc['updated_at'] = now
    response = Response()
    async with self._db_pool.connection() as conn:
        if command.solver.HasField('solver_id'):
            await r.table('captcha_solver').update(doc).run(conn)
        else:
            doc['created_at'] = now
            result = await r.table('captcha_solver').insert(doc).run(conn)
            solver_id = result['generated_keys'][0]
            response.new_solver.solver_id = UUID(solver_id).bytes

    return response
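The HasField() call is what lets one handler cover both the create and the update path: it reports whether a singular field or sub-message was explicitly set on the incoming message. A small sketch of the same presence semantics, reusing Response and the new_solver field from this snippet:

response = Response()
assert not response.HasField('new_solver')     # sub-message not set yet
response.new_solver.solver_id = b'\x00' * 16   # assigning any field marks the sub-message as set
assert response.HasField('new_solver')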
From HyperionGray/starbelly, starbelly/server/server.py (view on GitHub):
    response.request_id = request.request_id
    response.is_success = True
    elapsed = time() - start
    logger.info('Request OK %s %s %0.3fs', command_name, client_ip,
        elapsed)
except asyncio.CancelledError:
    raise
except Exception as e:
    if isinstance(e, InvalidRequestException):
        elapsed = time() - start
        logger.error('Request ERROR %s %s %0.3fs', command_name,
            client_ip, elapsed)
    else:
        logger.exception('Exception while handling request:\n%r',
            request)
    response = Response()
    response.is_success = False
    response.error_message = str(e)
    try:
        response.request_id = request.request_id
    except Exception:
        # A parsing failure could lead to request or request_id not
        # being defined. There's nothing we can do to fix this.
        pass

if response.IsInitialized():
    message = ServerMessage()
    message.response.MergeFrom(response)
    message_data = message.SerializeToString()
    await websocket.send(message_data)
else:
    # This could happen, e.g. if the request_id is not set.
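The IsInitialized() guard only does real work under proto2 semantics, where a .proto can declare fields required; that is consistent with this handler, where a parse failure can leave request_id unset. A sketch of what the guard avoids, assuming Response declares request_id as a required field:

from google.protobuf.message import EncodeError

incomplete = Response()                 # required request_id never set (proto2 assumption)
assert not incomplete.IsInitialized()   # reports the missing required field
try:
    incomplete.SerializeToString()      # would raise on the wire
except EncodeError:
    pass                                # this is the crash the guard prevents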
From HyperionGray/starbelly, starbelly/server.py (view on GitHub):
async def _get_policy(self, command, socket):
    ''' Get a single policy. '''
    policy_id = str(UUID(bytes=command.policy_id))
    policy_doc = await self._policy_manager.get_policy(policy_id)
    response = Response()
    Policy.convert_doc_to_pb(policy_doc, response.policy)
    return response
From HyperionGray/starbelly, starbelly/server.py (view on GitHub):
async def _get_job_items(self, command, socket):
    ''' Get a page of items (crawl responses) from a job. '''
    job_id = str(UUID(bytes=command.job_id))
    limit = command.page.limit
    offset = command.page.offset
    total_items, item_docs = await self._crawl_manager.get_job_items(
        job_id, command.include_success, command.include_error,
        command.include_exception, limit, offset)
    response = Response()
    response.list_items.total = total_items
    compression_ok = command.compression_ok
    for item_doc in item_docs:
        item = response.list_items.items.add()

        if item_doc['join'] is None:
            item.is_body_compressed = False
        elif item_doc['join']['is_compressed'] and not compression_ok:
            item.body = gzip.decompress(item_doc['join']['body'])
            item.is_body_compressed = False
        else:
            item.body = item_doc['join']['body']
            item.is_body_compressed = item_doc['join']['is_compressed']
        if 'content_type' in item_doc:
            item.content_type = item_doc['content_type']
        if 'exception' in item_doc:
From HyperionGray/starbelly, starbelly/server.py (view on GitHub):
    seeds = [s.strip() for s in command.seeds]
    tags = tags or []

    if name.strip() == '':
        url = urlparse(seeds[0])
        name = url.hostname
        if len(seeds) > 1:
            name += ' & {} more'.format(len(seeds) - 1)

    if command.run_state != protobuf.shared_pb2.RUNNING:
        raise InvalidRequestException(
            'New job state must be set to RUNNING')

    job_id = await self._crawl_manager.start_job(seeds, policy_id, name,
        tags)
    response = Response()
    response.new_job.job_id = UUID(job_id).bytes

return response
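The run-state comparison works because generated enum values like protobuf.shared_pb2.RUNNING are plain module-level integer constants. A quick check, assuming the project's shared_pb2 module:

from protobuf import shared_pb2

# Generated enum values are module-level integer constants.
assert isinstance(shared_pb2.RUNNING, int)
assert shared_pb2.RUNNING != shared_pb2.PAUSED   # PAUSED and CANCELLED appear in a later snippet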
From HyperionGray/starbelly, starbelly/server.py (view on GitHub):
async def _set_job_schedule(self, command, socket):
    ''' Create or update job schedule metadata. '''
    doc = Scheduler.pb_to_doc(command.job_schedule)
    schedule_id = await self._scheduler.set_job_schedule(doc)
    response = Response()
    if schedule_id is not None:
        response.new_job_schedule.schedule_id = UUID(schedule_id).bytes
    return response
From HyperionGray/starbelly, starbelly/server.py (view on GitHub):
    name = command.name if command.HasField('name') else None
    await self._crawl_manager.update_job(job_id, name, tags)

    if command.HasField('run_state'):
        run_state = command.run_state
        if run_state == protobuf.shared_pb2.CANCELLED:
            await self._crawl_manager.cancel_job(job_id)
        elif run_state == protobuf.shared_pb2.PAUSED:
            await self._crawl_manager.pause_job(job_id)
        elif run_state == protobuf.shared_pb2.RUNNING:
            await self._crawl_manager.resume_job(job_id)
        else:
            raise Exception('Not allowed to set job run state: {}'
                .format(run_state))

    response = Response()
else:
    # Create new job.
    name = command.name
    policy_id = str(UUID(bytes=command.policy_id))
    seeds = [s.strip() for s in command.seeds]
    tags = tags or []

    if name.strip() == '':
        url = urlparse(seeds[0])
        name = url.hostname
        if len(seeds) > 1:
            name += ' & {} more'.format(len(seeds) - 1)

    if command.run_state != protobuf.shared_pb2.RUNNING:
        raise InvalidRequestException(
            'New job state must be set to RUNNING')
From HyperionGray/starbelly, starbelly/server.py (view on GitHub):
async def _get_job(self, command, socket):
    ''' Get status for a single job. '''
    job_id = str(UUID(bytes=command.job_id))
    job_doc = await self._crawl_manager.get_job(job_id)
    response = Response()
    if job_doc is None:
        response.is_success = False
        response.error_message = f'No job exists with ID={job_id}'
    else:
        job = response.job
        job.job_id = UUID(job_doc['id']).bytes
        for seed in job_doc['seeds']:
            job.seeds.append(seed)
        for tag in job_doc['tags']:
            job.tag_list.tags.append(tag)
        Policy.convert_doc_to_pb(job_doc['policy'], job.policy)
        job.name = job_doc['name']
        job.item_count = job_doc['item_count']
        job.http_success_count = job_doc['http_success_count']
        job.http_error_count = job_doc['http_error_count']
        job.exception_count = job_doc['exception_count']
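Repeated scalar fields such as job.seeds behave like Python lists, so the append loops above could equally use extend(); repeated message fields, by contrast, only grow through add(). A sketch with made-up seed URLs:

response = Response()
job = response.job
job.seeds.extend(['https://a.example/', 'https://b.example/'])   # same effect as the append loop
assert list(job.seeds) == ['https://a.example/', 'https://b.example/']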
From HyperionGray/starbelly, starbelly/server.py (view on GitHub):
        'file': key[0],
        'line_number': key[1],
        'function': key[2],
        'calls': value[0],
        'non_recursive_calls': value[1],
        'total_time': value[2],
        'cumulative_time': value[3],
    })

try:
    stats.sort(key=operator.itemgetter(command.sort_by), reverse=True)
except KeyError:
    raise InvalidRequestException('Invalid sort key: {}'
        .format(command.sort_by))

response = Response()
response.performance_profile.total_calls = pr_stats.total_calls
response.performance_profile.total_time = pr_stats.total_tt

for stat in stats[:command.top_n]:
    function = response.performance_profile.functions.add()
    function.file = stat['file']
    function.line_number = stat['line_number']
    function.function = stat['function']
    function.calls = stat['calls']
    function.non_recursive_calls = stat['non_recursive_calls']
    function.total_time = stat['total_time']
    function.cumulative_time = stat['cumulative_time']

return response
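On the receiving side, a client reverses the final step of the send path shown earlier: parse the bytes back into a ServerMessage and read the nested response. A sketch, assuming the same generated classes and message_data as produced above:

incoming = ServerMessage()
incoming.ParseFromString(message_data)   # bytes received over the websocket
response = incoming.response
if not response.is_success:
    print(response.error_message)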