Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
sys.exit(-1)
# Refuse to start unless both artifact directories exist.
if not os.path.exists(vsc.ARTIFACTS_INSTALLERS):
    log.warning(f'Installer artifact directory missing {vsc.ARTIFACTS_INSTALLERS}. Cannot proceed.')
    sys.exit(-1)
if not os.path.exists(vsc.ARTIFACTS_EXTENSIONS):
    log.warning(f'Extensions artifact directory missing {vsc.ARTIFACTS_EXTENSIONS}. Cannot proceed.')
    sys.exit(-1)
# Gallery backend that answers the extension-query API below.
vscgallery = VSCGallery()
log.debug('Waiting for gallery cache to load')
#vscgallery.loaded.wait()
# Refresh the gallery when files under /artifacts/ change.
# NOTE(review): PollingObserver presumably chosen because native inotify
# can miss changes on docker bind mounts — confirm before switching.
observer = PollingObserver()
observer.schedule(ArtifactChangedHandler(vscgallery), '/artifacts/', recursive=False)
observer.start()
# WSGI application and its routes (update API, binary downloads,
# recommendations/malicious-extension feeds, gallery query, browsing UI).
application = falcon.API()
application.add_route('/api/update/{platform}/{buildquality}/{commitid}', VSCUpdater())
application.add_route('/commit:{commitid}/{platform}/{buildquality}', VSCBinaryFromCommitId())
application.add_route('/extensions/workspaceRecommendations.json.gz', VSCRecommendations())  # Why no compress??
application.add_route('/extensions/marketplace.json', VSCMalicious())
application.add_route('/_apis/public/gallery/extensionquery', vscgallery)
application.add_route('/browse', VSCDirectoryBrowse(vsc.ARTIFACTS))
application.add_route('/', VSCIndex())
application.add_static_route('/artifacts/', '/artifacts/')
# Stand-alone debug entry point; in production `application` is served by
# an external WSGI server.
if __name__ == '__main__':
    httpd = simple_server.make_server('0.0.0.0', 5000, application)
    httpd.serve_forever()
# modification with docker mounts
self.created_files.add(event.src_path)
self.process(event)
if __name__ == '__main__':  # pragma: no cover
    # Optional positional argument: the directory to watch.
    args = sys.argv[1:] if len(sys.argv) > 1 else None
    # NOTE: counter-intuitively the inotify observer uses a lot of
    # resources for directories that already hold many files.  Rather
    # than piling on more watchers, old pcaps should be archived; a
    # polling observer is used so file_drop doesn't silently break.
    watcher = PollingObserver()
    watch_path = args[0] if args else '/files'
    watcher.schedule(GZHandler(), path=watch_path, recursive=True)
    watcher.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:  # pragma: no cover
        watcher.stop()
    watcher.join()
def init_bundles(self):
    '''Initialize bundles of posts and pages.

    Builds the post and page bundles, registers a watchdog watch for
    ``*.md`` files in each content directory, and starts the observer
    thread (polling or native, per the ``local.watch_polling`` setting).
    '''
    conf = self.default_conf
    runtime = self.runtime
    post_dir = runtime.get('directory.post')
    page_dir = runtime.get('directory.page')
    self.post_bundle = PostBundle(self)
    self.page_bundle = PageBundle(self)
    post_watch = Watch(self, self.post_bundle, ['*.md'])
    page_watch = Watch(self, self.page_bundle, ['*.md'], is_page=True)
    watch_polling = conf('local.watch_polling')
    self.observer = PollingObserver() if watch_polling else Observer()
    # Thread.setDaemon() is deprecated (Python 3.10+); assign the
    # daemon attribute directly instead.  Daemon mode lets the process
    # exit without waiting for the observer thread.
    self.observer.daemon = True
    self.observer.schedule(post_watch, post_dir)
    self.observer.schedule(page_watch, page_dir)
    self.observer.start()
def __init__(self, use_polling=False):
    """Watchdog-backed watcher compatible with livereload's builtin one."""
    super().__init__()
    self._changed = False
    # HACK: lets this watcher record which file was modified; the value
    # is used for output purposes only.
    self._action_file = None
    self._observer = PollingObserver() if use_polling else Observer()
    self._observer.start()
    # Compatibility with livereload's builtin watcher: read by
    # LiveReloadHandler's on_message method to decide whether a task
    # watching the cwd has to be added.
    self._tasks = True
    # Read by LiveReloadHandler's watch_task method; a boolean-false
    # value makes the browser reload everything ('*').
    self.filepath = None
    # (Server's serve method reads state here to set reload time to 0
    # in LiveReloadHandler's poll_tasks method.)
def _register_watcher(self):
    """Attach a watchdog observer that reloads this plugin when its
    directory changes."""
    logger.debug("Register File Watcher for {0}".format(self.plugin_name))
    handler = PluginEventHandler(self.plugin_name, self._reload_plugin)
    self.event_handler = handler
    self.observer = Observer()
    self.observer.schedule(handler, self.plugin_dir)
    self.observer.start()
def start_watch_loop(self, *args, **options):
    """Watch the collected paths and run the debounced handler once per
    second until interrupted with Ctrl-C."""
    self.set_options(**options)
    debounced = ChangeDebounceHandler(partial(self.watch_handle, *args, **options))
    watcher = Observer()
    for watch_path in self.collect_watch_paths():
        watcher.schedule(debounced, watch_path, recursive=True)
    watcher.start()
    try:
        while True:
            debounced.process()
            time.sleep(1)
    except KeyboardInterrupt:
        watcher.stop()
        watcher.join()
def _init_file_observer(self):
    """Set up the (initially blocked) polling file observer for the run
    directory.  Order matters: the lock is acquired before the observer
    starts so no event is handled until the Run exists."""
    self._file_pusher = file_pusher.FilePusher(self._api)
    # FileEventHandlers (any of the classes at the top) indexed by
    # "save_name," which is the file's path relative to the run directory
    self._file_event_handlers = {}
    # We use the polling observer because inotify was flaky and could
    # require changes to sysctl.conf
    self._file_observer = PollingObserver()
    self._file_observer.schedule(self._per_file_event_handler(), self._run.dir, recursive=True)
    # We lock this when the back end is down so Watchdog will keep track of all
    # the file events that happen. Then, when the back end comes back up, we unlock
    # it so all the outstanding events will get handled properly. Watchdog's queue
    # only keeps at most one event per file.
    self._file_observer_lock = threading.Lock()
    # It starts acquired. We release it when we want to allow the events to happen.
    # (ie. after the Run is successfully created)
    self._block_file_observer()
    # Start watching for file changes right away so we can be sure we don't miss
    # anything. We don't have to worry about handlers actually being called
    # because of the lock.
    self._file_observer.start()
def __init__(self, dbfile=":memory:", poll=False):
    """Open (or create) the keyword database and start a watchdog
    observer for the keyword-file directories."""
    self.log = logging.getLogger(__name__)
    # check_same_thread=False: the connection is also touched from the
    # observer thread.
    self.db = sqlite3.connect(dbfile, check_same_thread=False)
    self._create_db()
    # Watch for changes to keyword files (more precisely, to the
    # directories that contain them); polling is offered for
    # filesystems where native notification is unreliable.
    if poll:
        self.observer = PollingObserver()
    else:
        self.observer = Observer()
    self.observer.start()
def start_observer(self):
    """Create and start a polling observer watching every path in
    ``self.observer_paths`` recursively.

    A single RenderHandler instance is shared by all watched paths and
    stays reachable via ``self.observer.handler`` for callers that
    inspect it.
    """
    self.observer = PollingObserver()
    # Removed a stray `self.observer.should_keep_running()` call here:
    # it merely returns a boolean and the result was discarded, so the
    # call was a no-op.
    self.observer.handler = RenderHandler(self.args)
    for path in self.observer_paths:
        self.observer.schedule(self.observer.handler, path, recursive=True)
    self.observer.start()