Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def train(self):
    """Run the full training pipeline for this AutoML job.

    Sequence: mark TRAINING, persist meta, prepare data, fit the model on
    the prepared train split, save the model, validate, then mark SUCCESS
    and persist final meta (including model representation and statistics).
    On any failure the job is marked FAILED and the error text is stored
    in ``training_error``; the exception is logged, not re-raised.
    """
    logger.debug('start to train')
    self._update_status(MLJobStatus.TRAINING)
    # Wall-clock start, stored as a POSIX timestamp for the job metadata.
    self.start_time = datetime.datetime.now().timestamp()
    try:
        self._save_meta()
        self._prepare()
        logger.debug('prepare complete')
        self.model.fit(self.X_train, self.y_train)
        self._save_model()
        logger.debug('train complete')
        self._update_status(MLJobStatus.VALIDATING)
        self._validate()
        logger.debug('validation complete')
        self._update_status(MLJobStatus.SUCCESS)
        self.end_time = datetime.datetime.now().timestamp()
        self._parse_model_representation()
        # sprint_statistics() presumably comes from auto-sklearn — TODO confirm
        self.model_stats = self.model.sprint_statistics()
        self._save_meta()
    except Exception as e:
        # Fixed typo in the original message ("faile" -> "failed").
        logger.exception(f'failed to train the auto ml job {e}')
        self._update_status(MLJobStatus.FAILED)
        self.training_error = str(e)
        self._save_meta()
def train(self):
    """Run the training pipeline for this job (dataset-based variant).

    Sequence: mark TRAINING, persist meta, prepare data, fit the model on
    ``self.train_dataset``, save the model, validate, then mark SUCCESS
    and persist final meta. On failure the job is marked FAILED and the
    error text is stored in ``training_error``; the exception is now
    logged (previously it was swallowed silently, losing the traceback).
    """
    logger.debug('start to train')
    self._update_status(MLJobStatus.TRAINING)
    # Wall-clock start, stored as a POSIX timestamp for the job metadata.
    self.start_time = datetime.datetime.now().timestamp()
    try:
        self._save_meta()
        self._prepare()
        logger.debug('prepare complete')
        self.model.fit(self.train_dataset)
        logger.debug('train complete')
        self._save_model()
        self._update_status(MLJobStatus.VALIDATING)
        self._validate()
        logger.debug('validation complete')
        self._update_status(MLJobStatus.SUCCESS)
        self.end_time = datetime.datetime.now().timestamp()
        self._save_meta()
    except Exception as e:
        # Log with traceback for consistency with the other train() variant;
        # the original except branch recorded the error but never logged it.
        logger.exception(f'failed to train the auto ml job {e}')
        self._update_status(MLJobStatus.FAILED)
        self.training_error = str(e)
        self._save_meta()
):
listeners = self.listeners[event_name].copy()
if reverse:
listeners.reverse()
# Prepend sanic to the arguments when listeners are triggered
listeners = [partial(listener, self) for listener in listeners]
server_settings[settings_name] = listeners
if self.configure_logging and debug:
logger.setLevel(logging.DEBUG)
if (
self.config.LOGO
and os.environ.get("SANIC_SERVER_RUNNING") != "true"
):
logger.debug(
self.config.LOGO
if isinstance(self.config.LOGO, str)
else BASE_LOGO
)
if run_async:
server_settings["run_async"] = True
# Serve
if host and port and os.environ.get("SANIC_SERVER_RUNNING") != "true":
proto = "http"
if ssl is not None:
proto = "https"
logger.info("Goin' Fast @ {}://{}:{}".format(proto, host, port))
return server_settings
def register(self, name, class_name, module):
    """Register a dataset type under *name*, importing its backing *module*.

    Raises RuntimeError if *module* is not a module-path string or if the
    module cannot be imported; the import failure is logged with traceback.
    """
    logger.debug(
        "DatasetTypeRegistry registered name=%s, class=%s, module=%s"
        % (name, class_name, module)
    )
    if not isinstance(module, str):
        # logger.error, not logger.exception: there is no active exception
        # here, so exception() would log a bogus "NoneType: None" traceback.
        logger.error("Wrong module provided, %s is not a module." % (module))
        raise RuntimeError('Error while register dataset type "%s %s"' % (name, module))
    try:
        # Import eagerly so a bad module path fails at registration time.
        importlib.import_module(module)
    except Exception as e:
        logger.exception("Wrong module provided, failed to load %s as a module." % (module))
        raise RuntimeError(
            'Error while register dataset type "%s %s":%s' % (name, module, str(e))
        )
async def delete_job(request, id):
    """HTTP handler: delete the ML job identified by *id*.

    Returns 204 with an empty body on success, 500 on any failure.
    (``id`` shadows the builtin but is part of the route signature.)
    """
    logger.debug(f'delete ml jobs {id}')
    try:
        MLJobManager.delete_job(id)
        return response.json({}, status=204)
    except Exception:
        # Fixed typo in the original message ("faile" -> "failed").
        logger.exception('failed to delete ml job')
        return response.json({}, status=500)
def delete_job_by_id(job_id):
    """Remove the on-disk directory belonging to job *job_id*.

    Best-effort: a failure to remove the tree is logged (with traceback)
    and otherwise ignored; success is logged at debug level.
    """
    job_dir = os.path.join(MLJob.base_dir, job_id)
    try:
        shutil.rmtree(job_dir)
    except Exception:
        logger.exception(f'failed to delete job dir {job_dir}')
    else:
        logger.debug(f'successfully deleted the directory {job_dir}')
def _load(self):
    """Read the CSV at ``self.path`` into a DataFrame stored on ``self.df``."""
    logger.debug(f'load csv from {self.path}')
    # pandas handles parsing; any read error propagates to the caller.
    self.df = pd.read_csv(self.path)
    logger.debug(f'load csv from {self.path} success')
async def list_jobs(request):
    """HTTP handler: list ML jobs, optionally filtered by ``?type=...``.

    Returns 200 with the (possibly filtered) job list, or 500 on failure.
    """
    # BUG FIX: the original string lacked the f-prefix, so the literal
    # text "{request.args}" was logged instead of the actual arguments.
    logger.debug(f'list ml jobs with condition={request.args}')
    args = request.args
    try:
        jobs = MLJobManager.list_jobs()
        if args and 'type' in args:
            # args['type'] is presumably a list of values — TODO confirm
            # against the web framework's query-args container.
            query_jobs = [job for job in jobs if job['type'] in args['type']]
            return response.json(query_jobs, status=200)
        else:
            return response.json(jobs, status=200)
    except Exception:
        logger.exception('failed to list ml jobs')
        return response.json({}, status=500)
def _save_model(self):
    """Serialize this job object to ``model.joblib`` inside the job directory."""
    logger.debug(
        f'save model for class={type(self).__name__} id={self.id} name={self.name}'
    )
    # Persist the whole job object (including the fitted model) via joblib.
    target = os.path.join(self.job_dir, 'model.joblib')
    dump(self, target)
    logger.debug('save model complete')