def _check_line_number(self,
                       line_number: int,
                       tail: str,
                       expected_line_part: str,
                       num_line_number_previously_requested: int = 0
                       ):
    root_dir = pathlib.Path('root')
    logger = sut.PhaseLoggingPaths(root_dir, 'phase-identifier')
    # Request the same line number the given number of times first, so the
    # final request exercises the repeated-request behaviour.
    for i in range(num_line_number_previously_requested):
        logger.for_line(line_number)
    actual = logger.for_line(line_number, tail=tail)
    self.assertEqual(logger.dir_path / expected_line_part,
                     actual)
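# Illustrative stand-in (not the real sut.PhaseLoggingPaths): a minimal class
# consistent with what the test above exercises -- a per-phase directory and a
# distinct path per repeated request for the same line number. The naming
# scheme below is an assumption.
import pathlib
from collections import Counter

class PhaseLoggingPaths:
    def __init__(self, root_dir: pathlib.Path, phase_identifier: str):
        self.dir_path = root_dir / phase_identifier
        self._requests = Counter()

    def for_line(self, line_number: int, tail: str = '') -> pathlib.Path:
        # Repeated requests for the same line get a numeric suffix so each
        # call yields a unique path.
        n = self._requests[line_number]
        self._requests[line_number] += 1
        parts = [str(line_number)] + ([str(n)] if n else []) + ([tail] if tail else [])
        return self.dir_path / '-'.join(parts)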
def test_get_path(remotefile):
    # Download the remote file, verify its UTF-8 content, then clean up.
    get(remotefile, 'testget')
    with Path('testget').open() as f:
        assert f.read() == 'foobarééœ'
    Path('testget').unlink()
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument('--max_grad_norm', default=1.0, type=float)
parser.add_argument("--learning_rate", default=0.00176, type=float,
help="The initial learning rate for Adam.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16_opt_level', type=str, default='O2',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
args = parser.parse_args()
args.data_dir = Path(args.data_dir)
args.output_dir = Path(args.output_dir)
pregenerated_data = args.data_dir / "corpus/train"
init_logger(log_file=str(args.output_dir/ "train_albert_model.log"))
assert pregenerated_data.is_dir(), \
"--pregenerated_data should point to the folder of files made by prepare_lm_data_mask.py!"
samples_per_epoch = 0
for i in range(args.file_num):
data_file = pregenerated_data / f"{args.data_name}_file_{i}.json"
metrics_file = pregenerated_data / f"{args.data_name}_file_{i}_metrics.json"
if data_file.is_file() and metrics_file.is_file():
metrics = json.loads(metrics_file.read_text())
samples_per_epoch += metrics['num_training_examples']
else:
if i == 0:
exit("No training data was found!")
def serve_store_model(model_name):
    # Read the pickled model from the datastore and cache the raw bytes in Redis.
    with open(Path(config.DATASTORE_DIR) / f'models/{model_name}.pkl', 'rb') as f:
        r.set(f'model:{model_name}', f.read())
    return f'Stored model {model_name}'
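# Hedged counterpart sketch (not from the original source): the bytes cached
# by serve_store_model can be deserialized straight from Redis, assuming the
# same `r` client and that the pickle was produced by a trusted process.
import pickle

def serve_load_model(model_name):
    raw = r.get(f'model:{model_name}')
    if raw is None:
        raise KeyError(f'No cached model named {model_name}')
    # Only unpickle data you trust; pickle can execute arbitrary code.
    return pickle.loads(raw)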
def dummy_iter(ids, images, cameras):
    """ Standard loaders (shared across the evaluation pipelines) require at
    least a dictionary with an item name and an image whose shape may be
    queried. To avoid reading images from disk unnecessarily, we create
    dummy images that have a shape but no data.
    """
    for i in ids:
        im = images[i]
        cam = cameras[im.camera_id]
        name = im.name
        # Drop the file extension but keep the parent directory in the name.
        yield {'name': Path(Path(name).parent, Path(name).stem).as_posix(),
               'image': DummyImage((cam.height, cam.width, 1))}
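# DummyImage is not defined in this snippet; a minimal sketch that satisfies
# the contract described in the docstring, assuming only the shape attribute
# is consumed downstream.
class DummyImage:
    """Placeholder that reports an image shape without holding pixel data."""
    def __init__(self, shape):
        self.shape = tuple(shape)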
def _get_translations_path() -> str:
path = Path(__file__)
path = path.parent.joinpath("resources", "locale")
if path.is_dir():
return str(path)
return sys.prefix + "/share/locale"
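# Such a path is typically handed to gettext; a hedged usage sketch, where
# the "myapp" translation domain is an assumption.
import gettext

translation = gettext.translation("myapp",
                                  localedir=_get_translations_path(),
                                  fallback=True)
_ = translation.gettext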
rows.append("Report.EventNaturalInterval.ConditionalAllIntervalStandardDeviation=false")
rows.append("")
rows.append("AllCodesMutuallyExclusive=true")
rows.append("")
for (behav, key) in all_observed_behaviors:
rows.append(f"Behavior.isModified.{key}=false")
rows.append(f"Behavior.isSubtracted.{key}=false")
rows.append(f"Behavior.isIgnored.{key}=false")
rows.append(f"Behavior.isEventAnalyzed.{key}=false")
rows.append(f"Behavior.switchesOff.{key}=")
rows.append("")
if faf_creation_answer == "" or faf_creation_answer == OVERWRITE:
try:
with open(pathlib.Path(file_name_subject).with_suffix(".faf"), "w") as f_out:
f_out.write("\n".join(rows))
except Exception:
return False, f"File FAF not created: {sys.exc_info()[1]}"
return True, ""
except Exception:
logging.critical("Error during exporting the events for JWatcher")
dialog.error_message("exporting the events for JWatcher", sys.exc_info())
return False, ""
def get_artwork_path(self):
    # The artwork directory lives two levels above this file in the repository.
    root = pathlib.Path(__file__).parents[2]
    artwork_path = root.joinpath("artwork")
    if not artwork_path.is_dir():
        raise FileNotFoundError(f"Artwork directory not found: {artwork_path}")
    return artwork_path
import pathlib

from pydo import *

this_dir = pathlib.Path(__file__).parent

package = {
    'requires': ['qt'],
    'sysroot_debs': [],
    'root_debs': [],
    'target': this_dir / 'qmlrss.tar.gz',
    'install': ['{chroot} {stage} /bin/systemctl reenable qmlrss.service'],
}

from .. import qt
from ... import jobs

builddir = this_dir / 'build'
stage = this_dir / 'stage'
repo = this_dir / 'qmlrss'
def init_persistence(self, out_dir_override=None):
    """
    Storage locations for results and intermediate data.
    """
    # The brace fields ('{channel.ctx.workdir}', '{fid}', ...) appear to be
    # templates resolved later by the loaders; only the variant suffix is
    # formatted here.
    out_dir_default = Path('{channel.ctx.workdir}') / '{dset.name}_{dset.split}' / f'sem_{self.sem_seg_variant}'
    out_dir = Path(out_dir_override or out_dir_default)
    self.persistence_base_dir = out_dir

    # outputs of the pipeline steps:
    self.storage = dict(
        # semantic segmentation
        pred_labels_trainIds = ChannelLoaderImage(out_dir / 'sem_labels/{fid}_predTrainIds.png'),
        pred_labels_colorimg = ChannelLoaderImage(out_dir / 'sem_labels/{fid}_predColorImg.png'),
        # synthesized image
        gen_image = ChannelLoaderImage(out_dir / 'gen_image/{fid}_gen_image.webp'),
        # overview
        demo_with_labels = ChannelLoaderImage(out_dir / 'demo/{fid_no_slash}_pipeline.webp'),
        demo_with_baselines = ChannelLoaderImage(out_dir / 'demo/{fid_no_slash}_scores.webp'),
    )
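# ChannelLoaderImage is not defined here; an illustrative stand-in showing how
# such templated paths ({fid}-style fields) can be resolved per frame. The
# real class in the source project may well differ.
from pathlib import Path

class ChannelLoaderImage:
    def __init__(self, path_template):
        self.path_template = str(path_template)

    def path_for(self, **fields) -> Path:
        # e.g. path_for(fid='seq01/000001') fills the {fid} placeholder.
        return Path(self.path_template.format(**fields))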