Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def create_scenario(create_MultiPartnerLearning, create_Dataset, create_partner_list):
    """Pytest fixture: build a minimal Scenario for unit tests and yield it."""
    # Scenario configuration used by the unit tests. The keys must match
    # exactly what Scenario expects, so they are kept verbatim here.
    params = {
        "dataset_name": "cifar10",
        "partners_count": 3,
        "amounts_per_partner": [0.2, 0.5, 0.3],
        "samples_split_option": ["basic", "random"],
        "multi_partner_learning_aproach": "fedavg",
        "aggregation_weighting": "uniform",
        "methods": ["Shapley values", "Independent scores"],
        "gradient_updates_per_pass_count": 5,
    }
    experiment_path = Path('/home/garibou/Documents/distributed-learning-contributivity/experiments/test_unitaire')
    scn = Scenario(
        params=params,
        experiment_path=experiment_path,
        scenario_id=0,
        n_repeat=1,
    )
    scn.mpl = create_MultiPartnerLearning
    # Touch the dataset attribute, preserved from the original code.
    # NOTE(review): presumably this triggers lazy initialization — confirm.
    scn.dataset
    yield scn
Set mainframe and initialize widgets to various places.
"""
# Keep a reference to the hosting mainframe widget.
self._mainframe = mainframe
#self._neteditor = mainframe.add_view("Network", Neteditor)
# mainframe.browse_obj(self._module)
# Build this plugin's menu and toolbar entries.
self.make_menu()
self.make_toolbar()
# Command-line arguments decide how the scenario is obtained:
#   3 args -> create a new Scenario from rootname + working directory
#   2 args -> load a previously saved scenario from a file
#   else   -> fall back to a default scenario name / directory
args = mainframe.get_args()
if len(args) == 3:
# command line provided rootname and dirpath
rootname = args[1]
dirpath = args[2]
name_scenario = rootname
self._scenario = scenario.Scenario(rootname, workdirpath=dirpath,
name_scenario=name_scenario,
logger=self._mainframe.get_logger())
# Populate the freshly created scenario from its XML files.
self._scenario.import_xml()
elif len(args) == 2:
filepath = args[1]
self._scenario = scenario.load_scenario(
filepath, logger=self._mainframe.get_logger())
#self._scenario = cm.load_obj(filepath)
else:
# command line provided nothing
rootname = 'myscenario'
# None# this means no directory will be created
# os.path.join(os.path.expanduser("~"),'sumopy','myscenario')
# Default working directory for scenarios shipped with the package.
dirpath = scenario.DIRPATH_SCENARIO
def validate_scenario_list(scenario_params_list, experiment_path):
    """Instantiate every scenario without running it to check if
    every scenario is correctly specified. This prevents scenario initialization errors during the experiment"""
    logger.debug("Starting to validate scenarios")
    total = len(scenario_params_list)
    for idx, params in enumerate(scenario_params_list):
        logger.debug(f"Validation scenario {idx + 1}/{total}")
        # TODO: we should not create scenario folder at this point
        candidate = scenario.Scenario(params, experiment_path, is_dry_run=True)
        candidate.instantiate_scenario_partners()
        # Run the appropriate (silent) data split to surface config errors.
        split_type = candidate.samples_split_type
        if split_type == 'basic':
            candidate.split_data(is_logging_enabled=False)
        elif split_type == 'advanced':
            candidate.split_data_advanced(is_logging_enabled=False)
    logger.debug("All scenario have been validated")
def create_scenarios():
    """Create the Faster R-CNN alt-opt training scenarios.

    Builds a quick-iteration base scenario from the alt-opt YAML config and
    derives a variant with smaller anchor scales from it.
    """
    alt_opt_cfg = join(faster_rcnn_root, "experiments/cfgs/faster_rcnn_alt_opt.yml")
    # Fix: bare yaml.load() without a Loader is a TypeError on PyYAML >= 6
    # and unsafe on older versions; the with-block also closes the file
    # handle, which the original inline open() leaked.
    with open(alt_opt_cfg) as cfg_file:
        alt_opt_config = yaml.safe_load(cfg_file)
    base_scenario = Scenario(
        scenarios_dir=scenarios_dir,
        scenario="scales_2_4_8",
        train_imdb="technicaldrawings_single-numbers_train",
        test_imdb="technicaldrawings_single-numbers_val",
        weights_path=join(faster_rcnn_root, "data/imagenet_models/ZF.v2.caffemodel"),  # you have to download this first
        gpu_id=0,
        max_iters=[1, 1, 1, 1],  # max iters
        rpn_config=RpnConfig(num_classes=2, anchor_scales=[8, 16, 32], anchor_feat_stride=16),
        fast_rcnn_config=FastRcnnConfig(num_classes=2),
        solver_config=SolverConfig(),
        config=alt_opt_config
    )
    # Variant: same setup with smaller anchor scales.
    small_scales = deepcopy(base_scenario)
    small_scales.name("scales_4_8_16")
    small_scales.rpn_config.anchor_scales = [4, 8, 16]
def _find_step_matching_to(self, step, msg_set, args_default):
    """find step matching to ``msg_set`` in all scenarios,
    passing ``args_default``"""
    escaped_group = re.escape(r'(.+)')
    for scn in self._scenarios:
        for method, message, step_args in getattr(scn, step):
            # Turn the step template into a regex: replace template slots
            # with a capture group, escape everything else literally, then
            # restore the capture groups that escaping neutralised.
            pattern = re.sub(TEMPLATE_PATTERN, r'(.+)', message)
            pattern = re.escape(pattern).replace(escaped_group, r'(.+)')
            match = re.match(pattern, msg_set)
            if match:
                return method, msg_set, match.groups()
    return Scenario.undefined_step, msg_set, args_default
# Make the project's src/ directory importable relative to this file.
this_dir = dirname(abspath(__file__))
sys.path.insert(0, join(this_dir, '..', 'src'))

scenarios_dir = join(this_dir, 'scenarios')
default_cfg = join(this_dir, 'default-cfg.yml')

# Anchor-scale and aspect-ratio grids used by the experiment scenarios.
STATS_SCALES = [2, 4, 8, 16, 32]
DEFAULT_SCALES = [8, 16, 32]
DEFAULT_RATIOS = [0.5, 1, 2]
MORE_RATIOS = [0.25, 0.5, 1, 2, 4]
EVEN_MORE_RATIOS = [0.125, 0.25, 0.5, 1, 2, 4, 8]
STATS_AMOUNT_OF_RATIOS = [0.2, 0.5, 0.8, 1, 1 / 0.8, 1 / 0.5, 10]
# Default training scenario: full-length alt-opt training on the numbers
# dataset with the default anchor scales/ratios.
# Fix: bare yaml.load() without a Loader is a TypeError on PyYAML >= 6 and
# unsafe on older versions; the with-block also closes the file handle,
# which the original inline open() leaked.
with open(default_cfg) as _cfg_file:
    _default_config = yaml.safe_load(_cfg_file)

default_scenario = Scenario(
    scenarios_dir=scenarios_dir,
    scenario="scales_2_4_8",
    train_imdb="technicaldrawings_numbers_train",
    test_imdb="technicaldrawings_numbers_val",
    weights_path=join(faster_rcnn_root, "data/imagenet_models/ZF.v2.caffemodel"),  # you have to download this first
    gpu_id=0,
    # max_iters=[1, 1, 1, 1], # max iters
    max_iters=[10000, 10000, 10000, 10000],  # max iters
    rpn_config=RpnConfig(num_classes=2, anchor_scales=DEFAULT_SCALES, anchor_feat_stride=16, anchor_ratios=DEFAULT_RATIOS),
    fast_rcnn_config=FastRcnnConfig(num_classes=2, anchor_scales=DEFAULT_SCALES, anchor_feat_stride=16, anchor_ratios=DEFAULT_RATIOS),
    solver_config=SolverConfig(step_size=8500, display=5),
    config=_default_config
)
# Close open figures
plt.close("all")
# Iterate over repeats of all scenarios experiments
for i in range(n_repeats):
logger.info(f"Repeat {i+1}/{n_repeats}")
for scenario_id, scenario_params in enumerate(scenario_params_list):
logger.info(f"Scenario {scenario_id + 1}/{len(scenario_params_list)}")
logger.info("Current params:")
logger.info(scenario_params)
# Build a fresh Scenario for this (repeat, params) combination;
# ids passed to Scenario are 1-based for display purposes.
current_scenario = scenario.Scenario(
scenario_params,
experiment_path,
scenario_id=scenario_id+1,
n_repeat=i+1
)
run_scenario(current_scenario)
# Write results to CSV file
# NOTE(review): "random_state" stores the repeat index i — confirm this
# is the intended meaning of the column.
df_results = current_scenario.to_dataframe()
df_results["random_state"] = i
df_results["scenario_id"] = scenario_id
# Append to results.csv; write the header only when the file is empty
# (f.tell() == 0 immediately after opening in append mode).
with open(experiment_path / "results.csv", "a") as f:
df_results.to_csv(f, header=f.tell() == 0, index=False)
logger.info(f"Results saved to {os.path.relpath(experiment_path)}/results.csv")