How to use the emmental.utils.utils.construct_identifier function in emmental

To help you get started, we’ve selected a few emmental examples, based on popular ways construct_identifier is used in public projects.
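
A minimal sketch of what the function returns, based on the unit test shown further down; the task, dataset, split, and metric names here are made up for illustration:

from emmental.utils.utils import construct_identifier

# construct_identifier joins its arguments into a single "/"-delimited key,
# typically task name / data name / split / metric name.
identifier = construct_identifier("task1", "mnist", "train", "accuracy")
print(identifier)  # task1/mnist/train/accuracy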


github SenWu / emmental / tests / utils / test_utils.py (View on GitHub)
import logging

from emmental.utils.utils import construct_identifier


def test_construct_identifier(caplog):
    """Unit test of construct_identifier."""
    caplog.set_level(logging.INFO)

    assert construct_identifier("1", "2", "3", "4") == "1/2/3/4"
    assert construct_identifier("1", "2", "3") == "1/2/3"
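
Because the identifier is a plain "/"-joined string, callers can split it back into its components, as the learner.py snippet below does with identifier.split("/"). A small sketch with made-up values:

from emmental.utils.utils import construct_identifier

# Build a task/data/split identifier and recover its parts again.
identifier = construct_identifier("task1", "mnist", "train")
task_name, data_name, split = identifier.split("/")
assert (task_name, data_name, split) == ("task1", "mnist", "train")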

github SenWu / emmental / src / emmental / learner.py (View on GitHub)
            # Calculate training metric
            for identifier in self.running_uids.keys():
                task_name, data_name, split = identifier.split("/")

                metric_score = model.scorers[task_name].score(
                    self.running_golds[identifier],
                    self.running_probs[identifier],
                    prob_to_pred(self.running_probs[identifier]),
                    self.running_uids[identifier],
                )
                for metric_name, metric_value in metric_score.items():
                    metric_dict[f"{identifier}/{metric_name}"] = metric_value

                # Collect average score
                identifier = construct_identifier(
                    task_name, data_name, split, "average"
                )

                metric_dict[identifier] = np.mean(list(metric_score.values()))

                micro_score_dict[split].extend(list(metric_score.values()))
                macro_score_dict[split].append(metric_dict[identifier])

            # Collect split-wise micro/macro average score
            for split in micro_score_dict.keys():
                identifier = construct_identifier(
                    "model", "all", split, "micro_average"
                )
                metric_dict[identifier] = np.mean(micro_score_dict[split])
                identifier = construct_identifier(
                    "model", "all", split, "macro_average"

github SenWu / emmental / src / emmental / model.py (View on GitHub)
            predictions = self.predict(dataloader, return_preds=True)
            for task_name in predictions["golds"].keys():
                metric_score = self.scorers[task_name].score(
                    predictions["golds"][task_name],
                    predictions["probs"][task_name],
                    predictions["preds"][task_name],
                    predictions["uids"][task_name],
                )
                for metric_name, metric_value in metric_score.items():
                    identifier = construct_identifier(
                        task_name, dataloader.data_name, dataloader.split, metric_name
                    )
                    metric_score_dict[identifier] = metric_value

                # Store the loss
                identifier = construct_identifier(
                    task_name, dataloader.data_name, dataloader.split, "loss"
                )
                metric_score_dict[identifier] = predictions["losses"][task_name]

                if return_average:
                    # Collect average score
                    identifier = construct_identifier(
                        task_name, dataloader.data_name, dataloader.split, "average"
                    )
                    metric_score_dict[identifier] = np.mean(list(metric_score.values()))

                    micro_score_dict[dataloader.split].extend(
                        list(metric_score.values())
                    )
                    macro_score_dict[dataloader.split].append(
                        metric_score_dict[identifier]
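
The learner.py and model.py snippets above share one pattern: construct_identifier builds the keys of a flat metric dictionary of the form task/data/split/metric, plus an "average" entry per task. A self-contained sketch of that pattern, with hypothetical task, dataset, and score values:

import numpy as np

from emmental.utils.utils import construct_identifier

# Hypothetical per-metric scores for one task on one dataloader.
metric_score = {"accuracy": 0.91, "f1": 0.88}
metric_score_dict = {}

# Store each metric under a task/data/split/metric key.
for metric_name, metric_value in metric_score.items():
    identifier = construct_identifier("task1", "mnist", "valid", metric_name)
    metric_score_dict[identifier] = metric_value

# Collect the average score under the same naming scheme.
identifier = construct_identifier("task1", "mnist", "valid", "average")
metric_score_dict[identifier] = np.mean(list(metric_score.values()))
# metric_score_dict now has keys such as "task1/mnist/valid/accuracy".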

github SenWu / emmental / src / emmental / learner.py (View on GitHub)
                for metric_name, metric_value in metric_score.items():
                    metric_dict[f"{identifier}/{metric_name}"] = metric_value

                # Collect average score
                identifier = construct_identifier(
                    task_name, data_name, split, "average"
                )

                metric_dict[identifier] = np.mean(list(metric_score.values()))

                micro_score_dict[split].extend(list(metric_score.values()))
                macro_score_dict[split].append(metric_dict[identifier])

            # Collect split-wise micro/macro average score
            for split in micro_score_dict.keys():
                identifier = construct_identifier(
                    "model", "all", split, "micro_average"
                )
                metric_dict[identifier] = np.mean(micro_score_dict[split])
                identifier = construct_identifier(
                    "model", "all", split, "macro_average"
                )
                metric_dict[identifier] = np.mean(macro_score_dict[split])

        # Log the learning rate
        metric_dict["model/all/train/lr"] = self.optimizer.param_groups[0]["lr"]

        return metric_dict
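
The split-wise averaging at the end of the learner.py snippet above reduces the collected scores into model/all/<split>/... entries. A self-contained sketch of that step, with made-up per-split scores:

from collections import defaultdict

import numpy as np

from emmental.utils.utils import construct_identifier

# Made-up per-split score collections, as accumulated earlier in learner.py.
micro_score_dict = defaultdict(list, {"train": [0.9, 0.8, 0.7]})
macro_score_dict = defaultdict(list, {"train": [0.85, 0.7]})
metric_dict = {}

# Collect split-wise micro/macro average scores under model/all/<split>/... keys.
for split in micro_score_dict.keys():
    identifier = construct_identifier("model", "all", split, "micro_average")
    metric_dict[identifier] = np.mean(micro_score_dict[split])
    identifier = construct_identifier("model", "all", split, "macro_average")
    metric_dict[identifier] = np.mean(macro_score_dict[split])

# metric_dict now holds roughly:
#   {"model/all/train/micro_average": 0.8, "model/all/train/macro_average": 0.775}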

github SenWu / emmental / src / emmental / model.py (View on GitHub)
        if return_average:
            # Collect split-wise micro/macro average score
            for split in micro_score_dict.keys():
                identifier = construct_identifier(
                    "model", "all", split, "micro_average"
                )
                metric_score_dict[identifier] = np.mean(micro_score_dict[split])
                identifier = construct_identifier(
                    "model", "all", split, "macro_average"
                )
                metric_score_dict[identifier] = np.mean(macro_score_dict[split])
                identifier = construct_identifier("model", "all", split, "loss")
                metric_score_dict[identifier] = np.mean(macro_loss_dict[split])

            # Collect overall micro/macro average score/loss
            identifier = construct_identifier("model", "all", "all", "micro_average")
            metric_score_dict[identifier] = np.mean(
                list(itertools.chain.from_iterable(micro_score_dict.values()))
            )
            identifier = construct_identifier("model", "all", "all", "macro_average")
            metric_score_dict[identifier] = np.mean(
                list(itertools.chain.from_iterable(macro_score_dict.values()))
            )
            identifier = construct_identifier("model", "all", "all", "loss")
            metric_score_dict[identifier] = np.mean(
                list(itertools.chain.from_iterable(macro_loss_dict.values()))
            )

        # TODO: have a better way to handle global evaluation metric
        if Meta.config["learner_config"]["global_evaluation_metric_dict"]:
            global_evaluation_metric_dict = Meta.config["learner_config"][
                "global_evaluation_metric_dict"

github SenWu / emmental / src / emmental / model.py (View on GitHub)
                    macro_loss_dict[dataloader.split].append(
                        metric_score_dict[identifier]
                    )

        if return_average:
            # Collect split-wise micro/macro average score
            for split in micro_score_dict.keys():
                identifier = construct_identifier(
                    "model", "all", split, "micro_average"
                )
                metric_score_dict[identifier] = np.mean(micro_score_dict[split])
                identifier = construct_identifier(
                    "model", "all", split, "macro_average"
                )
                metric_score_dict[identifier] = np.mean(macro_score_dict[split])
                identifier = construct_identifier("model", "all", split, "loss")
                metric_score_dict[identifier] = np.mean(macro_loss_dict[split])

            # Collect overall micro/macro average score/loss
            identifier = construct_identifier("model", "all", "all", "micro_average")
            metric_score_dict[identifier] = np.mean(list(micro_score_dict.values()))
            identifier = construct_identifier("model", "all", "all", "macro_average")
            metric_score_dict[identifier] = np.mean(list(macro_score_dict.values()))
            identifier = construct_identifier("model", "all", "all", "loss")
            metric_score_dict[identifier] = np.mean(list(macro_loss_dict.values()))

        # TODO: have a better way to handle global evaluation metric
        if Meta.config["learner_config"]["global_evaluation_metric_dict"]:
            global_evaluation_metric_dict = Meta.config["learner_config"][
                "global_evaluation_metric_dict"
            ]
            for metric_name, metric in global_evaluation_metric_dict.items():

github SenWu / emmental / src / emmental / model.py (View on GitHub)
                )
                for metric_name, metric_value in metric_score.items():
                    identifier = construct_identifier(
                        task_name, dataloader.data_name, dataloader.split, metric_name
                    )
                    metric_score_dict[identifier] = metric_value

                # Store the loss
                identifier = construct_identifier(
                    task_name, dataloader.data_name, dataloader.split, "loss"
                )
                metric_score_dict[identifier] = predictions["losses"][task_name]

                if return_average:
                    # Collect average score
                    identifier = construct_identifier(
                        task_name, dataloader.data_name, dataloader.split, "average"
                    )
                    metric_score_dict[identifier] = np.mean(list(metric_score.values()))

                    micro_score_dict[dataloader.split].extend(
                        list(metric_score.values())
                    )
                    macro_score_dict[dataloader.split].append(
                        metric_score_dict[identifier]
                    )

                    # Store the loss
                    identifier = construct_identifier(
                        task_name, dataloader.data_name, dataloader.split, "loss"
                    )
                    macro_loss_dict[dataloader.split].append(

github SenWu / emmental / src / emmental / model.py (View on GitHub)
                    "model", "all", split, "micro_average"
                )
                metric_score_dict[identifier] = np.mean(micro_score_dict[split])
                identifier = construct_identifier(
                    "model", "all", split, "macro_average"
                )
                metric_score_dict[identifier] = np.mean(macro_score_dict[split])
                identifier = construct_identifier("model", "all", split, "loss")
                metric_score_dict[identifier] = np.mean(macro_loss_dict[split])

            # Collect overall micro/macro average score/loss
            identifier = construct_identifier("model", "all", "all", "micro_average")
            metric_score_dict[identifier] = np.mean(list(micro_score_dict.values()))
            identifier = construct_identifier("model", "all", "all", "macro_average")
            metric_score_dict[identifier] = np.mean(list(macro_score_dict.values()))
            identifier = construct_identifier("model", "all", "all", "loss")
            metric_score_dict[identifier] = np.mean(list(macro_loss_dict.values()))

        # TODO: have a better way to handle global evaluation metric
        if Meta.config["learner_config"]["global_evaluation_metric_dict"]:
            global_evaluation_metric_dict = Meta.config["learner_config"][
                "global_evaluation_metric_dict"
            ]
            for metric_name, metric in global_evaluation_metric_dict.items():
                metric_score_dict[metric_name] = metric(metric_score_dict)

        return metric_score_dict