How to use the sacrebleu.compute_bleu function in sacrebleu

To help you get started, we’ve selected a few sacrebleu examples, based on popular ways it is used in public projects.

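Before turning to the project excerpts below, here is a minimal sketch of the underlying pattern, assuming a sacrebleu 1.x release where compute_bleu is a module-level function taking smooth_method/smooth_value (newer 2.x releases reorganized the metrics into classes) and corpus_bleu returns the sufficient statistics it was computed from:

import sacrebleu

# Hypotheses and a single reference stream for a toy corpus.
hypotheses = ["the cat sat on the mat", "a quick brown fox"]
references = [["the cat is on the mat", "the quick brown fox"]]

# corpus_bleu returns a BLEU object carrying counts, totals, sys_len and ref_len.
bleu = sacrebleu.corpus_bleu(hypotheses, references)

# compute_bleu rebuilds a score from those statistics, which is what makes it
# possible to cache, shard, or re-aggregate counts before scoring.
rescored = sacrebleu.compute_bleu(
    bleu.counts, bleu.totals, bleu.sys_len, bleu.ref_len,
    smooth_method='exp',
)
print(rescored.score)  # corpus-level BLEU on a 0-100 scale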

github mjpost / sacreBLEU / test / test_bleu.py
def test_degenerate_statistics(statistics, offset, expected_score):
    score = sacrebleu.compute_bleu(statistics[0].common, statistics[0].total, statistics[1], statistics[2], smooth_method='floor', smooth_value=offset).score / 100
    assert score == expected_score

github awslabs / sockeye / test / unit / test_bleu.py
def test_scoring(statistics, expected_score):
    score = sacrebleu.compute_bleu(statistics[0].common, statistics[0].total, statistics[1], statistics[2]).score / 100
    assert abs(score - expected_score) < EPSILON

github neulab / compare-mt / compare_mt / scorers.py
  def score_cached_corpus(self, sent_ids, cached_stats):
    """Score a corpus using SacreBLEU with cached statistics

    Args:
      sent_ids: The sentence ids for reference and output corpora
      cached_stats: A list of cached statistics

    Returns:
      A tuple containing a single value for the SacreBLEU score and a string summarizing auxiliary information
    """
    if len(cached_stats) == 0:
      return 0.0, None

    counts, totals, sys_len, ref_len = zip(*cached_stats)
    counts, totals, sys_len, ref_len = [np.sum(np.array(x)[sent_ids], 0) for x in [counts, totals, sys_len, ref_len]]

    return sacrebleu.compute_bleu(counts, totals, sys_len, ref_len, smooth_method=self.smooth_method, smooth_value=self.smooth_value, use_effective_order=self.use_effective_order).score, None
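The method above caches per-sentence statistics and re-aggregates them for an arbitrary subset of sentence ids before calling compute_bleu. A self-contained sketch of the same idea, with illustrative variable names rather than compare-mt's (the per-sentence statistics here come from corpus_bleu on single-sentence inputs):

import numpy as np
import sacrebleu

hyps = ["the cat sat on the mat", "a quick brown fox", "hello world"]
refs = ["the cat is on the mat", "the quick brown fox", "hello there world"]

# Cache (counts, totals, sys_len, ref_len) for every sentence.
cached_stats = []
for hyp, ref in zip(hyps, refs):
    stats = sacrebleu.corpus_bleu([hyp], [[ref]])
    cached_stats.append((stats.counts, stats.totals, stats.sys_len, stats.ref_len))

# Score an arbitrary subset of sentences by summing their cached statistics.
sent_ids = [0, 2]
counts, totals, sys_len, ref_len = zip(*cached_stats)
counts, totals, sys_len, ref_len = [np.sum(np.array(x)[sent_ids], 0)
                                    for x in [counts, totals, sys_len, ref_len]]

subset = sacrebleu.compute_bleu(counts, totals, sys_len, ref_len,
                                smooth_method='exp', use_effective_order=True)
print(subset.score)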

github facebookresearch / vizseq / vizseq / scorers / bp.py
                # Score each batch of (hypotheses, references) in a worker process.
                futures = [
                    executor.submit(
                        sb.corpus_bleu, b[0], b[1], force=True,
                        tokenize=tokenizer
                    )
                    for b in batches
                ]
                progress = as_completed(futures)
                if self.verbose:
                    progress = tqdm(progress)
                for future in progress:
                    s = future.result()
                    ref_len += s.ref_len
                    sys_len += s.sys_len
                    for n in range(sb.NGRAM_ORDER):
                        correct[n] += s.counts[n]
                        total[n] += s.totals[n]
            corpus_score = sb.compute_bleu(
                correct, total, sys_len, ref_len, smooth_method='exp'
            ).bp
        return corpus_score

github facebookresearch / vizseq / vizseq / scorers / bleu.py
                # Score each batch of (hypotheses, references) in a worker process.
                futures = [
                    executor.submit(
                        sb.corpus_bleu, b[0], b[1], force=True,
                        tokenize=tokenizer
                    )
                    for b in batches
                ]
                progress = as_completed(futures)
                if self.verbose:
                    progress = tqdm(progress)
                for future in progress:
                    s = future.result()
                    ref_len += s.ref_len
                    sys_len += s.sys_len
                    for n in range(sb.NGRAM_ORDER):
                        correct[n] += s.counts[n]
                        total[n] += s.totals[n]
                corpus_score = sb.compute_bleu(
                    correct, total, sys_len, ref_len, smooth_method='exp'
                ).score
        return corpus_score
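Note that the two vizseq scorers read different fields off the object compute_bleu returns: the brevity-penalty scorer uses .bp while the BLEU scorer uses .score. In sacrebleu 1.x that return value is a BLEU namedtuple, so the other aggregated statistics are available as well; a small sketch:

import sacrebleu as sb

bleu = sb.corpus_bleu(["the cat sat on the mat"], [["the cat is on the mat"]])
result = sb.compute_bleu(bleu.counts, bleu.totals, bleu.sys_len, bleu.ref_len,
                         smooth_method='exp')

print(result.score)       # corpus BLEU on a 0-100 scale
print(result.bp)          # brevity penalty in (0, 1]
print(result.precisions)  # per-order n-gram precisions
print(result.sys_len, result.ref_len)  # aggregated hypothesis / reference lengths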

github freewym / espresso / examples / translation_moe / score.py
def sentence_bleu(hypothesis, reference):
    bleu = _corpus_bleu(hypothesis, reference)
    # Add-one smoothing on the higher-order n-gram counts for sentence-level BLEU.
    for i in range(1, 4):
        bleu.counts[i] += 1
        bleu.totals[i] += 1
    bleu = compute_bleu(
        bleu.counts, bleu.totals,
        bleu.sys_len, bleu.ref_len,
        smooth_method='exp', smooth_value=0.0,
    )
    return bleu.score