How to use the statistics.stdev function in Python's statistics module

To help you get started, we've selected a few statistics.stdev examples based on popular ways the function is used in public projects.
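
Before working through the project examples below, here is a minimal, self-contained example. statistics.stdev computes the sample standard deviation (dividing by n - 1) of an iterable containing at least two values, and raises statistics.StatisticsError for shorter input; statistics.pstdev is the population counterpart.

import statistics

data = [1.5, 2.5, 2.5, 2.75, 3.25, 4.75]

# Sample standard deviation (divides by n - 1); needs at least two data points.
print(statistics.stdev(data))   # ~1.081

# Population standard deviation (divides by n), shown for comparison.
print(statistics.pstdev(data))  # ~0.987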

github rrwick / Badread / test / test_qscore_model.py
def one_cigar_test(self, cigar, dist_min, dist_max):
        qscores = []
        for _ in range(self.trials):
            q = self.model.get_qscore(cigar)
            q = badread.qscore_model.qscore_char_to_val(q)
            qscores.append(q)
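        # Expected mean and standard deviation of a discrete uniform distribution over the integers dist_min..dist_max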
        target_mean = (dist_min + dist_max) / 2
        target_stdev = math.sqrt(((dist_max - dist_min + 1) ** 2 - 1) / 12)
        self.assertAlmostEqual(statistics.mean(qscores), target_mean, delta=0.5)
        self.assertAlmostEqual(statistics.stdev(qscores), target_stdev, delta=0.5)
github home-assistant / home-assistant / tests / components / statistics / test_sensor.py
def setup_method(self, method):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.values = [17, 20, 15.2, 5, 3.8, 9.2, 6.7, 14, 6]
        self.count = len(self.values)
        self.min = min(self.values)
        self.max = max(self.values)
        self.total = sum(self.values)
        self.mean = round(sum(self.values) / len(self.values), 2)
        self.median = round(statistics.median(self.values), 2)
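        # Sample standard deviation and variance of the recorded values (statistics.stdev / statistics.variance divide by n - 1)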
        self.deviation = round(statistics.stdev(self.values), 2)
        self.variance = round(statistics.variance(self.values), 2)
        self.change = round(self.values[-1] - self.values[0], 2)
        self.average_change = round(self.change / (len(self.values) - 1), 2)
        self.change_rate = round(self.average_change / (60 * (self.count - 1)), 2)
github datasciencecampus / pyGrams / tests / algorithms / test_tfidf_vv.py
        dice_score_bi, actual_bi, TP_bi, FN_bi, FP_bi = dice.get_score_bigrams(actual_terms)
        VV_TF_IDF_Tests.total_dice_bi += dice_score_bi

        VV_TF_IDF_Tests.dice_n.append(dice_score_n)
        VV_TF_IDF_Tests.dice_u.append(dice_score_u)
        VV_TF_IDF_Tests.dice_bi.append(dice_score_bi)

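        # Report mean and sample standard deviation of the accumulated dice scores once enough tests have run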
        if VV_TF_IDF_Tests.n_tests > 1:
            print(
                f" dice_n:  avg={statistics.mean(VV_TF_IDF_Tests.dice_n):0.3},"
                f" std={statistics.stdev(VV_TF_IDF_Tests.dice_n):0.3}")
            print(
                f" dice_u:  avg={statistics.mean(VV_TF_IDF_Tests.dice_u):0.3},"
                f" std={statistics.stdev(VV_TF_IDF_Tests.dice_u):0.3}")
            print(
                f" dice_bi: avg={statistics.mean(VV_TF_IDF_Tests.dice_bi):0.3},"
                f" std={statistics.stdev(VV_TF_IDF_Tests.dice_bi):0.3}")

        # shall we do try as well?
        VV_TF_IDF_Tests.n_tests += 1
        if dice_score_u < self.dice_threshold:
            tokenised_expected_terms_n = dice.expected_token_ngrams
            tokenised_expected_terms_u = dice.expected_token_unigrams
            tokenised_expected_terms_bi = dice.expected_token_bigrams

            self.fail(
                f'\n===================N-GRAMS============================\n'
                f'expected: {tokenised_expected_terms_n} \n'
github SwagLyrics / autosynch / autosynch / align.py
                f.write('Percent coverage:      {}\n'.format(song_err_pcdur))
                f.write('\n')

    with open(out_file, 'a') as f:
        f.write('\n')
        f.write('Aggregate evaluation results\n')
        f.write('------------------------------------\n')
        f.write('Avg start error:       {}\n'.format(mean(total_err_start)))
        f.write('Avg start error (abs): {}\n'.format(mean(map(abs, total_err_start))))
        f.write('Avg end error:         {}\n'.format(mean(total_err_end)))
        f.write('Avg end error (abs):   {}\n'.format(mean(map(abs, total_err_end))))

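        # Pool start and end errors, then report their mean and sample standard deviation (and the same for absolute errors)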
        total_err_start.extend(total_err_end)
        f.write('Avg total error:       {}\n'.format(mean(total_err_start)))
        f.write('Std total error:       {}\n'.format(stdev(total_err_start)))

        total_err_start = list(map(abs, total_err_start))
        f.write('Avg total error (abs): {}\n'.format(mean(total_err_start)))
        f.write('Std total error (abs): {}\n'.format(stdev(total_err_start)))

        f.write('Avg percent coverage:  {}\n'.format(mean(total_err_pcdur)))
        f.write('Std percent coverage:  {}\n'.format(stdev(total_err_pcdur)))
        f.write('\n')

        f.write('Miscellaneous errors ({})\n'.format(len(misc_err)))
        f.write('------------------------------------\n')
        for error in misc_err:
            f.write(error + '\n')

        f.write('\n')
github nirdizati-research / predict-python / src / utils / log_metrics.py
def std_var_events_in_log(log: EventLog) -> float:
    """Return the standard deviation of the number of events per trace in the log.

    :return: sample standard deviation as a float
    """
    return statistics.stdev([len(trace) for trace in log])
github olivierfriard / BORIS / src / time_budget_functions.py
                        if idx and row[1] == rows[idx - 1][1]:
                            all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))

                    out_cat.append({
                        "subject": subject,
                        "behavior": behavior,
                        "modifiers": "",
                        "duration": NA,
                        "duration_mean": NA,
                        "duration_stdev": NA,
                        "number": len(rows),
                        "inter_duration_mean":
                        round(statistics.mean(all_event_interdurations), 3)
                        if len(all_event_interdurations) else NA,
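                        # statistics.stdev requires at least two inter-event durations; otherwise report NA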
                        "inter_duration_stdev":
                        round(statistics.stdev(all_event_interdurations), 3)
                        if len(all_event_interdurations) > 1 else NA
                    })

                if STATE in project_functions.event_type(behavior, ethogram):

                    cursor.execute(("SELECT occurence, observation FROM events "
                                    "WHERE subject = ? AND code = ? ORDER BY observation, occurence"),
                                   (subject, behavior))

                    rows = list(cursor.fetchall())
                    if not len(rows):
                        if not parameters[EXCLUDE_BEHAVIORS]:  # include behaviors without events
                            out.append({"subject": subject, "behavior": behavior,
                                        "modifiers": "", "duration": 0, "duration_mean": 0,
                                        "duration_stdev": "NA", "number": 0, "inter_duration_mean": "-",
                                        "inter_duration_stdev": "-"})
github aio-libs / aiohttp / benchmark / async.py
profile=args.profile))
        all_times[test_name].extend(times)
        all_rps[test_name].append(rps)

    if args.profile:
        profiler.dump_stats('out.prof')

    print()

    for test_name in sorted(all_rps):
        rps = all_rps[test_name]
        times = [t * 1000 for t in all_times[test_name]]

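        # Summarise each test: mean RPS, plus mean, sample standard deviation and median of request times in ms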
        rps_mean = mean(rps)
        times_mean = mean(times)
        times_stdev = stdev(times)
        times_median = median(times)
        print('Results for', test_name)
        print('RPS: {:d},\tmean: {:.3f} ms,'
              '\tstandard deviation {:.3f} ms\tmedian {:.3f} ms'
              .format(int(rps_mean),
                      times_mean,
                      times_stdev,
                      times_median))
    return 0
github sambler / myblendercontrib / tissue / colors_groups_exchanger.py
        for e in bm.edges:
            if self.mode == 'LENGTH':
                length = e.calc_length()
                if length == 0: continue
                id0 = e.verts[0].index
                id1 = e.verts[1].index
                lap[id0] += weight[id1]/length - weight[id0]/length
                lap[id1] += weight[id0]/length - weight[id1]/length
            else:
                id0 = e.verts[0].index
                id1 = e.verts[1].index
                lap[id0] += weight[id1] - weight[id0]
                lap[id1] += weight[id0] - weight[id1]

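        # Discard laplacian values more than two standard deviations away from the mean before taking bounds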
        mean_lap = mean(lap)
        stdev_lap = stdev(lap)
        filter_lap = [i for i in lap if mean_lap-2*stdev_lap < i < mean_lap+2*stdev_lap]
        if self.bounds == 'MANUAL':
            min_def = self.min_def
            max_def = self.max_def
        elif self.bounds == 'AUTOMATIC':
            min_def = min(filter_lap)
            max_def = max(filter_lap)
            self.min_def = min_def
            self.max_def = max_def
        elif self.bounds == 'NEGATIVE':
            min_def = 0
            max_def = min(filter_lap)
            self.min_def = min_def
            self.max_def = max_def
        elif self.bounds == 'POSITIVE':
            min_def = 0
github acadTags / Automated-Social-Annotation / 7 PLST / PLST.py
std_test_hamming_loss_th, min_test_hamming_loss_th, max_test_hamming_loss_th, final_test_prec_th, std_test_prec_th,
    min_test_prec_th, max_test_prec_th, final_test_rec_th, std_test_rec_th, min_test_rec_th, max_test_rec_th,
    final_test_fmeasure_th, std_test_fmeasure_th, min_test_fmeasure_th, max_test_fmeasure_th) + "\n"
    output_csv_test = output_csv_test + "\n" + "average" + "," + str(round(final_test_hamming_loss_th, 3)) + "±" + str(
        round(std_test_hamming_loss_th, 3)) + "," + str(round(final_test_acc_th, 3)) + "±" + str(
        round(std_test_acc_th, 3)) + "," + str(round(final_test_prec_th, 3)) + "±" + str(
        round(std_test_prec_th, 3)) + "," + str(round(final_test_rec_th, 3)) + "±" + str(
        round(std_test_rec_th, 3)) + "," + str(round(final_test_fmeasure_th, 3)) + "±" + str(
        round(std_test_fmeasure_th, 3))

    setting = "dataset:" + str(FLAGS.dataset) + "\nC: " + str(FLAGS.C) + "\ngamma: " + str(FLAGS.gamma) + "\nnum_clusters: " + str(FLAGS.num_clusters)
    print("--- The whole program took %s seconds ---" % (time.time() - start_time))
    time_used = "--- The whole program took %s seconds ---" % (time.time() - start_time)
    if FLAGS.kfold != -1:
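        # With k-fold runs, report training time as mean ± sample standard deviation of the per-run times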
        print("--- The average training took %s ± %s seconds ---" % (
        sum(time_train) / num_runs, statistics.stdev(time_train)))
        average_time_train = "--- The average training took %s ± %s seconds ---" % (
        sum(time_train) / num_runs, statistics.stdev(time_train))
    else:
        print("--- The average training took %s ± %s seconds ---" % (sum(time_train) / num_runs, 0))
        average_time_train = "--- The average training took %s ± %s seconds ---" % (sum(time_train) / num_runs, 0)

    # output setting configuration, results, prediction and time used
    output_to_file('PLST ' + str(FLAGS.dataset) + " C " + str(FLAGS.C) + ' gamma' + str(FLAGS.gamma) + ' num_clusters' + str(FLAGS.num_clusters) + ' gp_id' + str(
        FLAGS.marking_id) + '.txt',
                   setting + '\n' + output_valid + '\n' + output_test + '\n' + prediction_str + '\n' + time_used + '\n' + average_time_train)
    # output structured evaluation results
    output_to_file('PLST ' + str(FLAGS.dataset) + " C " + str(FLAGS.C) + ' gamma' + str(FLAGS.gamma) + ' num_clusters' + str(FLAGS.num_clusters) + ' gp_id' + str(
        FLAGS.marking_id) + ' valid.csv', output_csv_valid)
    output_to_file('PLST ' + str(FLAGS.dataset) + " C " + str(FLAGS.C) + ' gamma' + str(FLAGS.gamma) + ' num_clusters' + str(FLAGS.num_clusters) + ' gp_id' + str(
        FLAGS.marking_id) + ' test.csv', output_csv_test)
github acadTags / Automated-Social-Annotation / 2 HAN / HAN_train.py
    max_test_rec_topk = max(test_rec_topk)
    max_test_fmeasure_topk = max(test_fmeasure_topk)
    max_test_hamming_loss_topk = max(test_hamming_loss_topk)
    
    if FLAGS.kfold != -1:
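        # With k-fold evaluation, compute the standard deviation of each metric across the runs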
        std_test_loss = statistics.stdev(test_loss)
        std_test_acc_th = statistics.stdev(test_acc_th) # to change
        std_test_prec_th = statistics.stdev(test_prec_th)
        std_test_rec_th = statistics.stdev(test_rec_th)
        std_test_fmeasure_th = statistics.stdev(test_fmeasure_th)
        std_test_hamming_loss_th = statistics.stdev(test_hamming_loss_th)
        std_test_acc_topk = statistics.stdev(test_acc_topk)
        std_test_prec_topk = statistics.stdev(test_prec_topk)
        std_test_rec_topk = statistics.stdev(test_rec_topk)
        std_test_fmeasure_topk = statistics.stdev(test_fmeasure_topk)
        std_test_hamming_loss_topk = statistics.stdev(test_hamming_loss_topk)
    
    final_test_loss = sum(test_loss)/num_runs # final is average
    final_test_acc_th = sum(test_acc_th)/num_runs
    final_test_prec_th = sum(test_prec_th)/num_runs
    final_test_rec_th = sum(test_rec_th)/num_runs
    final_test_fmeasure_th = sum(test_fmeasure_th)/num_runs
    final_test_hamming_loss_th = sum(test_hamming_loss_th)/num_runs
    final_test_acc_topk = sum(test_acc_topk)/num_runs
    final_test_prec_topk = sum(test_prec_topk)/num_runs
    final_test_rec_topk = sum(test_rec_topk)/num_runs
    final_test_fmeasure_topk = sum(test_fmeasure_topk)/num_runs
    final_test_hamming_loss_topk = sum(test_hamming_loss_topk)/num_runs
    
    print("HAN==>Final Test results Validation Loss:%.3f ± %.3f (%.3f - %.3f)\tValidation Accuracy: %.3f ± %.3f (%.3f - %.3f)\tValidation Hamming Loss: %.3f ± %.3f (%.3f - %.3f)\tValidation Precision: %.3f ± %.3f (%.3f - %.3f)\tValidation Recall: %.3f ± %.3f (%.3f - %.3f)\tValidation F-measure: %.3f ± %.3f (%.3f - %.3f)\tValidation Accuracy@k: %.3f ± %.3f (%.3f - %.3f)\tValidation Hamming Loss@k: %.3f ± %.3f (%.3f - %.3f)\tValidation Precision@k: %.3f ± %.3f (%.3f - %.3f)\tValidation Recall@k: %.3f ± %.3f (%.3f - %.3f)\tValidation F-measure@k: %.3f ± %.3f (%.3f - %.3f)" % (final_test_loss,std_test_loss,min_test_loss,max_test_loss,final_test_acc_th,std_test_acc_th,min_test_acc_th,max_test_acc_th,final_test_hamming_loss_th,std_test_hamming_loss_th,min_test_hamming_loss_th,max_test_hamming_loss_th,final_test_prec_th,std_test_prec_th,min_test_prec_th,max_test_prec_th,final_test_rec_th,std_test_rec_th,min_test_rec_th,max_test_rec_th,final_test_fmeasure_th,std_test_fmeasure_th,min_test_fmeasure_th,max_test_fmeasure_th,final_test_acc_topk,std_test_acc_topk,min_test_acc_topk,max_test_acc_topk,final_test_hamming_loss_topk,std_test_hamming_loss_topk,min_test_hamming_loss_topk,max_test_hamming_loss_topk,final_test_prec_topk,std_test_prec_topk,min_test_prec_topk,max_test_prec_topk,final_test_rec_topk,std_test_rec_topk,min_test_rec_topk,max_test_rec_topk,final_test_fmeasure_topk,std_test_fmeasure_topk,min_test_fmeasure_topk,max_test_fmeasure_topk))
    #output the result to a file
    output_test = output_test + "\n" + "HAN==>Final Test results Validation Loss:%.3f ± %.3f (%.3f - %.3f)\tValidation Accuracy: %.3f ± %.3f (%.3f - %.3f)\tValidation Hamming Loss: %.3f ± %.3f (%.3f - %.3f)\tValidation Precision: %.3f ± %.3f (%.3f - %.3f)\tValidation Recall: %.3f ± %.3f (%.3f - %.3f)\tValidation F-measure: %.3f ± %.3f (%.3f - %.3f)\tValidation Accuracy@k: %.3f ± %.3f (%.3f - %.3f)\tValidation Hamming Loss@k: %.3f ± %.3f (%.3f - %.3f)\tValidation Precision@k: %.3f ± %.3f (%.3f - %.3f)\tValidation Recall@k: %.3f ± %.3f (%.3f - %.3f)\tValidation F-measure@k: %.3f ± %.3f (%.3f - %.3f)" % (final_test_loss,std_test_loss,min_test_loss,max_test_loss,final_test_acc_th,std_test_acc_th,min_test_acc_th,max_test_acc_th,final_test_hamming_loss_th,std_test_hamming_loss_th,min_test_hamming_loss_th,max_test_hamming_loss_th,final_test_prec_th,std_test_prec_th,min_test_prec_th,max_test_prec_th,final_test_rec_th,std_test_rec_th,min_test_rec_th,max_test_rec_th,final_test_fmeasure_th,std_test_fmeasure_th,min_test_fmeasure_th,max_test_fmeasure_th,final_test_acc_topk,std_test_acc_topk,min_test_acc_topk,max_test_acc_topk,final_test_hamming_loss_topk,std_test_hamming_loss_topk,min_test_hamming_loss_topk,max_test_hamming_loss_topk,final_test_prec_topk,std_test_prec_topk,min_test_prec_topk,max_test_prec_topk,final_test_rec_topk,std_test_rec_topk,min_test_rec_topk,max_test_rec_topk,final_test_fmeasure_topk,std_test_fmeasure_topk,min_test_fmeasure_topk,max_test_fmeasure_topk) + "\n"