How to use the statistics.statistics function in statistics

To help you get started, we’ve selected a few statistics examples based on popular ways the function is used in public projects.

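Note that in every project below, statistics is a helper defined inside the repository itself, not Python's built-in statistics module: the standard library provides functions such as statistics.mean, but no statistics.statistics callable. The RL-attack examples construct it from a list of scalar names for TensorBoard-style logging, while the ISIC classification examples construct it from hyperparameters and a mode string and use it to accumulate evaluation accuracy.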

github behzadanksu / rl-attack / train.py
        iteration_time_est = RunningAvg(0.999)
        obs = env.reset()
        # Record the mean of the \sigma
        sigma_name_list = []
        sigma_list = []
        for param in tf.trainable_variables():
            # only record the \sigma in the action network
            if 'sigma' in param.name and 'deepq/q_func/action_value' in param.name:
                summary_name = param.name.replace('deepq/q_func/action_value/', '').replace('/', '.').split(':')[0]
                sigma_name_list.append(summary_name)
                sigma_list.append(tf.reduce_mean(tf.abs(param)))
        f_mean_sigma = U.function(inputs=[], outputs=sigma_list)
        # Statistics
        writer = tf.summary.FileWriter(savedir, sess.graph)
        im_stats = statistics(scalar_keys=['action', 'im_reward', 'td_errors', 'huber_loss']+sigma_name_list)
        ep_stats = statistics(scalar_keys=['ep_reward', 'ep_length'])  
        # Main training loop
        ep_length = 0
        while True:
            num_iters += 1
            ep_length += 1

            #V: Perturb observation if we are past the init stage and at a designated attack step #
            #if craft_adv != None and (num_iters >= args.attack_init) and ((num_iters - args.attack_init) % args.attack_freq == 0) :          
            if craft_adv is not None and (num_iters >= args.attack_init) and (random.random() <= args.attack_prob):
                obs = craft_adv(np.array(obs)[None])[0]

            # Take action and store transition in the replay buffer.
            if args.noisy:
                # greedily choose
                action = act(np.array(obs)[None], stochastic=False)[0]
            else:
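The two statistics objects above are built from scalar names and paired with the tf.summary.FileWriter created just before them, but the snippet is cut off before any logging calls. The stand-in below is only a minimal sketch of what such a helper could look like under TensorFlow 1.x; its internals and the add_all_summary method name and signature are assumptions made for illustration, not the repository's actual implementation.

import tensorflow as tf  # assumes TensorFlow 1.x, matching tf.summary.FileWriter above


class statistics(object):
    # Minimal stand-in: one tf.summary.scalar per tracked key.
    def __init__(self, scalar_keys=()):
        self.scalar_keys = list(scalar_keys)
        self.placeholders = [tf.placeholder(tf.float32, shape=[])
                             for _ in self.scalar_keys]
        summaries = [tf.summary.scalar(key, ph)
                     for key, ph in zip(self.scalar_keys, self.placeholders)]
        self.merged = tf.summary.merge(summaries)

    def add_all_summary(self, writer, values, step):
        # Hypothetical logging method: feed one value per scalar key and
        # write the merged summary for this step. Assumes a default session
        # is active, as it is in the surrounding training script.
        feed = dict(zip(self.placeholders, values))
        summary_str = tf.get_default_session().run(self.merged, feed_dict=feed)
        writer.add_summary(summary_str, step)

With a helper shaped like this, the training loop could periodically call something like im_stats.add_all_summary(writer, [mean_action, mean_reward, mean_td_error, mean_loss] + mean_sigma_values, num_iters); the exact call pattern is not shown in the snippet and is only a guess consistent with the setup above.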
github tensorflow / cleverhans / examples / RL-attack / train.py
    for param in tf.trainable_variables():
      # only record the \sigma in the action network
      if 'sigma' in param.name \
              and 'deepq/q_func/action_value' in param.name:
        summary_name = \
            param.name.replace(
                'deepq/q_func/action_value/', '').replace(
                    '/', '.').split(':')[0]
        sigma_name_list.append(summary_name)
        sigma_list.append(tf.reduce_mean(tf.abs(param)))
    f_mean_sigma = U.function(inputs=[], outputs=sigma_list)
    # Statistics
    writer = tf.summary.FileWriter(savedir, sess.graph)
    im_stats = statistics(scalar_keys=['action', 'im_reward', 'td_errors',
                                       'huber_loss'] + sigma_name_list)
    ep_stats = statistics(scalar_keys=['ep_reward', 'ep_length'])
    # Main training loop
    ep_length = 0
    while True:
      num_iters += 1
      ep_length += 1

      # V: Perturb observation if we are past the init stage
      # and at a designated attack step
      # if craft_adv != None and (num_iters >= args.attack_init)
      # and ((num_iters - args.attack_init) % args.attack_freq == 0) :
      if craft_adv is not None and (num_iters >= args.attack_init) and (
              random.random() <= args.attack_prob):
        obs = craft_adv(np.array(obs)[None])[0]

      # Take action and store transition in the replay buffer.
      if args.noisy:
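This cleverhans example is essentially the same training loop as the behzadanksu / rl-attack snippet above, reformatted to the cleverhans style (note the idiomatic craft_adv is not None check), so the same statistics(scalar_keys=...) pattern applies.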
github LincolnZjx / ISIC_2018_Classification / previous_code / tf_version / train_v4.py
            #            _, y_ = sess.run([x, y])
            #            y_list_.extend((np.argmax(y_, axis=1)))
            #    except tf.errors.OutOfRangeError:
            #        cnt += 1
            #        print(cnt)
            #        assert all([a==b for a, b in zip(y_list, y_list_)])

            pp = []
            while True:
                predictions = est.predict(input_fn=lambda: data.read_record('evaluate'))
                predictions_list = []
                for pre in predictions:
                    p = np.argmax(pre['fc7'])
                    predictions_list.append(p)

                statistics_ = statistics.statistics(hps, mode='evaluate')
                statistics_.add_labels_predictions(predictions_list, y_list)
                statistics_.get_acc_normal()
                result = statistics_.get_acc_imbalanced()
                np.save('predictions_label_fc', [predictions_list, y_list])
                #np.save('predictions_label_fc_without_fulcon', [predictions_list, y_list])
                pp.append(result)

                print('---')
                np.save('result_fc', pp)
                #np.save('result_fc_without_fulcon', pp)
                time.sleep(120)
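In the ISIC snippets, statistics.statistics is constructed with hyperparameters and a mode string, fed predictions and labels, and asked for both a plain and an imbalance-aware accuracy. Only the call sites are shown, so the sketch below is a hedged, self-contained stand-in that mirrors that interface; the hps and mode arguments are accepted but unused here, and interpreting get_acc_imbalanced as mean per-class accuracy is an assumption.

import numpy as np


class statistics(object):
    # Minimal stand-in mirroring the interface used in the snippets above.
    def __init__(self, hps=None, mode='evaluate'):
        self.hps = hps      # accepted only to match the call site; unused here
        self.mode = mode
        self.labels = []
        self.predictions = []

    def add_labels_predictions(self, predictions, labels):
        # The snippets pass predictions first and ground-truth labels second.
        self.predictions.extend(int(p) for p in predictions)
        self.labels.extend(int(y) for y in labels)

    def get_acc_normal(self):
        # Plain accuracy over all collected examples.
        y = np.asarray(self.labels)
        p = np.asarray(self.predictions)
        return float(np.mean(y == p))

    def get_acc_imbalanced(self):
        # Assumed meaning: mean of per-class accuracies (balanced accuracy),
        # which is less sensitive to class imbalance than plain accuracy.
        y = np.asarray(self.labels)
        p = np.asarray(self.predictions)
        return float(np.mean([np.mean(p[y == c] == c) for c in np.unique(y)]))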
github LincolnZjx / ISIC_2018_Classification / previous_code / tf_version / train_v5.py
            #            _, y_ = sess.run([x, y])
            #            y_list_.extend((np.argmax(y_, axis=1)))
            #    except tf.errors.OutOfRangeError:
            #        cnt += 1
            #        print(cnt)
            #        assert all([a==b for a, b in zip(y_list, y_list_)])

            pp = []
            while True:
                predictions = est.predict(input_fn=lambda: data.read_record('evaluate'))
                predictions_list = []
                for pre in predictions:
                    p = np.argmax(pre['fc7'])
                    predictions_list.append(p)

                statistics_ = statistics.statistics(hps, mode='evaluate')
                statistics_.add_labels_predictions(predictions_list, y_list)
                statistics_.get_acc_normal()
                result = statistics_.get_acc_imbalanced()
                np.save('predictions_label_fc_3', [predictions_list, y_list])
                #np.save('predictions_label_fc_without_fulcon', [predictions_list, y_list])
                pp.append(result)

                print('---')
                np.save('result_fc_3', pp)
                #np.save('result_fc_without_fulcon', pp)
                time.sleep(120)
github LincolnZjx / ISIC_2018_Classification / previous_code / tf_version / train_v2.py
            #            _, y_ = sess.run([x, y])
            #            y_list_.extend((np.argmax(y_, axis=1)))
            #    except tf.errors.OutOfRangeError:
            #        cnt += 1
            #        print(cnt)
            #        assert all([a==b for a, b in zip(y_list, y_list_)])

            pp = []
            while True:
                predictions = est.predict(input_fn=lambda: data.read_record('evaluate'))
                predictions_list = []
                for pre in predictions:
                    p = np.argmax(pre['fc7'])
                    predictions_list.append(p)

                statistics_ = statistics.statistics(hps, mode='evaluate')
                statistics_.add_labels_predictions(predictions_list, y_list)
                statistics_.get_acc_normal()
                result = statistics_.get_acc_imbalanced()
                np.save('predictions_label_fc_repeat', [predictions_list, y_list])
                #np.save('predictions_label_fc_3_m', [predictions_list, y_list])
                #np.save('predictions_label_fc_without_fulcon', [predictions_list, y_list])
                pp.append(result)

                print('---')
                np.save('result_fc_repeat', pp)
                #np.save('result_fc_3_m', pp)
                #np.save('result_fc_without_fulcon', pp)
                time.sleep(120)
github idooo / tweeria / system / tweet_parser.py
		getDungeons()
		getMonstersByClass()

		getLocationsMatrix()

		getQuestsData()

		pvp_rewards = self.mongo.getu('pvp_rewards')
		self.pvp_rewards = {}
		for record in pvp_rewards:
			self.pvp_rewards.update({str(record['lvl']): record['exp']})

		self.pool_items = self.mongo.getu('items_pool')


		self.stats = statistics.statistics()
		self.achvs = achv.achievements()
		self.static_achvs = getAchvs()

		self.guilds = self.model.guilds.getGuilds()
		self.guilds_updates = {}
		self.gmessages = {}

		self.lvls = self.mongo.find('lvls', fields = {'_id':0})

		self.items = [[],[],[],[]]
		for i in [0,2,3]:
			self.items[i] = self.mongo.getu('items', {'color': i,'holidays': 0})

		getFasterStructures()
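In the tweeria parser, the helper is instantiated with no arguments and stored on the parser object next to the achievements helper, presumably so that later event handlers can record game statistics through self.stats; the snippet does not show those calls.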