How to use the matplotlib.pyplot module in matplotlib

To help you get started, we’ve selected a few matplotlib.pyplot examples, based on popular ways the module is used in public projects.

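Before looking at the project snippets below, here is a minimal, self-contained sketch of the basic pyplot workflow they all build on; the data, labels, and output file name are purely illustrative.

import numpy as np
import matplotlib.pyplot as plt

# illustrative data
x = np.linspace(0, 10, 100)
y = np.sin(x)

plt.figure(figsize=(6, 4))
plt.plot(x, y, label='sin(x)')
plt.xlabel('x')
plt.ylabel('sin(x)')
plt.title('Minimal pyplot example')
plt.legend(loc='best')
plt.savefig('example.png')    # or plt.show() to display interactively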

github columbia / fairtest / src / fairtest / modules / bug_report / report.py View on Github
for (key, group) in grouped]
                min_key_diff = min([keys[_ + 1] - keys[_]
                                    for _ in range(len(keys) - 1)])

                plt.boxplot(groups, positions=keys,
                            widths=(1.0*min_key_diff)/2, sym='')

                plt.rcParams.update({'font.size': 22})
                if namer.sens in namer.encoders:
                    ax = plt.gca()
                    ax.set_xticklabels(namer.get_sens_feature_vals(
                        len(data[data.columns[1]].unique())))
                else:
                    plt.xlim(np.min(sens) - 0.4*np.std(sens),
                             np.max(sens) + 0.4*np.std(sens))
                    plt.ylim(np.min(out) - 0.4*np.std(out),
                             np.max(out) + 0.4*np.std(out))

                plt.xlabel(data.columns[1])
                plt.ylabel(data.columns[0])
                plt.show()

            (effect_low, effect_high, p_val) = context_stats[i+1]
            print('p-value = {:.2e} ; {} = [{:.4f}, {:.4f}]'.format(
                p_val, 'CORR', effect_low, effect_high), file=output_stream)
            print(file=output_stream)

        context_stats = context_stats[0]

    # print p-value and confidence interval of correlation
    (effect_low, effect_high, p_val) = context_stats
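
The snippet above is cut out of a larger report function, so it is not runnable on its own. Below is a minimal sketch of the same boxplot pattern, with groups placed at numeric x positions and box widths derived from the smallest gap between positions; the keys, random data, and axis labels are illustrative, not fairtest's.

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
keys = [1, 3, 4, 8]                                   # numeric x positions of the groups
groups = [rng.normal(k, 1.0, size=50) for k in keys]

min_key_diff = min(keys[i + 1] - keys[i] for i in range(len(keys) - 1))

plt.rcParams.update({'font.size': 14})
plt.boxplot(groups, positions=keys, widths=min_key_diff / 2, sym='')
plt.xlabel('sensitive feature')
plt.ylabel('output')
plt.show()
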
github loliverhennigh / Phy-Net / test / generate_em_error_plot.py View on Github
# run network 
        for step in tqdm(range(FLAGS.test_length)):
          # network step
          state_feed_dict, boundary_feed_dict = feed_dict(1, shape, FLAGS.lattice_size, sim, run+step+0)
          fd = {state:state_feed_dict, boundary:boundary_feed_dict, y_1:y_1_g, small_boundary_mul:small_boundary_mul_g, small_boundary_add:small_boundary_add_g}
          mse, y_1_g = sess.run([mean_squared_error, y_2],feed_dict=fd)
          # calc error
          mse_error[sim, step] = mse

    # step count variable for plotting
    x = np.arange(FLAGS.test_length)

    # mse 
    mse_error_mean, mse_error_std = calc_mean_and_std(mse_error)

    plt.figure(figsize = (6,12))
    plt.style.use('seaborn-darkgrid')

    font = {'family': 'sans-serif',  # 'normal' is not a registered font family
            'weight': 'normal',
            'size': 16}

    matplotlib.rc('font', **font)

    plt.title(str(shape[0]) + "x" + str(shape[1]) + " EM Simulation", y=1.00, fontsize="x-large")
    plt.errorbar(x, mse_error_mean, yerr=mse_error_std, c='y', capsize=0, lw=1.0,
                 label='MSE')  # label gives plt.legend below a handle to show
    plt.xlabel('step', fontsize="x-large")
    plt.ylabel('MSError', fontsize="x-large")
    plt.legend(loc="upper left")  # legend location strings use spaces, not underscores
    plt.savefig("figs/" + str(shape[0]) + "x" + str(shape[1]) + "_2d_em_error_plot.png")
github mindgarage / Ovation / templates / sentiment_analysis_classification.py View on Github
plt.title('Regression Plot for Test Set Similarities')
        plt.ylabel('Ground Truth Similarities')
        plt.xlabel('Predicted Similarities')

        print("Performing Non Parametric Regression")
        non_param_reg = non_parametric_regression(train_predicted_sentiments,
                                          train_gt,
                                          method=npr_methods.SpatialAverage())

        reg_test_sentiments = non_param_reg(test_predicted_sentiments)
        reg_accuracy = pearsonr(reg_test_sentiments, test_gt)
        reg_mse = mean_squared_error(test_gt, reg_test_sentiments)
        print("Post Regression Test Results:\Accuraccy: {}\nMSE: {}".format(
                                                        reg_accuracy, reg_mse))

        plt.scatter(reg_test_sentiments, test_gt, label='Similarities', s=0.2)
        plt.savefig(figure_path)

        plt.clf()

        plt.title('Regression Plot for Test Set Similarities')
        plt.ylabel('Ground Truth Similarities')
        plt.xlabel('Predicted Similarities')
        plt.scatter(test_predicted_sentiments, test_gt,
                    label='Similarities', s=0.2)
        plt.plot(grid, non_param_reg(grid), label="Local Linear Smoothing",
                 linewidth=2.0, color='r')
        plt.savefig(reg_fig_path)

        print("saved similarity plot at {}".format(figure_path))
        print("saved regression plot at {}".format(reg_fig_path))
github flaport / fdtd / examples / example.py View on Github
profiler.add_function(grid.update_E)
profiler.enable()

# run simulation
grid.run(50, progress_bar=False)


# print profiler summary
profiler.print_stats()


## Plots

# Fields
if True:
    fig, axes = plt.subplots(2, 3, squeeze=False)
    titles = ["Ex: xy", "Ey: xy", "Ez: xy", "Hx: xy", "Hy: xy", "Hz: xy"]

    fields = bd.stack(
        [
            grid.E[:, :, 0, 0],
            grid.E[:, :, 0, 1],
            grid.E[:, :, 0, 2],
            grid.H[:, :, 0, 0],
            grid.H[:, :, 0, 1],
            grid.H[:, :, 0, 2],
        ]
    )

    m = max(abs(fields.min().item()), abs(fields.max().item()))

    for ax, field, title in zip(axes.ravel(), fields, titles):
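
The loop above is cut off by the excerpt. Here is a self-contained sketch of the same layout, a 2x3 grid of imshow panels sharing one symmetric color scale, with random arrays standing in for the E and H field slices.

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
fields = rng.normal(size=(6, 64, 64))        # illustrative stand-ins for Ex..Hz slices
titles = ["Ex: xy", "Ey: xy", "Ez: xy", "Hx: xy", "Hy: xy", "Hz: xy"]

# symmetric limits so positive and negative values share one color scale
m = max(abs(fields.min()), abs(fields.max()))

fig, axes = plt.subplots(2, 3, squeeze=False)
for ax, field, title in zip(axes.ravel(), fields, titles):
    ax.imshow(field, vmin=-m, vmax=m, cmap='RdBu')
    ax.set_title(title)
    ax.set_xticks([])
    ax.set_yticks([])
fig.tight_layout()
plt.show()
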
github scikit-multiflow / scikit-multiflow / src / skmultiflow / visualization / evaluation_visualizer.py View on Github
are: lists of values and matplotlib line objects.
        
        The __configure function will also initialize each subplot with the 
        correct name and setup the axis.
        
        The subplot size will self-adjust to each screen size, so that data can 
        be better viewed in different contexts.

        """
        font_size_small = 8
        font_size_medium = 10
        font_size_large = 12

        plt.rc('font', size=font_size_small)  # controls default text sizes
        plt.rc('axes', titlesize=font_size_medium)  # font size of the axes title
        plt.rc('axes', labelsize=font_size_small)  # font size of the x and y labels
        plt.rc('xtick', labelsize=font_size_small)  # font size of the tick labels
        plt.rc('ytick', labelsize=font_size_small)  # font size of the tick labels
        plt.rc('legend', fontsize=font_size_small)  # legend font size
        plt.rc('figure', titlesize=font_size_large)  # font size of the figure title

        warnings.filterwarnings("ignore", ".*GUI is implemented.*")
        warnings.filterwarnings("ignore", ".*left==right.*")
        warnings.filterwarnings("ignore", ".*Passing 1d.*")

        self._sample_ids = []
        memory_time = {}

        plt.ion()
        self.fig = plt.figure(figsize=(9, 5))
        self.fig.suptitle(self.dataset_name)
        plot_metrics = [m for m in self.metrics if m not in [constants.RUNNING_TIME, constants.MODEL_SIZE]]
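
The visualizer above configures fonts through plt.rc and then switches on interactive mode with plt.ion() so the figure can be refreshed while data streams in. Below is a minimal sketch of that update loop, with a random metric in place of the evaluator's measurements.

import numpy as np
import matplotlib.pyplot as plt

plt.ion()                                    # interactive mode: drawing does not block
fig = plt.figure(figsize=(9, 5))
fig.suptitle('streaming metric')
ax = fig.add_subplot(111)
line, = ax.plot([], [])
ax.set_xlim(0, 100)
ax.set_ylim(0, 1)

xs, ys = [], []
for i in range(100):
    xs.append(i)
    ys.append(np.random.rand())              # illustrative metric value
    line.set_data(xs, ys)
    fig.canvas.draw_idle()
    plt.pause(0.01)                          # give the GUI a chance to redraw

plt.ioff()
plt.show()
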
github david-zwicker / video-analysis / projects / tail_segments / kymograph.py View on Github
use_tex=None):
        """ initializes the plotter with a `kymograph` and a `title`.
        `length_scale` sets the length of a single pixel in micrometer
        `time_scale` sets the length of a single pixel in minutes
        `use_tex` determines whether tex is used for outputting values 
        """
        self.kymograph = kymograph
        self.title = title
        self.length_scale = length_scale
        self.time_scale = time_scale
    
        # setup the plotting
        if use_tex is not None:
            plt.rcParams['text.usetex'] = use_tex

        self.fig = plt.figure()
        self.ax = plt.gca()
    
        # create image and determine the length and time scales
        img = self.kymograph.get_image()
        distance = img.shape[1] * self.length_scale
        duration = img.shape[0] * self.time_scale 
        extent = (0, distance, 0, duration)
        
        # plot image in gray scale
        self.ax.imshow(img, extent=extent, aspect='auto',
                       interpolation=self.interpolation, origin='lower',
                       cmap=plt.get_cmap('gray'))
        
        # use a time format for the y axis
        def hours_minutes(value, pos):
            """ formatting function """
github GUDHI / gudhi-devel / src / python / example / diagram_vectorizations_distances_kernels.py View on Github
from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouette, BettiCurve, ComplexPolynomial,\
  TopologicalVector, DiagramScaler, BirthPersistenceTransform,\
  PersistenceImage, PersistenceWeightedGaussianKernel, Entropy, \
  PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\
  SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel

D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]])
diags = [D]

diags = DiagramSelector(use=True, point_type="finite").fit_transform(diags)
diags = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags)
diags = DiagramScaler(use=True, scalers=[([1], Clamping(limit=.9))]).fit_transform(diags)

D = diags[0]
plt.scatter(D[:,0],D[:,1])
plt.plot([0.,1.],[0.,1.])
plt.title("Test Persistence Diagram for vector methods")
plt.show()

LS = Landscape(resolution=1000)
L = LS.fit_transform(diags)
plt.plot(L[0][:1000])
plt.plot(L[0][1000:2000])
plt.plot(L[0][2000:3000])
plt.title("Landscape")
plt.show()

def pow(n):
  return lambda x: np.power(x[1]-x[0],n)

SH = Silhouette(resolution=1000, weight=pow(2))
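
The gudhi-specific preprocessing aside, the plotting here is plain pyplot: scatter the (birth, death) pairs and draw the diagonal as a reference line. A sketch with hand-made points, no gudhi required:

import numpy as np
import matplotlib.pyplot as plt

# illustrative (birth, death) pairs with infinite deaths already removed
D = np.array([[0.0, 0.4], [0.1, 0.2], [0.3, 0.8], [0.6, 0.8]])

plt.scatter(D[:, 0], D[:, 1], label='points')
plt.plot([0.0, 1.0], [0.0, 1.0], label='diagonal')   # birth == death reference
plt.xlabel('birth')
plt.ylabel('death')
plt.title('Persistence diagram (illustrative data)')
plt.legend()
plt.show()
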
github louisenaud / stock_prediction / src / run_multistep.py View on Github
for k in range(batch_size):
                    predicted.append(output.data[k, 0, :, :].cpu().numpy())
                    gt.append(target.data[k, 0, :, :].cpu().numpy())

        print("Epoch = ", i)
        print("Loss = ", loss_)
        losses.append(loss_)
        writer.add_scalar("loss_epoch", loss_, i)

        scheduler_model.step()
        # Plot current predictions
        if i % display_step == 0:
            gt = np.array(gt)
            series = dset.train_data
            xs = np.array(range(series.shape[0]))
            h = plt.figure()
            plt.plot(xs, series[:, 0])

            for t in range(len(predicted)):
                xaxis = [x for x in range(t*n_step_data, t*n_step_data + n_out)]
                yaxis = predicted[t][0, :]
                plt.plot(xaxis, yaxis)
            plt.legend()
            plt.show()

    torch.save(model, 'conv2d_' + fn_base + '.pkl')

    h = plt.figure()
    x = range(len(losses))
    plt.plot(x, np.array(losses), label="loss")
    plt.xlabel("Time")
    plt.ylabel("Stock Price")
github jkitchin / dft-book / dft-scripts / script-210.py View on Github
return enthalpy(T) - T * entropy(T) + kB * T * np.log(P / atm)
P = 1e-10*atm
def func(T):
    'Cu2O'
    return -1.95 - 0.5*DeltaMu(T, P)
print('Cu2O decomposition temperature is {0:1.0f} K'.format(
    fsolve(func, 900)[0]))
def func(T):
    'Ag2O'
    return -0.99 - 0.5 * DeltaMu(T, P)
print('Ag2O decomposition temperature is {0:1.0f} K'.format(
    fsolve(func, 470)[0]))
T = np.linspace(100, 1000)
# Here we plot delta mu as a function of temperature at different pressures.
# The labels below are raw strings, so the backslash in \times does not need
# to be escaped as \\times the way it would in a plain string.
plt.plot(T, DeltaMu(T, 1e10*atm), label=r'1$\times 10^{10}$ atm')
plt.plot(T, DeltaMu(T, 1e5*atm), label=r'1$\times 10^5$ atm')
plt.plot(T, DeltaMu(T, 1*atm), label='1 atm')
plt.plot(T, DeltaMu(T, 1e-5*atm), label=r'1$\times 10^{-5}$ atm')
plt.plot(T, DeltaMu(T, 1e-10*atm), label=r'1$\times 10^{-10}$ atm')
plt.xlabel('Temperature (K)')
plt.ylabel(r'$\Delta \mu_{O_2}(T,p)$ (eV)')
plt.legend(loc='best')
plt.savefig('images/O2-mu-diff-p.png')
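
One detail worth calling out from the comment above: the labels work as written because they are raw strings, so the backslash in \times reaches matplotlib's mathtext untouched; in a plain string it would have to be escaped. A small sketch of both spellings, with an illustrative curve:

import numpy as np
import matplotlib.pyplot as plt

T = np.linspace(100, 1000)
y = -1e-3 * T                                         # illustrative curve

# raw string: no escaping needed
plt.plot(T, y, label=r'1$\times 10^{10}$ atm')
# plain string: the backslash itself must be escaped
plt.plot(T, 1.1 * y, label='1$\\times 10^{5}$ atm')

plt.xlabel('Temperature (K)')
plt.ylabel(r'$\Delta \mu_{O_2}(T,p)$ (eV)')
plt.legend(loc='best')
plt.savefig('mathtext-labels.png')
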
github tomasz-oponowicz / spoken_language_identification / model.py View on Github
features = features[mask]

    labels = label_binarizer.transform(metadata[:, 0])

    print("[{group}] labels: {labels}, features: {features}".format(
        group=group, labels=labels.shape, features=features.shape
    ))

    image = features[0, :, :, 0]

    # plt.imshow doesn't support float16
    image = image.astype('float32')

    plt.figure() # reset plot
    plt.imshow(image)
    plt.savefig(group + '.png', bbox_inches='tight')

    if not skip_input_validation:
        validate(labels, features, metadata, label_binarizer.classes_)

    return (labels, features, metadata)
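
As the comment above notes, the project up-casts its float16 feature maps before calling plt.imshow. A self-contained sketch of that save-to-file pattern, with a random array in place of the real features and an illustrative file name:

import numpy as np
import matplotlib.pyplot as plt

# illustrative feature map stored as float16 (e.g. a spectrogram slice)
image = np.random.rand(64, 128).astype('float16')

# up-cast before plotting, as the snippet above does
image = image.astype('float32')

plt.figure()                                 # start from a fresh figure
plt.imshow(image)
plt.savefig('features.png', bbox_inches='tight')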