How to use the plotnine.ggplot function in plotnine

To help you get started, we’ve selected a few examples based on popular ways plotnine.ggplot is used in public projects.

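Before the project excerpts below, here is a minimal, self-contained sketch of the pattern they all share: build a ggplot object from a DataFrame and an aes mapping, then compose geoms, scales, labels, and themes onto it with +. The DataFrame and column names ('t', 'regret') are invented purely for illustration.

import pandas as pd
from plotnine import ggplot, aes, geom_line, labs, theme_bw

# Toy data; the column names are illustrative only.
df = pd.DataFrame({'t': range(1, 11),
                   'regret': [1.0 / t for t in range(1, 11)]})

# ggplot(data, mapping) creates the plot object; layers are added with +.
p = (ggplot(df, aes(x='t', y='regret'))
     + geom_line(color='red', size=1.25)
     + labs(x='time period (t)', y='per-period regret')
     + theme_bw())

p.save('regret.png')  # or print(p) to display it, as several excerpts below do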

github dputhier / pygtftk / pygtftk / plugins / merge_ologram_stats.py
    message('Enclosing directories are ambiguous and cannot be used as labels. You may use "--labels".',
            type="ERROR")

    # -------------------------------------------------------------------------
    # Concatenate dataframes (row bind)
    # -------------------------------------------------------------------------

    message("Merging dataframes.")
    df_merged = pd.concat(df_list, axis=0)

    # -------------------------------------------------------------------------
    # Plotting
    # -------------------------------------------------------------------------

    message("Plotting")
    my_plot = ggplot(data=df_merged,
                     mapping=aes(y='Feature', x='dataset'))
    my_plot += geom_tile(aes(fill='summed_bp_overlaps_log2_fold_change'))
    my_plot += scale_fill_gradient2()
    my_plot += labs(fill="log2(fold change) for summed bp overlaps")

    # Points for p-val. Must be after geom_tile()
    my_plot += geom_point(data=df_merged.loc[df_merged['pval_signif']],
                          mapping=aes(x='dataset', y='Feature', color='-log_10(pval)'),
                          size=5, shape='D', inherit_aes=False)
    my_plot += scale_color_gradientn(colors=["#160E00", "#FFB025", "#FFE7BD"])
    my_plot += labs(color="-log10(p-value)")

    # Theming
    my_plot += theme_bw()
    my_plot += theme(panel_grid_major=element_blank(),
                     axis_text_x=element_text(rotation=90),
                     panel_border=element_blank())
github iosband / ts_tutorial / src / base / plot.py
def simple_algorithm_plot(experiment_name, data_path=_DEFAULT_DATA_PATH):
  """Simple plot of average instantaneous regret by agent, per timestep.

  Args:
    experiment_name: string = name of experiment config.
    data_path: string = where to look for the files.

  Returns:
    A dict {experiment_name + '_simple': ggplot plot object}.
    (See https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf for background.)
  """
  df = load_data(experiment_name, data_path)
  plt_df = (df.groupby(['t', 'agent'])
            .agg({'instant_regret': np.mean})
            .reset_index())
  p = (gg.ggplot(plt_df)
       + gg.aes('t', 'instant_regret', colour='agent')
       + gg.geom_line(size=1.25, alpha=0.75)
       + gg.xlab('time period (t)')
       + gg.ylab('per-period regret')
       + gg.scale_colour_brewer(name='agent', type='qual', palette='Set1'))
  
  plot_dict = {experiment_name + '_simple': p}
  return plot_dict
github dputhier / pygtftk / pygtftk / plugins / ologram.py
        mat_n = mat_n.assign(Statistic=['N'] * mat_n.shape[0])

        mat_s = d[['feature_type',
                   'summed_bp_overlaps_log2_fold_change',
                   'summed_bp_overlaps_pvalue']]
        # Uncomputed p-values (flagged as -1) are discarded
        mat_s = mat_s.drop(mat_s[mat_s.summed_bp_overlaps_pvalue == -1].index)
        # P-values of 0 are set to 1e-320 so that -log10 stays finite
        mat_s.loc[mat_s['summed_bp_overlaps_pvalue'] == 0, 'summed_bp_overlaps_pvalue'] = 1e-320
        mat_s = mat_s.assign(minus_log10_pvalue=list(-np.log10(list(mat_s.summed_bp_overlaps_pvalue))))
        mat_s.columns = ['Feature', 'log2_FC', 'pvalue', 'minus_log10_pvalue']
        mat_s = mat_s.assign(Statistic=['S'] * mat_s.shape[0])

        # DataFrame.append was removed in pandas 2.x; concat is the equivalent row bind
        df_volc = pd.concat([mat_n, mat_s])

        p = ggplot(data=df_volc, mapping=aes(x='log2_FC', y='minus_log10_pvalue'))
        p += geom_vline(xintercept=0, color='darkgray')
        p += geom_label(aes(label='Feature', fill='Statistic'),
                        size=5,
                        color='black',
                        alpha=.5,
                        label_size=0)
        p += ylab('-log10(pvalue)') + xlab('log2(FC)')
        p += ggtitle('Volcano plot (for both N and S statistics)')
        p += scale_fill_manual(values={'N': '#7570b3', 'S': '#e7298a'})
        p += theme_bw()

        return p
github Pinafore / qb / figures.py
    def plot_char_percent_vs_accuracy_smooth(self, category=False):
        if category:
            return (
                ggplot(self.char_plot_df)
                + aes(x='char_percent', y='correct', color='category_jmlr')
                + geom_smooth()
            )
        else:
            return (
                ggplot(self.char_plot_df)
                + aes(x='char_percent', y='correct')
                + geom_smooth(method='mavg')
            )
github iosband / ts_tutorial / src / base / plot.py
def cumulative_travel_time_plot(experiment_name, data_path=_DEFAULT_DATA_PATH):
  """Plot cumulative ratio total travel time relative to optimal shortest path.

  Args:
    experiment_name: string = name of experiment config.
    data_path: string = where to look for the files.

  Returns:
    A dict {experiment_name + '_cum': ggplot plot object}.
    (See https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf for background.)
  """
  df = load_data(experiment_name, data_path)
  df['cum_ratio'] = (df.cum_optimal - df.cum_regret) / df.cum_optimal
  plt_df = (df.groupby(['t', 'agent'])
            .agg({'cum_ratio': np.mean})
            .reset_index())
  p = (gg.ggplot(plt_df)
       + gg.aes('t', 'cum_ratio', colour='agent')
       + gg.geom_line(size=1.25, alpha=0.75)
       + gg.xlab('time period (t)')
       + gg.ylab('Total distance / optimal')
       + gg.scale_colour_brewer(name='agent', type='qual', palette='Set1')
       + gg.aes(ymin=1)
       + gg.geom_hline(yintercept=1, linetype='dashed', size=2, alpha=0.5))
  
  plot_dict = {experiment_name + '_cum': p}
  return plot_dict
github danforthcenter / plantcv / plantcv / plantcv / transform / color_correction.py
    # Combine info
    color_data_r = np.column_stack((sr, tr, red))
    color_data_g = np.column_stack((sg, tg, green))
    color_data_b = np.column_stack((sb, tb, blue))
    all_color_data = np.row_stack((color_data_b, color_data_g, color_data_r))

    # Create a dataframe with headers
    dataset = pd.DataFrame({'source': all_color_data[:, 0], 'target': all_color_data[:, 1],
                            'color': all_color_data[:, 2]})

    # Add chip numbers to the dataframe
    dataset['chip'] = chips
    dataset = dataset.astype({'color': str, 'chip': str, 'target': float, 'source': float})

    # Make the plot
    p1 = ggplot(dataset, aes(x='target', y='source', color='color', label='chip')) + \
        geom_point(show_legend=False, size=2) + \
        geom_smooth(method='lm', size=.5, show_legend=False) + \
        theme_seaborn() + facet_grid('.~color') + \
        geom_label(angle=15, size=7, nudge_y=-.25, nudge_x=.5, show_legend=False) + \
        scale_x_continuous(limits=(-5, 270)) + scale_y_continuous(limits=(-5, 275)) + \
        scale_color_manual(values=['blue', 'green', 'red'])

    # Reset debug
    if params.debug is not None:
        if params.debug == 'print':
            p1.save(os.path.join(params.debug_outdir, 'color_quick_check.png'))
        elif params.debug == 'plot':
            print(p1)
github danforthcenter / plantcv / plantcv / plantcv / analyze_nir_intensity.py
    masked1 = cv2.bitwise_and(rgbimg, rgbimg, mask=mask)
    # cplant_back = cv2.add(masked1, img_back1)
    if params.debug is not None:
        if params.debug == "print":
            print_image(masked1, os.path.join(params.debug_outdir, str(params.device) + "_masked_nir_plant.jpg"))
        if params.debug == "plot":
            plot_image(masked1)

    analysis_image = None

    if histplot is True:
        hist_x = hist_percent
        # bin_labels = np.arange(0, bins)
        dataset = pd.DataFrame({'Grayscale pixel intensity': bin_labels,
                                'Proportion of pixels (%)': hist_x})
        fig_hist = (ggplot(data=dataset,
                           mapping=aes(x='Grayscale pixel intensity',
                                       y='Proportion of pixels (%)'))
                    + geom_line(color='red')
                    + scale_x_continuous(breaks=list(range(0, maxval, 25))))

        analysis_image = fig_hist
        if params.debug == "print":
            fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_nir_hist.png'))
        elif params.debug == "plot":
            print(fig_hist)

    outputs.add_observation(variable='nir_frequencies', trait='near-infrared frequencies',
                            method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=list,
                            value=hist_nir, label=bin_labels)

    # Store images
github deepmind / bsuite / bsuite / experiments / deep_sea / analysis.py
def _base_scaling(plt_df: pd.DataFrame,
                  sweep_vars: Sequence[Text] = None,
                  with_baseline: bool = True) -> gg.ggplot:
  """Base underlying piece of the scaling plots for deep sea."""
  p = (gg.ggplot(plt_df)
       + gg.aes(x='size', y='episode')
      )
  if np.all(plt_df.finished):
    p += gg.geom_point(gg.aes(colour='solved'), size=3, alpha=0.75)
  else:
    p += gg.geom_point(gg.aes(shape='finished', colour='solved'),
                       size=3, alpha=0.75)
    p += gg.scale_shape_manual(values=['x', 'o'])

  if np.all(plt_df.solved):
    p += gg.scale_colour_manual(values=['#313695'])  # blue
  else:
    p += gg.scale_colour_manual(values=['#d73027', '#313695'])  # [red, blue]

  if with_baseline:
    baseline_df = _make_baseline(plt_df, sweep_vars)
github danforthcenter / plantcv / plantcv / plantcv / analyze_color.py
    bin_values = [l for l in binval]

    analysis_image = None
    # Create a dataframe of bin labels and histogram data
    dataset = pd.DataFrame({'bins': binval, 'blue': histograms["b"]["hist"],
                            'green': histograms["g"]["hist"], 'red': histograms["r"]["hist"],
                            'lightness': histograms["l"]["hist"], 'green-magenta': histograms["m"]["hist"],
                            'blue-yellow': histograms["y"]["hist"], 'hue': histograms["h"]["hist"],
                            'saturation': histograms["s"]["hist"], 'value': histograms["v"]["hist"]})

    # Make the histogram figure using plotnine
    if hist_plot_type is not None:
        if hist_plot_type.upper() == 'RGB':
            df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['blue', 'green', 'red'])
                        )

        elif hist_plot_type.upper() == 'LAB':
            df_lab = pd.melt(dataset, id_vars=['bins'],
                             value_vars=['lightness', 'green-magenta', 'blue-yellow'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['yellow', 'magenta', 'dimgray'])
                        )

        elif hist_plot_type.upper() == 'HSV':