How to use the seaborn.set function in seaborn

To help you get started, we’ve selected a few seaborn.set examples, based on popular ways it is used in public projects.

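For orientation, here is a minimal sketch of typical usage before the project excerpts below (assuming seaborn and matplotlib are installed; the dataset and parameter values are illustrative, not taken from the projects shown):

import seaborn as sns
import matplotlib.pyplot as plt

# Apply a global theme: figure style, color palette, and font scaling.
# In seaborn >= 0.11, sns.set() is an alias for sns.set_theme().
sns.set(style="whitegrid", palette="muted", font_scale=1.2, color_codes=True)

# Plots created afterwards pick up the theme automatically.
tips = sns.load_dataset("tips")  # bundled example dataset (downloaded on first use)
sns.scatterplot(x="total_bill", y="tip", data=tips)
plt.show()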

github ucbdrive / 3d-vehicle-tracking / 3d-tracking / tools / plot_tracking.py
help='draw 2D box')
    parser.add_argument('--draw_bev', default=False, action='store_true',
                        help='draw Birds eye view')
    args = parser.parse_args()
    args.select_seq = [args.select_seq] if isinstance(args.select_seq,
                                                      int) else args.select_seq

    print(' '.join(sys.argv))

    return args


args = parse_args()

# Global Variable
sns.set(style="darkgrid")
FONT = cv2.FONT_HERSHEY_SIMPLEX
FOURCC = cv2.VideoWriter_fourcc(*'mp4v')
OUTPUT_PATH = cfg.OUTPUT_PATH
FOV_H = 60
NEAR_CLIP = 0.15

if args.dataset == 'gta':
    W = cfg.GTA.W  # 1920
    H = cfg.GTA.H  # 1080
    resW = W // 2
    resH = H // 2
    FOCAL_LENGTH = cfg.GTA.FOCAL_LENGTH  # 935.3074360871937
else:
    W = cfg.KITTI.W # 1248
    H = cfg.KITTI.H # 384
    resW = W

github MrtnMndt / OCDVAE_ContinualLearning / lib / Utility / visualization.py
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.colors import ListedColormap
import seaborn as sns  # needed for the sns.* calls below; import inferred from usage in this excerpt

# matplotlib backend, required for plotting of images to tensorboard
matplotlib.use('Agg')

# setting font sizes
title_font_size = 60
axes_font_size = 45
legend_font_size = 36
ticks_font_size = 48

# setting seaborn specifics
sns.set(font_scale=2.5)
sns.set_style("whitegrid")
colors = sns.color_palette("Set2")
pal = sns.cubehelix_palette(10, light=0.0)
linestyles = [(0, (1, 3)),  # 'dotted'
              (0, (1, 1)),  # 'densely dotted'
              (0, (2, 2)),  # 'dashed'
              (0, (3, 1)),  # 'densely dashed'
              (0, (3, 3, 1, 3)),  # 'dashdotted'
              (0, (3, 1, 1, 1)),  # 'densely dashdotted'
              (0, (3, 3, 1, 3, 1, 3)),  # 'dashdotdotted'
              (0, (3, 1, 1, 1, 1, 1))]  # 'densely dashdotdotted'


def args_to_tensorboard(writer, args):
    """
    Takes command line parser arguments and formats them to

github OpenMined / PyGrid / examples / Serving models / helper.py
# imports used by this excerpt, inferred from usage (see the full module on GitHub)
import torch
import pandas as pd
import seaborn as sn
from sklearn.metrics import confusion_matrix


def plot_confusion_matrix(model, loader):
    # Predict the values from the validation dataset
    model.eval()

    model_output = torch.cat([model(x) for x, _ in loader])
    predictions = torch.argmax(model_output, dim=1)
    targets = torch.cat([y for _, y in loader])

    conf_matrix = confusion_matrix(targets, predictions)
    df_cm = pd.DataFrame(conf_matrix)
    sn.set(font_scale=1)
    sn.heatmap(df_cm, annot=True, annot_kws={"size": 16})

github blei-lab / edward / examples / gan_wasserstein_synthetic.py
def main(_):
  sns.set(color_codes=True)
  ed.set_seed(42)

  # DATA. We use a placeholder to represent a minibatch. During
  # inference, we generate data on the fly and feed `x_ph`.
  x_ph = tf.placeholder(tf.float32, [FLAGS.M, 1])

  # MODEL
  with tf.variable_scope("Gen"):
    eps = tf.linspace(-8.0, 8.0, FLAGS.M) + 0.01 * tf.random_normal([FLAGS.M])
    eps = tf.reshape(eps, [FLAGS.M, 1])
    x = generative_network(eps)

  # INFERENCE
  optimizer = tf.train.GradientDescentOptimizer(0.03)
  optimizer_d = tf.train.GradientDescentOptimizer(0.03)

github IndicoDataSolutions / Enso / enso / visualize / facets.py
**kwargs
    ):
        """
        Create a tiled visualization of experiment results.

        :param results: pd.DataFrame of results, loaded from results .csv file.
        :param x_tile: string name of DataFrame column to vary over the x axis of the grid of line graphs
        :param y_tile: string name of DataFrame column to vary over the y axis of the grid of line graphs
        :param x_axis: string name of DataFrame column to plot on the x axis within each individual line graph
        :param y_axis: string name of DataFrame column to plot on the y axis within each individual line graph
        :param lines: string name or list of DataFrame column string names displayed as separate lines within each graph.
                      Providing multiple values means that each unique combination of values will be displayed as a single line.
        :param results_id: string name of folder to save resulting visual in, relative to the root of the results directory
        :param filename: filename (excluding filetype) to use when saving visualization.  Value is relative to folder specified by results_id.
        """
        sns.set(style="ticks", color_codes=True)

        if isinstance(lines, (tuple, list)):
            results['key'] = results[lines].apply(lambda x: ','.join(x), axis=1)
            lines = 'key'
        y_tiles = np.unique(results[y_tile])
        x_tiles = np.unique(results[x_tile])
        keys = np.unique(results.key)
        colors_dict = {key: color for key, color in zip(keys, sns.color_palette("hls", len(keys)))}
        n_y_tiles = len(y_tiles)
        n_x_tiles = len(x_tiles)
        # we adjust the figsize based on how many plots will be plotted
        # we maintain a 6:8 ratio of height to width for uniformity
        fig, axes = plt.subplots(n_y_tiles, n_x_tiles, figsize=(n_x_tiles * 8, n_y_tiles * 6), squeeze=False)
        for i, row in enumerate(y_tiles):
            for j, col in enumerate(x_tiles):
                ax = axes[i][j]

github mwaskom / seaborn / examples / anscombes_quartet.py
"""
Anscombe's quartet
==================

_thumb: .4, .4
"""
import seaborn as sns
sns.set(style="ticks")

# Load the example dataset for Anscombe's quartet
df = sns.load_dataset("anscombe")

# Show the results of a linear regression within each dataset
sns.lmplot(x="x", y="y", col="dataset", hue="dataset", data=df,
           col_wrap=2, ci=None, palette="muted", height=4,
           scatter_kws={"s": 50, "alpha": 1})

github jangirrishabh / Overcoming-exploration-from-demos / experiment / plot.py
import os
import matplotlib.pyplot as plt
import numpy as np
import json
import seaborn as sns; sns.set()
import glob2
import argparse


def smooth_reward_curve(x, y):
    halfwidth = int(np.ceil(len(x) / 100))  # Halfwidth of our smoothing convolution
    k = halfwidth
    xsmoo = x
    ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),
        mode='same')
    return xsmoo, ysmoo


def load_results(file):
    if not os.path.exists(file):
        return None

github robertmartin8 / udemyML / templates / plot_templates.py
# Simple classification plot
import matplotlib.pyplot as plt

plt.scatter(X1, X2, color=["green" if i else "red" for i in y])


# Regression pairplot
import seaborn as sns
import numpy as np

sns.set(style="ticks", color_codes=True)
g = sns.pairplot(df, kind="reg")

# Visualising regression results
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color="red")
plt.plot(X_grid, regressor.predict(X_grid), color="blue")
plt.title("Title")
plt.xlabel("Independent variable")
plt.ylabel(list(df)[-1])
plt.show()


# Classifier with two dependent variables
from matplotlib.colors import ListedColormap

github cangermueller / deepcpg / predict / models / dnn / filter_motifs.py
# imports used by this excerpt, inferred from usage
import warnings
import matplotlib.pyplot as plt
import seaborn as sns


def plot_score_density(f_scores, out_pdf):
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        sns.set(font_scale=1.3)
        plt.figure()
        sns.distplot(f_scores, kde=False)
        plt.xlabel('ReLU output')
        plt.savefig(out_pdf)
        plt.close()

github joelcthomas / modeldrift / utils / viz_utils.py
# imports used by this excerpt, inferred from usage (df1 and df2 are Spark DataFrames)
import matplotlib.pyplot as plt
import seaborn as sns


def plot_summary(df1, df2):
  sns.set(style='dark')
  sns.set()  # note: the bare call re-applies seaborn defaults, overriding style='dark' above
  fig, ax = plt.subplots(figsize=(14,4))

  hue_order=['Accurate', 'Inaccurate']
  sns.lineplot(x='window_day', y='ratio', hue='accurate_prediction', hue_order=hue_order, style='accurate_prediction', style_order=hue_order, alpha=0.1, data = df1.toPandas())
  sns.lineplot(x='window_day', y='ratio', hue='accurate_prediction', hue_order=hue_order, style='accurate_prediction', style_order=hue_order, legend=False, data = df1.filter(df1.window_day < '2019-07-21').toPandas())
  sns.lineplot(x='window_day', y='ratio', hue='accurate_prediction', hue_order=hue_order, style='accurate_prediction', style_order=hue_order,legend=False, alpha=1, data = df2.filter(df2.window_day >= '2019-07-21').toPandas())
  plt.yticks(rotation=0)
  plt.xticks(rotation=0)
  plt.ylabel('% in population')
  plt.xlabel('Date')
  plt.title('Model Monitoring KPI over time')

  ax.axvline(x='2019-07-10', linewidth=1, linestyle='--', alpha=0.3)
  ax.axvline(x='2019-07-19', linewidth=1, linestyle='--', alpha=0.3)
  ax.axvline(x='2019-08-04', linewidth=1, linestyle='--', alpha=0.3)