How to use the wandb.Image function in wandb

To help you get started, we’ve selected a few wandb.Image examples, drawn from popular ways the function is used in public projects.

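Before working through the excerpts, here is a minimal, self-contained sketch of the basic pattern. The project name is a placeholder:

import numpy as np
import wandb

wandb.init(project="image-demo")  # hypothetical project name
pixels = np.random.randint(0, 255, size=(28, 28, 3), dtype=np.uint8)  # random RGB array
wandb.log({"example": wandb.Image(pixels, caption="Random noise")})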

github wandb / client / standalone_tests / all_media_types.py View on Github external
import numpy
import pandas
import torch
import tensorflow
import plotly.graph_objs
import PIL.Image
import matplotlib.pyplot as plt
import wandb

def main():
    wandb.init()

    histogram_small_literal = wandb.Histogram(np_histogram=([1, 2, 4], [3, 10, 20, 0]))
    histogram_large_random = wandb.Histogram(numpy.random.randint(255, size=(1000)))
    numpy_array = numpy.random.rand(1000)
    torch_tensor = torch.rand(1000, 1000)
    data_frame = pandas.DataFrame(data=numpy.random.rand(1000), columns=['col'])
    tensorflow_variable_single = tensorflow.Variable(543.01, dtype=tensorflow.float32)
    tensorflow_variable_multi = tensorflow.Variable([[2, 3], [7, 11]], dtype=tensorflow.int32)
    plot_scatter = plotly.graph_objs.Scatter(x=[0, 1, 2])

    image_data = numpy.zeros((28, 28))
    image_cool = wandb.Image(image_data, caption="Cool zeros")
    image_nice = wandb.Image(image_data, caption="Nice zeros")
    image_random = wandb.Image(numpy.random.randint(255, size=(28, 28, 3)))
    image_pil = wandb.Image(PIL.Image.new("L", (28, 28)))
    plt.plot([1, 2, 3, 4])
    plt.ylabel('some interesting numbers')
    image_matplotlib_plot = wandb.Image(plt)
    matplotlib_plot = plt

    audio_data = numpy.random.uniform(-1, 1, 44100)
    sample_rate = 44100
    caption1 = "This is what a dog sounds like"
    caption2 = "This is what a chicken sounds like"
    # test with all captions
    audio1 = wandb.Audio(audio_data, sample_rate=sample_rate, caption=caption1)
    audio2 = wandb.Audio(audio_data, sample_rate=sample_rate, caption=caption2)
    # test with no captions
    audio3 = wandb.Audio(audio_data, sample_rate=sample_rate)
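The excerpt is cut off before the objects are logged; a plausible follow-up inside main(), reusing the variable names above (the log keys are assumptions), would be:

    wandb.log({
        "histogram": histogram_large_random,
        "images": [image_cool, image_nice, image_random, image_pil],
        "audio": [audio1, audio2, audio3],
    })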
github wandb / client / tests / test_data_types.py View on Github external
def test_captions():
    wbone = wandb.Image(image, caption="Cool")
    wbtwo = wandb.Image(image, caption="Nice")
    assert wandb.Image.captions([wbone, wbtwo]) == ["Cool", "Nice"]
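Note that image is a fixture defined elsewhere in the test module; any array-like input works as a stand-in, for example:

import numpy as np
image = np.zeros((28, 28))  # hypothetical stand-in for the test fixture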
github wandb / client / wandb / tensorboard / __init__.py View on Github external
    summary_pb = tf_summary_str_or_pb

    for value in summary_pb.value:
        kind = value.WhichOneof("value")
        if kind in IGNORE_KINDS:
            continue
        if kind == "simple_value":
            values[namespaced_tag(value.tag, namespace)] = value.simple_value
        elif kind == "image":
            from PIL import Image
            img_str = value.image.encoded_image_string
            # Supports gifs from TboardX
            if img_str.startswith(b"GIF"):
                image = wandb.Video(six.BytesIO(img_str), format="gif")
            else:
                image = wandb.Image(Image.open(
                    six.BytesIO(img_str)))
            tag_idx = value.tag.rsplit('/', 1)
            if len(tag_idx) > 1 and tag_idx[1].isdigit():
                tag, idx = tag_idx
                values.setdefault(history_image_key(
                    tag, namespace), []).append(image)
            else:
                values[history_image_key(value.tag, namespace)] = [image]
        # Coming soon...
        # elif kind == "audio":
        #    audio = wandb.Audio(six.BytesIO(value.audio.encoded_audio_string),
        #                        sample_rate=value.audio.sample_rate, content_type=value.audio.content_type)
        elif kind == "histo":
            tag = namespaced_tag(value.tag, namespace)
            if len(value.histo.bucket_limit) >= 3:
                first = value.histo.bucket_limit[0] + \
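You rarely need to call this conversion directly: wandb can patch TensorBoard so that summaries, including image summaries, are mirrored to the run automatically. A minimal sketch:

import wandb

wandb.init(sync_tensorboard=True)
# write TensorBoard summaries as usual; image values arrive as wandb.Image media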
github wandb / client / wandb / keras / __init__.py View on Github external
                reference_images = [
                    wandb.Image(data, caption=captions[i])
                    for i, data in enumerate(reference_image_data)
                ]
                return list(chain.from_iterable(zip(output_images, reference_images)))
        elif self.input_type in ('image', 'images', 'segmentation_mask'):
            input_image_data = self._masks_to_pixels(test_data) if self.input_type == 'segmentation_mask' else test_data
            if self.output_type == 'label':
                # we just use the predicted label as the caption for now
                captions = self._logits_to_captions(predictions)
                return [wandb.Image(data, caption=captions[i]) for i, data in enumerate(test_data)]
            elif self.output_type in ('image', 'images', 'segmentation_mask'):
                output_image_data = self._masks_to_pixels(
                    predictions) if self.output_type == 'segmentation_mask' else predictions
                reference_image_data = self._masks_to_pixels(
                    test_output) if self.output_type == 'segmentation_mask' else test_output
                input_images = [wandb.Image(data, grouping=3) for i, data in enumerate(input_image_data)]
                output_images = [wandb.Image(data) for i, data in enumerate(output_image_data)]
                reference_images = [wandb.Image(data) for i, data in enumerate(reference_image_data)]
                return list(chain.from_iterable(zip(input_images, output_images, reference_images)))
            else:
                # unknown output, just log the input images
                return [wandb.Image(img) for img in test_data]
        elif self.output_type in ('image', 'images', 'segmentation_mask'):
            # unknown input, just log the predicted and reference outputs without captions
            output_image_data = self._masks_to_pixels(
                predictions) if self.output_type == 'segmentation_mask' else predictions
            reference_image_data = self._masks_to_pixels(
                test_output) if self.output_type == 'segmentation_mask' else test_output
            output_images = [wandb.Image(data, grouping=2) for i, data in enumerate(output_image_data)]
            reference_images = [wandb.Image(data) for i, data in enumerate(reference_image_data)]
            return list(chain.from_iterable(zip(output_images, reference_images)))
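This branching runs inside wandb's Keras callback, which builds the wandb.Image lists from your validation data. A minimal sketch of wiring it up, assuming model and the data variables already exist:

import wandb
from wandb.keras import WandbCallback

wandb.init(project="keras-demo")  # hypothetical project name
model.fit(x_train, y_train,
          validation_data=(x_test, y_test),
          callbacks=[WandbCallback(input_type="images", output_type="label")])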
github lukas / ml-class / keras-autoencoder / util.py View on Github external
def on_epoch_end(self, epoch, logs):
        indices = np.random.randint(self.validation_data[0].shape[0], size=8)
        test_data = self.validation_data[0][indices]
        pred_data = np.clip(self.model.predict(test_data), 0, 1)
        wandb.log({
            "examples": [
                wandb.Image(np.hstack([data, pred_data[i]]), caption=str(i))
                for i, data in enumerate(test_data)]
        }, commit=False)
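The commit=False flag stages values without advancing the step, so several log calls can contribute to the same history row. A sketch, assuming a later metric commits the step:

wandb.log({"examples": example_images}, commit=False)  # staged for the current step
wandb.log({"val_loss": 0.12})  # commits the step, including the staged images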
github lukas / ml-class / lstm / attention / train.py View on Github external
        examples = []
        viz = []
        # Swap the weights
        weights = self.visualizer.pred_model.get_layer(
            "attention_decoder_1").get_weights()
        self.visualizer.proba_model.get_layer(
            "attention_decoder_prob").set_weights(weights)
        for i, o in zip(data_in, data_out):
            text = decode(input_vocab.int_to_string(i)).replace('', '')
            truth = decode(output_vocab.int_to_string(np.argmax(o, -1)), True)
            pred = run_example(self.model, input_vocab, output_vocab, text)
            out = decode(pred, True)
            print(f"{decode(text, True)} -> {out} ({truth})")
            examples.append([decode(text, True), out, truth])
            amap = self.visualizer.attention_map(text)
            if amap:
                viz.append(wandb.Image(amap))
                amap.close()
        if len(viz) > 0:
            logs["attention_map"] = viz[:5]
        logs["examples"] = wandb.Table(data=examples)
        wandb.log(logs)
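wandb.Table renders the collected rows in the UI; column names can also be passed explicitly, e.g. matching the [input, prediction, truth] rows built above (the names are assumptions):

table = wandb.Table(data=examples, columns=["input", "prediction", "truth"])
wandb.log({"examples": table})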
github lukas / ml-class / lstm / ocr / util.py View on Github external
        pred = [''.join(self.dataset.mapping.get(label, '')
                        for label in pred).strip(' |_') for pred in result]
        truth = [''.join(self.dataset.mapping.get(label, '')
                         for label in true).strip(' |_') for true in truth]
        pred_train = [''.join(self.dataset.mapping.get(label, '')
                              for label in pred).strip(' |_') for pred in result_train]
        truth_train = [''.join(self.dataset.mapping.get(label, '')
                               for label in true).strip(' |_') for true in truth_train]
        dists = [lev(list(a), list(b)) for a, b in zip(pred, truth)]
        print("Val Levenstein: ", np.mean(dists))
        print("Val Examples:\n" +
              "\n".join([f"{t}\n{p}\n---" for p, t in zip(pred, truth)]))
        print("Train :\n" +
              "\n".join([f"{t}\n{p}\n---" for p, t in zip(pred_train, truth_train)]))
        wandb.log({"examples": [
            wandb.Image(img, caption=f"Pred: \"{pred[i]}\" -- Truth: \"{truth[i]}\"") for i, img in enumerate(images)],
            "train_examples": [
                wandb.Image(img, caption=f"Pred: \"{pred_train[i]}\" -- Truth: \"{truth_train[i]}\"") for i, img in enumerate(train_images)
        ],
            **logs})
github wandb / client / wandb / dataframes.py View on Github external
    class_colors = None

    class_count = y_true.shape[-1]

    if class_colors is None:
        class_colors = util.class_colors(class_count)
    class_colors = np.array(class_colors)

    y_true_class = np.argmax(y_true, axis=-1)
    y_pred_class = np.argmax(y_pred, axis=-1)

    y_pred_discrete = np.round(y_pred)

    images = [Image(img) for img in x]
    label_imgs = [Image(mask) for mask in class_colors[y_true_class]]
    predictions = [Image(mask) for mask in class_colors[y_pred_class]]

    flat_shape = (x.shape[0], -1)

    intersection = np.sum(np.logical_and(y_true, y_pred_discrete).reshape(flat_shape), axis=1)
    union = np.sum(np.logical_or(y_true, y_pred_discrete).reshape(flat_shape), axis=1)

    iou = intersection / (union + 1e-9)
    accuracy = np.mean(np.equal(y_true_class, y_pred_class).reshape(flat_shape), axis=1)

    difference = np.zeros(y_true_class.shape)
    difference[y_true_class != y_pred_class] = 1.

    incorrect_predictions = [Image(mask) for mask in difference]

    iou_class = [
        np.sum(np.logical_and(y_true_class == i, y_pred_class == i).reshape(flat_shape), axis=1) / # intersection
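Newer wandb releases can also overlay segmentation masks directly on an image via the masks= argument, rather than pre-coloring them as above. A sketch, with hypothetical class labels:

mask_image = wandb.Image(x[0], masks={
    "ground_truth": {
        "mask_data": y_true_class[0],  # 2D array of per-pixel class indices
        "class_labels": {0: "background", 1: "object"},  # hypothetical labels
    },
})
wandb.log({"segmentation": mask_image})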
github lukas / ml-class / examples / keras-autoencoder / conditional_autoencoder.py View on Github external
                # latent_vis = manifold.TSNE(n_components=2, init='pca', random_state=0)
                latent_vis = PCA(n_components=2)
                X = latent_vis.fit_transform(latent)
            else:
                X = latent
            trace = go.Scatter(x=list(X[:, 0]), y=list(X[:, 1]),
                               mode='markers', showlegend=False,
                               marker=dict(color=list(np.argmax(y_test[latent_idx], axis=1)),
                                           colorscale='Viridis',
                                           size=8,
                                           showscale=True))
            fig = go.Figure(data=[trace])
            wandb.log({"latent_vis": fig}, commit=False)
        # Always log training images
        wandb.log({
            "train_images": [wandb.Image(
                np.hstack([t_inputs[i], res])) for i, res in enumerate(t_results)
            ]
        }, commit=False)

        # Log image conversion when conditional
        if wandb.config.conditional:
            wandb.log({
                "images": [wandb.Image(
                    np.hstack([inputs[i], res]), caption=" to ".join([
                        wandb.config.labels[np.argmax(r_labels[i])
                                            ], wandb.config.labels[np.argmax((labels)[i])]
                    ])) for i, res in enumerate(results)]}, commit=False)