How to use the ivis.Ivis class in ivis

To help you get started, we've selected a few ivis examples based on popular ways the library is used in public projects.
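
The snippets below are excerpts from the beringresearch/ivis test suite and examples, so their import lines are omitted; they rely on numpy (as np), pytest, sklearn.datasets and from ivis import Ivis. As a minimal, self-contained starting point, an unsupervised run might look like the sketch below; the parameter values are illustrative rather than recommended defaults.

from sklearn import datasets
from ivis import Ivis

# Load a small example dataset
X = datasets.load_iris().data

# Unsupervised ivis model: k is the number of nearest neighbours used when
# building triplets; epochs is kept small so the sketch runs quickly.
model = Ivis(embedding_dims=2, k=15, batch_size=16, epochs=5)

# Fit the model and obtain a two-dimensional embedding of X
embeddings = model.fit_transform(X)
print(embeddings.shape)  # (150, 2)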

From beringresearch/ivis, tests/integration/test_semi-supervised_iris.py:
def test_non_zero_indexed_semi_supervised_classificaton_classes():
    iris = datasets.load_iris()
    x = iris.data
    y = iris.target

    # Make labels non-zero indexed
    y = y + 1

    # Mark points as unlabeled
    mask = np.random.choice(range(len(y)), size=len(y) // 2, replace=False)
    y[mask] = -1

    supervision_metric = 'sparse_categorical_crossentropy'
    ivis_iris = Ivis(k=15, batch_size=16, epochs=5,
                     supervision_metric=supervision_metric)

    with pytest.raises(ValueError):
        embeddings = ivis_iris.fit_transform(x, y)

From beringresearch/ivis, tests/integration/test_semi-supervised_iris.py:
def test_non_consecutive_indexed_semi_supervised_classificaton_classes():
    iris = datasets.load_iris()
    x = iris.data
    y = iris.target

    # Make labels non-consecutive indexed
    y[y == max(y)] = max(y) + 1

    # Mark points as unlabeled
    mask = np.random.choice(range(len(y)), size=len(y) // 2, replace=False)
    y[mask] = -1

    supervision_metric = 'sparse_categorical_crossentropy'
    ivis_iris = Ivis(k=15, batch_size=16, epochs=5,
                     supervision_metric=supervision_metric)

    with pytest.raises(ValueError):
        embeddings = ivis_iris.fit_transform(x, y)

From beringresearch/ivis, tests/integration/test_semi-supervised_iris.py:
def test_correctly_indexed_semi_supervised_classificaton_classes():
    iris = datasets.load_iris()
    x = iris.data
    y = iris.target
    
    # Mark points as unlabeled
    mask = np.random.choice(range(len(y)), size=len(y) // 2, replace=False)
    y[mask] = -1

    supervision_metric = 'sparse_categorical_crossentropy'
    ivis_iris = Ivis(k=15, batch_size=16, epochs=5,
                     supervision_metric=supervision_metric)

    embeddings = ivis_iris.fit_transform(x, y)
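
Taken together, the three excerpts above pin down the labelling convention ivis expects for semi-supervised classification: class labels must be zero-indexed, consecutive integers, and unlabelled points are marked with -1. A short sketch of preparing labels that satisfy both rules (the fraction of points masked here is arbitrary):

import numpy as np
from sklearn import datasets
from ivis import Ivis

iris = datasets.load_iris()
x = iris.data
y = iris.target.copy()   # labels 0, 1, 2: zero-indexed and consecutive

# Mark roughly half of the points as unlabelled with -1
rng = np.random.default_rng(0)
mask = rng.choice(len(y), size=len(y) // 2, replace=False)
y[mask] = -1

model = Ivis(k=15, batch_size=16, epochs=5,
             supervision_metric='sparse_categorical_crossentropy')
embeddings = model.fit_transform(x, y)   # fits without raising ValueError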

From beringresearch/ivis, tests/integration/test_h5.py:
def test_h5_file(h5_filepath):
    rows, dims = 258, 32
    create_random_dataset(h5_filepath, rows, dims)

    # Load data
    test_index = rows // 5
    X_train = HDF5Matrix(h5_filepath, 'data', start=0, end=test_index)
    y_train = HDF5Matrix(h5_filepath, 'labels', start=0, end=test_index)

    X_test = HDF5Matrix(h5_filepath, 'data', start=test_index, end=rows)
    y_test = HDF5Matrix(h5_filepath, 'labels', start=test_index, end=rows)

    # Train and transform with ivis
    ivis_iris = Ivis(epochs=5, k=15, batch_size=16)

    y_pred_iris = ivis_iris.fit_transform(X_train, shuffle_mode='batch')
    y_pred = ivis_iris.transform(X_test)

    assert y_pred.shape[0] == len(X_test)
    assert y_pred.shape[1] == ivis_iris.embedding_dims
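
Note that create_random_dataset is a helper defined in the ivis test suite, and HDF5Matrix is the HDF5 wrapper that older Keras releases expose as keras.utils.HDF5Matrix; neither is part of ivis itself. A hypothetical sketch of what such a helper might look like with h5py, assuming the file only needs 'data' and 'labels' datasets of the requested shape:

import h5py
import numpy as np

def create_random_dataset(path, rows, dims):
    # Hypothetical helper: write random 'data' and integer 'labels'
    # datasets to an HDF5 file so HDF5Matrix can slice them afterwards.
    with h5py.File(path, 'w') as f:
        f.create_dataset('data', data=np.random.random((rows, dims)).astype('float32'))
        f.create_dataset('labels', data=np.random.randint(0, 2, size=rows))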

From beringresearch/ivis, tests/test_model_saving.py:
def test_supervised_model_saving(model_filepath):
    model = Ivis(k=15, batch_size=16, epochs=5,
                 supervision_metric='sparse_categorical_crossentropy')
    iris = datasets.load_iris()
    X = iris.data
    Y = iris.target

    model.fit(X, Y)
    model.save_model(model_filepath, overwrite=True)

    model_2 = Ivis()
    model_2.load_model(model_filepath)

    # Check that the model embeddings are the same
    assert np.all(model.transform(X) == model_2.transform(X))
    # Check that the supervised predictions are the same
    assert np.all(model.score_samples(X) == model_2.score_samples(X))
    # Check that the serializable dicts are the same
    assert model.__getstate__() == model_2.__getstate__()

    # Check all weights are the same
    for model_layer, model_2_layer in zip(model.encoder.layers,
                                          model_2.encoder.layers):
        model_layer_weights = model_layer.get_weights()
        model_2_layer_weights = model_2_layer.get_weights()
        for i in range(len(model_layer_weights)):
            assert np.all(model_layer_weights[i] == model_2_layer_weights[i])

From beringresearch/ivis, tests/test_model_saving.py:
def test_ivis_model_saving(model_filepath):
    model = Ivis(k=15, batch_size=16, epochs=5)
    iris = datasets.load_iris()
    X = iris.data

    model.fit(X)
    model.save_model(model_filepath)

    model_2 = Ivis()
    model_2.load_model(model_filepath)

    # Check that the model predictions are the same
    assert np.all(model.transform(X) == model_2.transform(X))
    # Check that the serializable dicts are the same
    assert model.__getstate__() == model_2.__getstate__()

    # Check all weights are the same
    for model_layer, model_2_layer in zip(model.encoder.layers,
                                          model_2.encoder.layers):
        model_layer_weights = model_layer.get_weights()
        model_2_layer_weights = model_2_layer.get_weights()
        for i in range(len(model_layer_weights)):
            assert np.all(model_layer_weights[i] == model_2_layer_weights[i])

    # Check optimizer weights are the same
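
Outside of a test, the same save-and-restore round trip reduces to a few lines. This is a sketch; the file path 'iris.ivis' is an arbitrary choice:

from sklearn import datasets
from ivis import Ivis

X = datasets.load_iris().data

model = Ivis(k=15, batch_size=16, epochs=5)
model.fit(X)
model.save_model('iris.ivis')        # persist the trained model to disk

restored = Ivis()
restored.load_model('iris.ivis')     # load it into a fresh instance
embeddings = restored.transform(X)   # embeds data just like the original model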

From beringresearch/ivis, tests/integration/test_supervised.py:
def test_invalid_metric():
    iris = datasets.load_iris()
    x = iris.data
    y = iris.target

    supervision_metric = 'invalid_loss_function'
    ivis_iris = Ivis(k=15, batch_size=16, epochs=5,
                     supervision_metric=supervision_metric)

    # An unrecognised loss function should raise a ValueError
    with pytest.raises(ValueError):
        embeddings = ivis_iris.fit_transform(x, y)
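
The excerpt above checks that an unrecognised supervision_metric makes fit_transform raise a ValueError. The passing tests in this suite use 'sparse_categorical_crossentropy' for integer class labels; the ivis documentation also describes regression-style supervision with Keras regression losses such as 'mae'. A rough sketch of the latter, with an arbitrary choice of dataset:

from sklearn import datasets
from ivis import Ivis

# Continuous target supervised with a regression loss
diabetes = datasets.load_diabetes()
model = Ivis(k=15, batch_size=16, epochs=5, supervision_metric='mae')
embeddings = model.fit_transform(diabetes.data.astype('float32'),
                                 diabetes.target.astype('float32'))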

From beringresearch/ivis, examples/iris.py:
"""
Example of reducing dimensionality of the iris dataset using ivis.
"""

import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler

from ivis import Ivis

sns.set(context='paper', style='white')

X = load_iris().data
X = MinMaxScaler().fit_transform(X)

ivis = Ivis(k=3, batch_size=120, model='maaten')
ivis.fit(X)

embeddings = ivis.transform(X)

fig, ax = plt.subplots(figsize=(12, 10))
plt.scatter(embeddings[:, 0],
            embeddings[:, 1],
            c=load_iris().target, cmap='Spectral', s=1)
plt.setp(ax, xticks=[], yticks=[])
plt.title('ivis embeddings of the iris dataset', fontsize=18)

plt.show()
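
One caveat about the script above: X is scaled with MinMaxScaler but the fitted scaler is not kept, so any new observations embedded later with ivis.transform should be scaled the same way. A sketch continuing the script, with a hypothetical unseen point:

import numpy as np

# Refit a scaler on the training data and apply it to unseen observations
scaler = MinMaxScaler().fit(load_iris().data)
new_point = np.array([[5.1, 3.5, 1.4, 0.2]])      # hypothetical iris measurement
new_embedding = ivis.transform(scaler.transform(new_point))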