How to use the fancyimpute.NuclearNormMinimization class in fancyimpute

To help you get started, we’ve selected a few fancyimpute examples based on popular ways the library is used in public projects.
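Before the project snippets, here is a minimal, self-contained sketch of the basic call pattern; the toy matrix and values are illustrative and not taken from any of the projects below. Missing entries are marked as NaN, and fit_transform returns a completed copy of the matrix.

import numpy as np
from fancyimpute import NuclearNormMinimization

# Toy (approximately rank-1) matrix with a few entries replaced by NaN.
X_incomplete = np.array([
    [1.0, 2.0, np.nan],
    [2.0, np.nan, 6.0],
    [np.nan, 6.0, 9.0],
    [4.0, 8.0, 12.0],
])

# NaN entries are treated as missing; fit_transform returns a completed copy.
X_completed = NuclearNormMinimization().fit_transform(X_incomplete)
print(X_completed)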


github iskandr / fancyimpute / test / test_nuclear_norm_minimization.py (View on Github)
from fancyimpute import NuclearNormMinimization

# create_rank1_data is a helper defined alongside these tests; it returns a
# rank-1 matrix together with a copy that has missing entries.
def test_rank1_symmetric_convex_solver():
    XYXY_rank1, XYXY_missing_rank1 = create_rank1_data(symmetric=True)
    solver = NuclearNormMinimization(require_symmetric_solution=True)
    completed = solver.fit_transform(XYXY_missing_rank1)
    assert abs(completed[1, 2] - XYXY_rank1[1, 2]) < 0.01, \
        "Expected %0.4f but got %0.4f" % (
            XYXY_rank1[1, 2], completed[1, 2])

github iskandr / fancyimpute / test / test_nuclear_norm_minimization.py (View on Github)
def test_rank1_convex_solver():
    XY_rank1, XY_missing_rank1 = create_rank1_data(symmetric=False)
    solver = NuclearNormMinimization(max_iters=50000)
    XY_completed_rank1 = solver.fit_transform(XY_missing_rank1)
    assert abs(XY_completed_rank1[1, 2] - XY_rank1[1, 2]) < 0.01, \
        "Expected %0.4f but got %0.4f" % (
            XY_rank1[1, 2], XY_completed_rank1[1, 2])

github iskandr / fancyimpute / test / test_nuclear_norm_minimization.py (View on Github)
# XY, XY_incomplete, missing_mask, and reconstruction_error are shared
# fixtures and helpers defined elsewhere in the fancyimpute test suite.
def test_nuclear_norm_minimization_with_low_rank_random_matrix():
    solver = NuclearNormMinimization(max_iters=2000)
    XY_completed = solver.fit_transform(XY_incomplete[:100])
    _, missing_mae = reconstruction_error(
        XY[:100], XY_completed, missing_mask[:100], name="NuclearNorm")
    assert missing_mae < 0.1, "Error too high!"

github iskandr / fancyimpute / experiments / readme_example.py (View on Github)
import numpy as np
from fancyimpute import BiScaler, KNN, NuclearNormMinimization, SimpleFill, SoftImpute

# X is a data matrix which we're going to randomly drop entries from
missing_mask = np.random.rand(*X.shape) < 0.1
X_incomplete = X.copy()
# missing entries indicated with NaN
X_incomplete[missing_mask] = np.nan

# baseline: replace each missing entry with its column's mean
meanFill = SimpleFill("mean")
X_filled_mean = meanFill.fit_transform(X_incomplete)

# Use 3 nearest rows which have a feature to fill in each row's missing features
knnImpute = KNN(k=3)
X_filled_knn = knnImpute.fit_transform(X_incomplete)

# matrix completion using convex optimization to find low-rank solution
# that still matches observed values. Slow!
X_filled_nnm = NuclearNormMinimization().fit_transform(X_incomplete)

# Instead of solving the nuclear norm objective directly,
# induce sparsity using singular value thresholding
softImpute = SoftImpute()

# simultaneously normalizes the rows and columns of your observed data,
# sometimes useful for low-rank imputation methods
biscaler = BiScaler()

# rescale both rows and columns to have zero mean and unit variance
X_incomplete_normalized = biscaler.fit_transform(X_incomplete)

# impute on the normalized data, then undo the scaling to return to the
# original units
X_filled_softimpute_normalized = softImpute.fit_transform(X_incomplete_normalized)
X_filled_softimpute = biscaler.inverse_transform(X_filled_softimpute_normalized)

# SoftImpute applied directly to the raw data, skipping the BiScaler step
X_filled_softimpute_no_biscale = softImpute.fit_transform(X_incomplete)
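
The example above fills the same incomplete matrix several ways but never compares the results. Below is a sketch of one way to do that, reusing the variables defined above; it is an addition of this page, not part of the excerpted file. Error is measured only over the entries in missing_mask, since those are the values each method had to guess.

# squared error restricted to the dropped entries
meanfill_mse = ((X_filled_mean[missing_mask] - X[missing_mask]) ** 2).mean()
knn_mse = ((X_filled_knn[missing_mask] - X[missing_mask]) ** 2).mean()
nnm_mse = ((X_filled_nnm[missing_mask] - X[missing_mask]) ** 2).mean()
softimpute_mse = ((X_filled_softimpute[missing_mask] - X[missing_mask]) ** 2).mean()

print("SimpleFill MSE: %f" % meanfill_mse)
print("KNN MSE: %f" % knn_mse)
print("NuclearNormMinimization MSE: %f" % nnm_mse)
print("SoftImpute (with BiScaler) MSE: %f" % softimpute_mse)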