How to use the SimpleFill class from the fancyimpute package

To help you get started, we’ve selected a few fancyimpute examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github iskandr / fancyimpute / test / test_solver.py View on Github external
def test_solver_fill_methods_with_low_rank_random_matrix():
    """Every SimpleFill strategy should impute the low-rank matrix with MAE < 5.

    Relies on module-level fixtures: XY (ground truth), XY_incomplete
    (with entries masked out), and missing_mask.
    """
    fill_methods = ("zero", "mean", "median", "min", "random")
    for method in fill_methods:
        solver = SimpleFill(fill_method=method)
        completed = solver.fit_transform(XY_incomplete)
        # reconstruction_error returns (overall_mae, mae_on_missing_entries);
        # only the error on the imputed cells matters here.
        _, missing_mae = reconstruction_error(
            XY,
            completed,
            missing_mask,
            name="Solver with fill_method=%s" % method)
        assert missing_mae < 5, (
            "Error too high for Solver with %s fill method!" % method)
github iskandr / fancyimpute / experiments / readme_example.py View on Github external
SimpleFill
)

# Build a random rank-4 matrix: 200 samples x 20 features, as the product
# of two thin Gaussian factors, so low-rank completion methods apply.
n = 200
m = 20
inner_rank = 4
X = np.dot(np.random.randn(n, inner_rank), np.random.randn(inner_rank, m))
print("Mean squared element: %0.4f" % (X ** 2).mean())

# X is a data matrix which we're going to randomly drop entries from
# (each entry is masked independently with probability 0.1).
missing_mask = np.random.rand(*X.shape) < 0.1
X_incomplete = X.copy()
# missing entries indicated with NaN
X_incomplete[missing_mask] = np.nan

# Baseline: replace every NaN with its column mean.
meanFill = SimpleFill("mean")
X_filled_mean = meanFill.fit_transform(X_incomplete)

# Use 3 nearest rows which have a feature to fill in each row's missing features
knnImpute = KNN(k=3)
X_filled_knn = knnImpute.fit_transform(X_incomplete)

# matrix completion using convex optimization to find low-rank solution
# that still matches observed values. Slow!
X_filled_nnm = NuclearNormMinimization().fit_transform(X_incomplete)

# Instead of solving the nuclear norm objective directly, instead
# induce sparsity using singular value thresholding
softImpute = SoftImpute()

# simultaneously normalizes the rows and columns of your observed data,
# sometimes useful for low-rank imputation methods
github iskandr / fancyimpute / experiments / complete_faces.py View on Github external
images_dict=images_dict,
        scale_rows=False,
        center_rows=False)

    # Register IterativeImputer variants in the results table.
    # NOTE(review): regularization_weight is computed but never passed to
    # IterativeImputer, so the three entries differ only in name — likely
    # a leftover from an older API; confirm against the experiment's intent.
    for negative_log_regularization_weight in [2, 3, 4]:
        regularization_weight = 10.0 ** -negative_log_regularization_weight
        table.add_entry(
            solver=IterativeImputer(
                n_nearest_features=80,
                max_iter=50
            ),
            name="IterativeImputer_%d" % negative_log_regularization_weight)

    # Simple per-column baselines: fill with the column mean or median.
    for fill_method in ["mean", "median"]:
        table.add_entry(
            solver=SimpleFill(fill_method=fill_method),
            name="SimpleFill_%s" % fill_method)

    # k-nearest-neighbour imputation over rows, for several neighbourhood sizes.
    for k in [1, 3, 7]:
        table.add_entry(
            solver=KNN(
                k=k,
                orientation="rows"),
            name="KNN_k%d" % (k,))

    # Singular-value-thresholding imputation at several shrinkage levels.
    for shrinkage_value in [25, 50, 100]:
        # SoftImpute without rank constraints
        table.add_entry(
            solver=SoftImpute(
                shrinkage_value=shrinkage_value),
            name="SoftImpute_lambda%d" % (shrinkage_value,))
github awslabs / datawig / experiments / benchmarks.py View on Github external
def impute_mean(X, mask):
    """Impute the masked entries of X with SimpleFill's "mean" strategy.

    Delegates to fancyimpute_hpo, passing a single-option search space so the
    only configuration tried is fill_method="mean".
    """
    search_space = {'fill_method': ["mean"]}
    return fancyimpute_hpo(SimpleFill, search_space, X, mask)