How to use the mleap.sklearn.preprocessing.data.StandardScaler function in mleap

To help you get started, we’ve selected a few mleap examples, based on popular ways it is used in public projects.
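All of the snippets below follow the same round-trip pattern: attach MLeap metadata to a scikit-learn StandardScaler with mlinit, fit it, write it out with serialize_to_bundle, and load it back with deserialize_from_bundle. The sketch below condenses that pattern into a standalone script; the import paths and the sample DataFrame are assumptions pieced together from the page title and the test code that follows, not verbatim project code.

import pandas as pd
from mleap.sklearn.preprocessing.data import FeatureExtractor, StandardScaler

# Hypothetical sample data; any numeric DataFrame works here
df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [4.0, 5.0, 6.0]})

# Declare which input columns feed the scaler
feature_extractor = FeatureExtractor(input_scalars=['a', 'b'],
                                     output_vector='extracted_features',
                                     output_vector_items=['a_out', 'b_out'])

# Attach MLeap metadata, then fit as with any scikit-learn transformer
standard_scaler = StandardScaler(with_mean=True, with_std=True)
standard_scaler.mlinit(prior_tf=feature_extractor,
                       output_features=['a_scaled', 'b_scaled'])
standard_scaler.fit(df[['a', 'b']])

# Serialize to an MLeap bundle, then load it back and transform
standard_scaler.serialize_to_bundle('/tmp', standard_scaler.name)
node_name = "{}.node".format(standard_scaler.name)
restored = StandardScaler().deserialize_from_bundle('/tmp', node_name)
scaled = restored.transform(df[['a', 'b']])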

From combust/mleap: python/mleap/sklearn/preprocessing/tests.py (view on GitHub)
# From inside a test method: `feature_extractor` and the fixture `self.df`
# are set up earlier in the test (see the fuller snippet further down).
standard_scaler = StandardScaler(with_mean=True, with_std=True)

standard_scaler.mlinit(prior_tf=feature_extractor,
                       output_features=['a_scaled', 'b_scaled'])

standard_scaler.fit(self.df[['a', 'b']])

# Serialize the fitted scaler to a bundle
standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)

# Now deserialize it back
node_name = "{}.node".format(standard_scaler.name)
standard_scaler_tf = StandardScaler()
standard_scaler_tf = standard_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)

# Transform some sample data and check that the original and the
# deserialized scalers agree
res_a = standard_scaler.transform(self.df[['a', 'b']])
res_b = standard_scaler_tf.transform(self.df[['a', 'b']])

self.assertEqual(res_a[0][0], res_b[0][0])
self.assertEqual(res_a[0][1], res_b[0][1])
self.assertEqual(standard_scaler.name, standard_scaler_tf.name)
self.assertEqual(standard_scaler.op, standard_scaler_tf.op)
self.assertEqual(standard_scaler.mean_[0], standard_scaler_tf.mean_[0])
self.assertEqual(standard_scaler.mean_[1], standard_scaler_tf.mean_[1])
self.assertEqual(standard_scaler.scale_[0], standard_scaler_tf.scale_[0])
self.assertEqual(standard_scaler.scale_[1], standard_scaler_tf.scale_[1])

From combust/mleap: python/mleap/sklearn/preprocessing/tests.py (view on GitHub)
# From inside a test method: `feature_extractor` and the fixture `self.df`
# are set up earlier in the test (see the fuller snippet further down).
standard_scaler = StandardScaler(with_mean=True, with_std=True)

standard_scaler.mlinit(prior_tf=feature_extractor,
                       output_features='a_scaled')

standard_scaler.fit(self.df[['a']])

# Serialize the fitted scaler to a bundle
standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)

# Now deserialize it back
node_name = "{}.node".format(standard_scaler.name)
standard_scaler_tf = StandardScaler()
standard_scaler_tf = standard_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)

# Transform some sample data and check that the original and the
# deserialized scalers agree
res_a = standard_scaler.transform(self.df[['a']])
res_b = standard_scaler_tf.transform(self.df[['a']])

self.assertEqual(res_a[0], res_b[0])
self.assertEqual(standard_scaler.name, standard_scaler_tf.name)
self.assertEqual(standard_scaler.op, standard_scaler_tf.op)
self.assertEqual(standard_scaler.mean_, standard_scaler_tf.mean_)
self.assertEqual(standard_scaler.scale_, standard_scaler_tf.scale_)

From combust/mleap: python/mleap/sklearn/preprocessing/tests.py (view on GitHub)
def test_standard_scaler_multi_deserializer(self):

    extract_features = ['a', 'b']
    feature_extractor = FeatureExtractor(input_scalars=['a', 'b'],
                                         output_vector='extracted_multi_outputs',
                                         output_vector_items=["{}_out".format(x) for x in extract_features])

    # Serialize a standard scaler to a bundle
    standard_scaler = StandardScaler(with_mean=True, with_std=True)

    standard_scaler.mlinit(prior_tf=feature_extractor,
                           output_features=['a_scaled', 'b_scaled'])

    standard_scaler.fit(self.df[['a', 'b']])

    standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)

    # Now deserialize it back
    node_name = "{}.node".format(standard_scaler.name)

    standard_scaler_tf = StandardScaler()
    # (Snippet truncated on the source page; the round trip continues as
    # in the first snippet above.)

From combust/mleap: python/mleap/sklearn/preprocessing/tests.py (view on GitHub)
def test_standard_scaler_serializer(self):

    standard_scaler = StandardScaler(with_mean=True, with_std=True)

    extract_features = ['a']
    feature_extractor = FeatureExtractor(input_scalars=['a'],
                                         output_vector='extracted_a_output',
                                         output_vector_items=["{}_out".format(x) for x in extract_features])

    standard_scaler.mlinit(prior_tf=feature_extractor,
                           output_features='a_scaled')

    standard_scaler.fit(self.df[['a']])

    standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)

    expected_mean = self.df.a.mean()
    # (Snippet truncated on the source page.)

From combust/mleap: python/mleap/sklearn/preprocessing/tests.py (view on GitHub)
def test_standard_scaler_deserializer(self):

    extract_features = ['a']
    feature_extractor = FeatureExtractor(input_scalars=['a'],
                                         output_vector='extracted_a_output',
                                         output_vector_items=["{}_out".format(x) for x in extract_features])

    # Serialize a standard scaler to a bundle
    standard_scaler = StandardScaler(with_mean=True, with_std=True)

    standard_scaler.mlinit(prior_tf=feature_extractor,
                           output_features='a_scaled')

    standard_scaler.fit(self.df[['a']])

    standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)

    # Now deserialize it back
    node_name = "{}.node".format(standard_scaler.name)

    standard_scaler_tf = StandardScaler()
    # (Snippet truncated on the source page; the round trip continues as
    # in the second snippet above.)