How to use the lale.type_checking.validate_is_schema function in lale

To help you get started, we've selected a few lale examples based on popular ways the function is used in public projects. All excerpts call lale.type_checking.validate_is_schema to check that a value is a well-formed JSON schema.

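Before the excerpts, here is a minimal self-contained sketch of what validate_is_schema does: it accepts a value that should be a JSON schema and raises if it is not well formed. The schema below is made up for illustration, and the exact exception type for the failure case comes from the underlying jsonschema library.

    import lale.type_checking

    # A hand-written JSON schema (made up for this illustration).
    schema = {'type': 'array', 'items': {'type': 'number'}}

    # Passes silently: the value is a well-formed schema.
    lale.type_checking.validate_is_schema(schema)

    # A malformed schema raises.
    try:
        lale.type_checking.validate_is_schema({'type': 'not_a_real_type'})
    except Exception as e:
        print('rejected:', e)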

From IBM/lale, test/test_core_operators.py — validating a regressor's fit/predict and hyperparameter schemas before exercising it:
def test_regressor(self):
    # clf_name (a fully qualified class name such as 'lale.lib.sklearn.LinearRegression')
    # and the imports of lale.type_checking and NoOp come from the surrounding test module.
    X_train, y_train = self.X_train, self.y_train
    X_test, y_test = self.X_test, self.y_test
    import importlib
    module_name = ".".join(clf_name.split('.')[0:-1])
    class_name = clf_name.split('.')[-1]
    module = importlib.import_module(module_name)

    class_ = getattr(module, class_name)
    regr = class_()

    # test_schemas_are_schemas
    lale.type_checking.validate_is_schema(regr.input_schema_fit())
    lale.type_checking.validate_is_schema(regr.input_schema_predict())
    lale.type_checking.validate_is_schema(regr.output_schema_predict())
    lale.type_checking.validate_is_schema(regr.hyperparam_schema())

    # test_init_fit_predict
    trained = regr.fit(self.X_train, self.y_train)
    predictions = trained.predict(self.X_test)

    # test_predict_on_trainable
    trained = regr.fit(X_train, y_train)
    regr.predict(X_train)

    # test_to_json
    regr.to_json()

    # test_in_a_pipeline
    pipeline = NoOp() >> regr

From IBM/lale, test/test_nlp_operators.py — the same pattern for a transformer, using the fit/transform schemas:
def test_encoder(self):
    # encoder_name is a fully qualified class name supplied by the surrounding test module.
    import importlib
    module_name = ".".join(encoder_name.split('.')[0:-1])
    class_name = encoder_name.split('.')[-1]
    module = importlib.import_module(module_name)

    class_ = getattr(module, class_name)
    encoder = class_()

    # test_schemas_are_schemas
    lale.type_checking.validate_is_schema(encoder.input_schema_fit())
    lale.type_checking.validate_is_schema(encoder.input_schema_transform())
    lale.type_checking.validate_is_schema(encoder.output_schema_transform())
    lale.type_checking.validate_is_schema(encoder.hyperparam_schema())

    # test_init_fit_transform
    trained = encoder.fit(self.X_train, self.y_train)
    transformed = trained.transform(self.X_train)

From IBM/lale, test/test_core_operators.py — the feature-preprocessing variant; the excerpt starts in the middle of a test method:
module_name = ".".join(fproc_name.split('.')[0:-1])
        class_name = fproc_name.split('.')[-1]
        module = importlib.import_module(module_name)

        class_ = getattr(module, class_name)
        fproc = class_()

        from lale.lib.sklearn.one_hot_encoder import OneHotEncoderImpl
        if fproc._impl_class() == OneHotEncoderImpl:
            #fproc = OneHotEncoder(handle_unknown = 'ignore')
            #remove the hack when this is fixed
            fproc = PCA()
        #test_schemas_are_schemas
        lale.type_checking.validate_is_schema(fproc.input_schema_fit())
        lale.type_checking.validate_is_schema(fproc.input_schema_transform())
        lale.type_checking.validate_is_schema(fproc.output_schema_transform())
        lale.type_checking.validate_is_schema(fproc.hyperparam_schema())

        #test_init_fit_transform
        trained = fproc.fit(self.X_train, self.y_train)
        predictions = trained.transform(self.X_test)

        #test_predict_on_trainable
        trained = fproc.fit(X_train, y_train)
        fproc.transform(X_train)

        #test_to_json
        fproc.to_json()

        #test_in_a_pipeline
        #This test assumes that the output of feature processing is compatible with LogisticRegression
        from lale.lib.sklearn import LogisticRegression

From IBM/lale, test/test_interoperability.py — operators that wrap an inner pipeline; constructing one without its required operator argument raises ValueError:
# Excerpt from a test method; res_name is the fully qualified class name of a
# resampler operator, supplied by the surrounding test module.
from lale.lib.sklearn import PCA, Nystroem, LogisticRegression, RandomForestClassifier
from lale.lib.lale import NoOp, ConcatFeatures
X_train, y_train = self.X_train, self.y_train
X_test, y_test = self.X_test, self.y_test
import importlib
module_name = ".".join(res_name.split('.')[0:-1])
class_name = res_name.split('.')[-1]
module = importlib.import_module(module_name)

class_ = getattr(module, class_name)
# The operator requires an inner operator argument, so a bare call fails.
with self.assertRaises(ValueError):
    res = class_()

# test_schemas_are_schemas
lale.type_checking.validate_is_schema(class_.input_schema_fit())
lale.type_checking.validate_is_schema(class_.input_schema_predict())
lale.type_checking.validate_is_schema(class_.output_schema_predict())
lale.type_checking.validate_is_schema(class_.hyperparam_schema())

# test_init_fit_predict
from lale.operators import make_pipeline
pipeline1 = PCA() >> class_(operator=make_pipeline(LogisticRegression()))
trained = pipeline1.fit(X_train, y_train)
predictions = trained.predict(X_test)

pipeline2 = class_(operator=make_pipeline(PCA(), LogisticRegression()))
trained = pipeline2.fit(X_train, y_train)
predictions = trained.predict(X_test)

# test_with_hyperopt
from lale.lib.lale import Hyperopt
optimizer = Hyperopt(estimator=PCA >> class_(operator=make_pipeline(LogisticRegression())),
                     max_evals=1, show_progressbar=False)

From IBM/lale, test/test_custom_schemas.py — checking that an operator's schemas stay well formed after customize_schema overrides one of them:
def test_override_input(self):
    # self.sk_pca and self.ll_pca are PCA operators set up by the test fixture;
    # schemas refers to lale.schemas.
    init_input_schema = self.sk_pca.get_schema('input_fit')
    pca_input = self.ll_pca.get_schema('input_fit')
    foo = self.sk_pca.customize_schema(input_fit=schemas.JSON(pca_input))
    self.assertEqual(foo.get_schema('input_fit'), pca_input)
    lale.type_checking.validate_is_schema(foo._schemas)
    self.assertEqual(self.sk_pca.get_schema('input_fit'), init_input_schema)
    self.assertRaises(
        Exception, self.sk_pca.customize_schema, input_fit={})
    self.assertRaises(
        Exception, self.sk_pca.customize_schema, input_foo=pca_input)
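
The same idea outside a test class, as a minimal sketch: it assumes lale's sklearn wrapper for PCA and the lale.schemas helpers seen above, and the replacement schema contents are made up for illustration.

    import lale.schemas as schemas
    import lale.type_checking
    from lale.lib.sklearn import PCA

    # Replace the fit input schema with a hand-written one (illustrative only).
    new_input = schemas.JSON({
        'type': 'object',
        'properties': {
            'X': {'type': 'array',
                  'items': {'type': 'array', 'items': {'type': 'number'}}}}})
    customized = PCA.customize_schema(input_fit=new_input)

    # The customized operator's combined schemas are still well formed.
    lale.type_checking.validate_is_schema(customized._schemas)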

From IBM/lale, lale/datasets/data_schemas.py — to_schema converts common data containers to JSON schemas and validates each result before returning it:
def to_schema(obj):
    # (signature reconstructed from the error message below; earlier branches not shown)
    if isinstance(obj, np.ndarray):  # condition reconstructed from the call it guards
        result = ndarray_to_schema(obj)
    elif isinstance(obj, scipy.sparse.csr_matrix):
        result = csr_matrix_to_schema(obj)
    elif isinstance(obj, pd.DataFrame):
        result = dataframe_to_schema(obj)
    elif isinstance(obj, pd.Series):
        result = series_to_schema(obj)
    elif torch_installed and isinstance(obj, torch.Tensor):
        result = torch_tensor_to_schema(obj)
    elif is_liac_arff(obj):
        result = liac_arff_to_schema(obj)
    elif lale.type_checking.is_schema(obj):
        result = obj
    else:
        raise ValueError(f'to_schema(obj), type {type(obj)}, value {obj}')
    lale.type_checking.validate_is_schema(result)
    return result
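
A short usage sketch for to_schema, assuming a standard lale install: the inferred schema has already passed validate_is_schema by the time it is returned, so the explicit call below is redundant and only shows the pattern.

    import pandas as pd
    import lale.datasets.data_schemas
    import lale.type_checking

    df = pd.DataFrame({'x': [1.0, 2.0], 'y': [3, 4]})
    schema = lale.datasets.data_schemas.to_schema(df)

    # Redundant (to_schema already validated), but shows the call pattern.
    lale.type_checking.validate_is_schema(schema)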

From IBM/lale, lale/operators.py — during hyperparameter validation, the subschema attached to a jsonschema.ValidationError is itself checked with validate_is_schema before being pretty-printed into an error message:
# Excerpt from hyperparameter handling. The enclosing loop and enum check are
# reconstructed (marked below); they are not part of the original excerpt.
for k, v in kwargs.items():                  # reconstructed loop header
    if isinstance(v, enum.Enum):             # reconstructed condition
        k2, v2 = self._enum_to_strings(v)
        if k != k2:
            raise ValueError(
                'Invalid keyword {} for argument {}.'.format(k2, v2))
    else:
        v2 = v
    hyperparams[k] = v2
# using params_all instead of hyperparams to ensure the construction is consistent with the schema
trainable_to_get_params = TrainableIndividualOp(_name=self.name(), _impl=None, _schemas=self._schemas)
trainable_to_get_params._hyperparams = hyperparams
params_all = trainable_to_get_params._get_params_all()
try:
    lale.type_checking.validate_schema(params_all, self.hyperparam_schema())
except jsonschema.ValidationError as e_orig:
    # Surface the root error, check that its schema is itself well formed,
    # and pretty-print it for the user-facing message.
    e = e_orig if e_orig.parent is None else e_orig.parent
    lale.type_checking.validate_is_schema(e.schema)
    schema = lale.pretty_print.to_string(e.schema)
    if [*e.schema_path][:3] == ['allOf', 0, 'properties']:
        arg = e.schema_path[3]
        reason = f'invalid value {arg}={e.instance}'
        schema_path = f'argument {arg}'
    elif [*e.schema_path][:3] == ['allOf', 0, 'additionalProperties']:
        pref, suff = 'Additional properties are not allowed (', ')'
        assert e.message.startswith(pref) and e.message.endswith(suff)
        reason = 'argument ' + e.message[len(pref):-len(suff)]
        schema_path = 'arguments and their defaults'
        schema = self.hyperparam_defaults()
    elif e.schema_path[0] == 'allOf' and int(e.schema_path[1]) != 0:
        assert e.schema_path[2] == 'anyOf'
        descr = e.schema['description']
        if descr.endswith('.'):
            descr = descr[:-1]
        # (excerpt ends here)

From IBM/lale, lale/json_operator.py — the tail of the SCHEMA constant for serialized operators, plus a self-check that validates it when the module runs as a script:
# Tail of the SCHEMA constant; the enclosing 'definitions' entries are not shown.
      'properties': {
        'class': {
          'enum': ['lale.operators.OperatorChoice']},
        'state': {
          'enum': ['planned']},
        'operator': {
          'type': 'string'},
        'steps': {
          'type': 'object',
          'patternProperties': {
            '^[a-z][a-z_0-9]*$': {'$ref': '#/definitions/operator'}}}}}},
  '$ref': '#/definitions/operator'}

if __name__ == "__main__":
    import lale.type_checking
    lale.type_checking.validate_is_schema(SCHEMA)

def json_op_kind(jsn: JSON_TYPE) -> str:
    if jsn['class'] == 'lale.operators.OperatorChoice':
        return 'OperatorChoice'
    if jsn['class'] in ['lale.operators.PlannedPipeline',
                        'lale.operators.TrainablePipeline',
                        'lale.operators.TrainedPipeline']:
        return 'Pipeline'
    return 'IndividualOp'

def _get_state(op: 'lale.operators.Operator') -> str:
    if isinstance(op, lale.operators.TrainedOperator):
        return 'trained'
    if isinstance(op, lale.operators.TrainableOperator):
        return 'trainable'
    if isinstance(op, lale.operators.PlannedOperator) or isinstance(op, lale.operators.OperatorChoice):
        return 'planned'  # reconstructed: the excerpt ends mid-function