How to use the py4j.java_gateway.JavaObject class in py4j

To help you get started, we’ve selected a few py4j examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github autodeployai / pypmml / pypmml / base.py View on Github external
def _java2py(r):
    """Translate a py4j result into a plain Python value.

    Java arrays become Python lists (recursively converted); the Scala
    wrappers ``Some`` / ``None$`` are unwrapped to their payload / ``None``;
    Scala enumeration values are rendered via ``toString``.  Anything else
    is returned untouched.
    """
    if isinstance(r, JavaArray):
        return [_java2py(element) for element in r]
    if isinstance(r, JavaObject):
        class_name = r.getClass().getName()
        if class_name == 'scala.Some':
            return r.get()
        if class_name == 'scala.None$':
            return None
        if class_name == 'scala.Enumeration$Val':
            return r.toString()
    # Unrecognized JVM classes and native Python values pass through.
    return r
github bartdag / py4j / py4j-python / src / py4j / java_collections.py View on Github external
return self.keySet().iterator()

    def __contains__(self, key):
        """Support ``key in mapping`` by delegating to Java ``containsKey``."""
        return self.containsKey(key)

    def __str__(self):
        # str() and repr() render identically for this mapping.
        return repr(self)

    def __repr__(self):
        """Render the mapping like a native dict literal: ``{k1: v1, ...}``."""
        pairs = [
            "{0}: {1}".format(repr(key), repr(value))
            for key, value in iteritems(self)
        ]
        return "{{{0}}}".format(", ".join(pairs))


class JavaSet(JavaObject, MutableSet):
    """Maps a Python Set to a Java Set.

    All operations possible on a Python set are implemented."""

    __EMPTY_SET = "set([])" if sys.version_info.major < 3 else "set()"
    __SET_TEMPLATE = "set([{0}])" if sys.version_info.major < 3 else "{{{0}}}"

    def __init__(self, target_id, gateway_client):
        """Bind this wrapper to the remote Java set *target_id*.

        The three Java-side mutators used by the MutableSet API are
        resolved once here and cached as bound methods (``_add``,
        ``_clear``, ``_remove``) so later calls skip the lookup.
        """
        JavaObject.__init__(self, target_id, gateway_client)
        for op_name in ("add", "clear", "remove"):
            setattr(self, "_" + op_name, get_method(self, op_name))

    def add(self, value):
        """Insert *value* into the underlying Java set (MutableSet API).

        The Java boolean result is deliberately discarded: Python's
        ``set.add`` returns ``None``.
        """
        self._add(value)
github intel-analytics / BigDL / pyspark / bigdl / optim / optimizer.py View on Github external
def __init__(self, jvalue, bigdl_type, *args):
    """Wrap an existing JVM value or construct a new one.

    :param jvalue: an existing py4j ``JavaObject`` to wrap directly, or a
        falsy value to build a fresh JVM object instead.
    :param bigdl_type: type tag forwarded to the JVM helper and stored on
        the instance as ``self.bigdl_type``.
    :param args: constructor arguments, used only when ``jvalue`` is absent.
    """
    if jvalue:
        # isinstance (rather than ``type(...) ==``) also accepts
        # JavaObject subclasses, which compare unequal under type().
        assert isinstance(jvalue, JavaObject), \
            "jvalue must be a py4j JavaObject, got %s" % type(jvalue)
        self.value = jvalue
    else:
        self.value = callBigDlFunc(
            bigdl_type, JavaValue.jvm_class_constructor(self), *args)
    self.bigdl_type = bigdl_type
github UCLA-VAST / blaze / spark-1.5.1 / python / pyspark / mllib / common.py View on Github external
def _py2java(sc, obj):
    """Convert a Python object into its Java/JVM counterpart.

    RDDs, DataFrames and the SparkContext are unwrapped to their JVM
    handles; lists are converted element-wise into a Java list; py4j
    objects and plain scalars pass through unchanged; anything else is
    pickled here and deserialized on the JVM side.
    """
    if isinstance(obj, RDD):
        return _to_java_object_rdd(obj)
    if isinstance(obj, DataFrame):
        return obj._jdf
    if isinstance(obj, SparkContext):
        return obj._jsc
    if isinstance(obj, list):
        elements = [_py2java(sc, item) for item in obj]
        return ListConverter().convert(elements, sc._gateway._gateway_client)
    if isinstance(obj, (JavaObject, int, long, float, bool, bytes, unicode)):
        return obj
    # Fallback: pickle on the Python side, unpickle in the JVM.
    payload = bytearray(PickleSerializer().dumps(obj))
    return sc._jvm.SerDe.loads(payload)
github qubole / spark-on-lambda / python / pyspark / ml / param / __init__.py View on Github external
def _setDefault(self, **kwargs):
    """
    Sets default params.

    Each keyword maps a param name to its default value.  Values that are
    neither ``None`` nor py4j ``JavaObject`` handles are run through the
    param's type converter first; a conversion failure is re-raised as a
    ``TypeError`` naming the offending param.  Returns ``self`` so calls
    can be chained.
    """
    for name, default in kwargs.items():
        param = getattr(self, name)
        if default is not None and not isinstance(default, JavaObject):
            try:
                default = param.typeConverter(default)
            except TypeError as e:
                raise TypeError('Invalid default param value given for param "%s". %s'
                                % (param.name, e))
        self._defaultParamMap[param] = default
    return self
github locationtech-labs / geopyspark / geopyspark / __init__.py View on Github external
Source: ``pyspark/streaming/context.py`` in ``StreamingContext._ensure_initialized``
    """
    # start callback server
    # getattr will fallback to JVM, so we cannot test by hasattr()
    if "_callback_server" not in gw.__dict__ or gw._callback_server is None:
        gw.callback_server_parameters.eager_load = True
        gw.callback_server_parameters.daemonize = True
        gw.callback_server_parameters.daemonize_connections = True
        gw.callback_server_parameters.port = 0
        gw.start_callback_server(gw.callback_server_parameters)
        cbport = gw._callback_server.server_socket.getsockname()[1]
        gw._callback_server.port = cbport
        # gateway with real port
        gw._python_proxy_port = gw._callback_server.port
        # get the GatewayServer object in JVM by ID
        jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
        # update the port of CallbackClient with real port
        jgws.resetCallbackClient(jgws.getCallbackClient().getAddress(), gw._python_proxy_port)
github qubole / spark-on-lambda / python / pyspark / ml / common.py View on Github external
def _py2java(sc, obj):
    """ Convert Python object into Java """
    if isinstance(obj, RDD):
        obj = _to_java_object_rdd(obj)
    elif isinstance(obj, DataFrame):
        obj = obj._jdf
    elif isinstance(obj, SparkContext):
        obj = obj._jsc
    elif isinstance(obj, list):
        # Convert each element; the resulting Python list is handed to py4j.
        obj = [_py2java(sc, element) for element in obj]
    elif not isinstance(obj, (JavaObject, int, long, float, bool, bytes, unicode)):
        # Neither a JVM handle nor a plain scalar: pickle it here and
        # deserialize it on the JVM side via MLSerDe.
        data = bytearray(PickleSerializer().dumps(obj))
        obj = sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(data)
    return obj
github bartdag / py4j / py4j-python / src / py4j / java_collections.py View on Github external
from collections import (
    MutableMapping, Sequence, MutableSequence,
    MutableSet, Set)
import sys

from py4j.compat import (
    iteritems, next, hasattr2, isbytearray,
    ispython3bytestr, basestring)
from py4j.java_gateway import JavaObject, JavaMember, get_method, JavaClass
from py4j import protocol as proto
from py4j.protocol import (
    Py4JError, get_command_part, get_return_value, register_input_converter,
    register_output_converter)


class JavaIterator(JavaObject):
    """Maps a Python list iterator to a Java list iterator.

    The `JavaIterator` follows the Python iterator protocol and raises a
    `StopIteration` error when the iterator can no longer iterate."""
    def __init__(self, target_id, gateway_client):
        """Wrap the remote Java iterator identified by *target_id*.

        :param target_id: id of the remote Java object on the gateway.
        :param gateway_client: client used to communicate with the JVM.
        """
        JavaObject.__init__(self, target_id, gateway_client)
        # Name of the Java-side method invoked to fetch the next element.
        self._next_name = "next"
        # To bind lifecycle of this iterator to the java iterator. To prevent
        # gc of the iterator.

    def __iter__(self):
        # The object is its own iterator (Python iterator protocol).
        return self

    def next(self):
        """This next method wraps the `next` method in Java iterators.
github UCLA-VAST / blaze / spark-1.5.1 / python / pyspark / mllib / linalg / distributed.py View on Github external
>>> mat_same = IndexedRowMatrix(mat._java_matrix_wrapper._java_model)
        >>> (mat_same._java_matrix_wrapper._java_model ==
        ...  mat._java_matrix_wrapper._java_model)
        True
        """
        if isinstance(rows, RDD):
            rows = rows.map(_convert_to_indexed_row)
            # We use DataFrames for serialization of IndexedRows from
            # Python, so first convert the RDD to a DataFrame on this
            # side. This will convert each IndexedRow to a Row
            # containing the 'index' and 'vector' values, which can
            # both be easily serialized.  We will convert back to
            # IndexedRows on the Scala side.
            java_matrix = callMLlibFunc("createIndexedRowMatrix", rows.toDF(),
                                        long(numRows), int(numCols))
        elif (isinstance(rows, JavaObject)
              and rows.getClass().getSimpleName() == "IndexedRowMatrix"):
            java_matrix = rows
        else:
            raise TypeError("rows should be an RDD of IndexedRows or (long, vector) tuples, "
                            "got %s" % type(rows))

        self._java_matrix_wrapper = JavaModelWrapper(java_matrix)