How to use the pennylane.utils._flatten function in PennyLane

To help you get started, we've selected a few PennyLane examples based on popular ways pennylane.utils._flatten is used in public projects.

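Before the project examples, here is a minimal sketch of what the helper does, based on how it is used in the snippets below. _flatten and unflatten are private utilities (assumed here to live in pennylane.utils, as in the PennyLane version these examples were taken from), so the import path and exact behaviour may differ between releases. _flatten walks an arbitrarily nested structure of scalars, sequences, and NumPy arrays in depth-first order and yields the individual leaves; unflatten rebuilds a flat sequence into the nesting of a model structure.

import numpy as np
from pennylane.utils import _flatten, unflatten  # private helpers, assumed import path

# a nested parameter structure: a scalar, a list, and a 2x2 array
params = [0.1, [0.2, 0.3], np.array([[0.4, 0.5], [0.6, 0.7]])]

# _flatten is a generator that yields the leaves in depth-first order
flat = np.array(list(_flatten(params)))
# flat is array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])

# unflatten restores the original nesting, using params as the shape model
restored = unflatten(flat, params)

Every snippet below follows this round trip: flatten nested parameters into a 1D array for numerical work, then unflatten the result so the caller's nesting is preserved.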

From XanaduAI/pennylane on GitHub: pennylane/qnode.py
        Keyword Args:
            diag_approx (bool): If ``True``, forces the diagonal
                approximation. Default is ``False``.

        Returns:
            array[float]: measured values
        """
        diag_approx = kwargs.pop("diag_approx", False)

        if not self.ops or not self.cache:
            # construct the circuit
            self.construct(args, kwargs)

        # temporarily store keyword arguments
        keyword_values = {}
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in self.keyword_defaults.items()})
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in kwargs.items()})

        # temporarily store the free parameter values in the Variable class
        Variable.free_param_values = np.array(list(_flatten(args)))
        Variable.kwarg_values = keyword_values

        if not self._metric_tensor_subcircuits:
            self.construct_metric_tensor(args, diag_approx=diag_approx, **kwargs)

        tensor = np.zeros([self.num_variables, self.num_variables])

        # execute any constructed metric tensor subcircuits
        for params, circuit in self._metric_tensor_subcircuits.items():
            self.device.reset()

            s = np.array(circuit['scale'])
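
The keyword-argument bookkeeping above reappears in several QNode methods below: each keyword value, however it is nested, is flattened into a 1D NumPy array before being stored on the Variable class. A small illustration of that dictionary comprehension, with made-up values and the same assumed import path as before:

import numpy as np
from pennylane.utils import _flatten  # private helper, assumed import path

kwargs = {"angles": [[0.1, 0.2], [0.3, 0.4]], "offset": 0.5}
keyword_values = {k: np.array(list(_flatten(v))) for k, v in kwargs.items()}
# {'angles': array([0.1, 0.2, 0.3, 0.4]), 'offset': array([0.5])}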

From XanaduAI/pennylane on GitHub: pennylane/optimize/qng.py
    def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """
        grad_flat = np.array(list(_flatten(grad)))
        x_flat = np.array(list(_flatten(x)))
        x_new_flat = x_flat - self._stepsize * np.linalg.solve(self.metric_tensor, grad_flat)
        return unflatten(x_new_flat, x)
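
The flatten/solve/unflatten sequence in apply_grad is what lets the optimizer accept arbitrarily nested parameter structures while doing its linear algebra on flat arrays. A small numerical instance of the update rule itself, with hypothetical values for the metric tensor and gradient:

import numpy as np

F = np.array([[0.25, 0.0], [0.0, 0.1]])  # hypothetical metric tensor over the flattened parameters
g = np.array([0.5, 0.2])                 # hypothetical flattened gradient
x = np.array([1.0, 1.0])                 # flattened parameters
stepsize = 0.01

# natural-gradient step: solve F v = g rather than simply scaling g
x_new = x - stepsize * np.linalg.solve(F, g)
# x_new is array([0.98, 0.98])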

From XanaduAI/pennylane on GitHub: pennylane/optimize/nesterov_momentum.py
    def compute_grad(self, objective_fn, x, grad_fn=None):
        r"""Compute the gradient of the objective_fn at
        the shifted point :math:`(x - m\times\text{accumulation})`.

        Args:
            objective_fn (function): the objective function for optimization
            x (array): NumPy array containing the current values of the variables to be updated
            grad_fn (function): Optional gradient function of the
                objective function with respect to the variables ``x``.
                If ``None``, the gradient function is computed automatically.

        Returns:
            array: NumPy array containing the gradient :math:`\nabla f(x^{(t)})`
        """

        x_flat = _flatten(x)

        if self.accumulation is None:
            shifted_x_flat = list(x_flat)
        else:
            shifted_x_flat = [e - self.momentum * a for a, e in zip(self.accumulation, x_flat)]

        shifted_x = unflatten(shifted_x_flat, x)

        if grad_fn is not None:
            g = grad_fn(shifted_x)  # just call the supplied grad function
        else:
            # default is autograd
            g = autograd.grad(objective_fn)(shifted_x)  # pylint: disable=no-value-for-parameter
        return g
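
Here _flatten only appears when building the shifted evaluation point: the momentum term is subtracted element-wise in the flat representation, and unflatten restores the nesting of x before the gradient is evaluated. With made-up numbers and the same assumed import path:

from pennylane.utils import _flatten, unflatten  # private helpers, assumed import path

x = [0.5, [1.0, 1.5]]              # nested parameters
accumulation = [0.01, 0.02, 0.03]  # one entry per flattened parameter
momentum = 0.9

x_flat = _flatten(x)
shifted_x_flat = [e - momentum * a for a, e in zip(accumulation, x_flat)]
shifted_x = unflatten(shifted_x_flat, x)
# shifted_x is roughly [0.491, [0.982, 1.473]]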

From XanaduAI/pennylane on GitHub: pennylane/qnode.py
                to the quantum circuit function.

        Raises:
            QuantumFunctionError: if an attempt is made to modify the :class:`pennylane.QNode`'s
                ``_current_context`` inside this method, if the quantum function returns incorrect
                values, or if both continuous and discrete operations are specified in the same
                quantum circuit
        """
        # pylint: disable=too-many-branches,too-many-statements
        self.queue = []
        self.ev = []  # temporary queue for EVs

        if kwargs is None:
            kwargs = {}

        # flatten the args, replace each with a Variable instance with a unique index
        temp = [Variable(idx) for idx, val in enumerate(_flatten(args))]
        self.num_variables = len(temp)

        # store the nested shape of the arguments for later unflattening
        self.model = args

        # arrange the newly created Variables in the nested structure of args
        variables = unflatten(temp, args)

        # get default kwargs that weren't passed
        keyword_sig = _get_default_args(self.func)
        self.keyword_defaults = {k: v[1] for k, v in keyword_sig.items()}
        self.keyword_positions = {v[0]: k for k, v in keyword_sig.items()}

        keyword_values = {}
        keyword_values.update(self.keyword_defaults)
        keyword_values.update(kwargs)
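
The core of construct() is replacing every flattened positional argument with a placeholder Variable that carries a unique index, then re-nesting the placeholders so they mirror the shape of the original arguments. A sketch of that step, assuming the Variable class is importable from pennylane.variable (its location may differ between versions):

from pennylane.utils import _flatten, unflatten  # private helpers, assumed import path
from pennylane.variable import Variable          # assumed import path

args = ([0.1, 0.2], 0.3)

# one placeholder Variable per flattened positional argument
temp = [Variable(idx) for idx, _ in enumerate(_flatten(args))]

# arrange the placeholders in the nested structure of args
variables = unflatten(temp, args)
# the inner list now holds Variables 0 and 1, and Variable 2 sits at the top level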

From XanaduAI/pennylane on GitHub: pennylane/qnode.py
    def evaluate_obs(self, obs, args, **kwargs):
        """Evaluate the value of the given observables.

        Assumes :meth:`construct` has already been called.

        Args:
            obs  (Iterable[Observable]): observables to measure
            args (array[float]): circuit input parameters

        Returns:
            array[float]: measured values
        """
        # temporarily store keyword arguments
        keyword_values = {}
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in self.keyword_defaults.items()})
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in kwargs.items()})

        # temporarily store the free parameter values in the Variable class
        Variable.free_param_values = args
        Variable.kwarg_values = keyword_values

        self.device.reset()
        ret = self.device.execute(self.circuit.operations, obs, self.circuit.variable_deps)
        return ret

From XanaduAI/pennylane on GitHub: pennylane/qnode.py
        self.keyword_defaults = {k: v[1] for k, v in keyword_sig.items()}
        self.keyword_positions = {v[0]: k for k, v in keyword_sig.items()}

        keyword_values = {}
        keyword_values.update(self.keyword_defaults)
        keyword_values.update(kwargs)

        if self.cache:
            # caching mode, must use variables for kwargs
            # wrap each keyword argument as a Variable
            kwarg_variables = {}
            for key, val in keyword_values.items():
                temp = [Variable(idx, name=key) for idx, _ in enumerate(_flatten(val))]
                kwarg_variables[key] = unflatten(temp, val)

        Variable.free_param_values = np.array(list(_flatten(args)))
        Variable.kwarg_values = {k: np.array(list(_flatten(v))) for k, v in keyword_values.items()}

        # set up the context for Operation entry
        if QNode._current_context is None:
            QNode._current_context = self
        else:
            raise QuantumFunctionError('QNode._current_context must not be modified outside this method.')
        # generate the program queue by executing the quantum circuit function
        try:
            if self.cache:
                # caching mode, must use variables for kwargs
                # so they can be updated without reconstructing
                res = self.func(*variables, **kwarg_variables)
            else:
                # no caching, fine to directly pass kwarg values
                res = self.func(*variables, **keyword_values)

From XanaduAI/pennylane on GitHub: pennylane/qnode.py
        circuit_kwargs = pop_jacobian_kwargs(kwargs)

        if not self.ops or not self.cache:
            # construct the circuit
            self.construct(params, circuit_kwargs)

        sample_ops = [
            e for e in self.circuit.observables if e.return_type is qml.operation.Sample]

        if sample_ops:
            names = [str(e) for e in sample_ops]
            raise QuantumFunctionError("Circuits that include sampling cannot be differentiated. "
                                       "The following observables include sampling: {}".format('; '.join(names)))

        flat_params = np.array(list(_flatten(params)))

        if which is None:
            which = range(len(flat_params))
        else:
            if min(which) < 0 or max(which) >= self.num_variables:
                raise ValueError("Tried to compute the gradient wrt. free parameters {} "
                                 "(this node has {} free parameters).".format(which, self.num_variables))
            if len(which) != len(set(which)):  # set removes duplicates
                raise ValueError("Parameter indices must be unique.")

        # check if the method can be used on the requested parameters
        mmap = _inv_dict(self.grad_method_for_par)
        def check_method(m):
            """Intersection of ``which`` with free params whose best grad method is m."""
            return mmap.get(m, set()).intersection(which)
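
For context on check_method: _inv_dict is another private helper (assumed to live in pennylane.utils in this version) that reverses a dictionary into a multimap, so gradient methods become keys and the parameter indices that use them become sets:

from pennylane.utils import _inv_dict  # private helper, assumed import path

grad_method_for_par = {0: 'A', 1: 'F', 2: 'A'}  # hypothetical parameter-to-method map
mmap = _inv_dict(grad_method_for_par)
# mmap is {'A': {0, 2}, 'F': {1}}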

From XanaduAI/pennylane on GitHub: pennylane/qnode.py
                    # variables are appended to the argument list.

                    # flatten and unflatten arguments
                    flat_args = list(_flatten(args))
                    shaped_args = unflatten(flat_args, self.model)

                    # construct the circuit
                    self.construct(shaped_args, kwargs)
            else:
                # circuit has not yet been constructed
                # construct the circuit
                self.construct(args, kwargs)

        # temporarily store keyword arguments
        keyword_values = {}
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in self.keyword_defaults.items()})
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in kwargs.items()})

        # Try and insert kwargs-as-positional back into the kwargs dictionary.
        # NOTE: this works, but the creation of new, temporary arguments
        # by pd_analytic breaks this.
        # positional = []
        # kwargs_as_position = {}
        # for idx, v in enumerate(args):
        #     if idx not in self.keyword_positions:
        #         positional.append(v)
        #     else:
        #         kwargs_as_position[self.keyword_positions[idx]] = np.array(list(_flatten(v)))
        # keyword_values.update(kwargs_as_position)

        # temporarily store the free parameter values in the Variable class
        Variable.free_param_values = np.array(list(_flatten(args)))

From XanaduAI/pennylane on GitHub: pennylane/qnode.py
"allowed in the same quantum circuit.")

        # TODO: we should enforce plugins using the Device.capabilities dictionary to specify
        # whether they are qubit or CV devices, and remove this logic here.
        self.type = 'CV' if all(are_cvs) else 'qubit'

        if self.device.operations:
            # replace operations in the queue with any decompositions if required
            self.queue = decompose_queue(self.queue, self.device)

        self.ops = self.queue + self.ev  #: list[Operation]: combined list of circuit operations

        # map each free variable to the operations which depend on it
        self.variable_deps = {}
        for k, op in enumerate(self.ops):
            for j, p in enumerate(_flatten(op.params)):
                if isinstance(p, Variable):
                    if p.name is None: # ignore keyword arguments
                        self.variable_deps.setdefault(p.idx, []).append(ParameterDependency(op, j))

        # generate directed acyclic graph
        self.circuit = CircuitGraph(self.ops, self.variable_deps)

        #: dict[int->str]: map from free parameter index to the gradient method to be used with that parameter
        self.grad_method_for_par = {k: self._best_method(k) for k in self.variable_deps}

PennyLane

PennyLane is a cross-platform Python library for quantum computing, quantum machine learning, and quantum chemistry. Train a quantum computer the same way as a neural network.

License: Apache-2.0