How to use the raiden.utils.typing.TokenNetworkAddress function in raiden

To help you get started, we’ve selected a few raiden examples based on popular ways TokenNetworkAddress is used in public projects.

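TokenNetworkAddress is a typing helper (a NewType over a canonical 20-byte address), so calling it simply tags an address value for the type checker rather than performing any conversion. The examples below construct it in two ways: from a raw 20-byte value and from a hex string converted with eth_utils.to_canonical_address. Here is a minimal sketch of both patterns; the hex address is illustrative only.

from eth_utils import to_canonical_address
from raiden.utils.typing import TokenNetworkAddress

# From a raw 20-byte value, as the test fixtures below do.
token_network_address = TokenNetworkAddress(bytes([1] * 20))

# From a hex string, e.g. an address read from an event payload.
# The address value here is illustrative only.
token_network_address = TokenNetworkAddress(
    to_canonical_address("0x5fbdb2315678afecb367f032d93f642f64180aa3")
)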

github raiden-network / raiden-services / tests / pathfinding / fixtures / __init__.py
def token_network_model() -> TokenNetwork:
    return TokenNetwork(TokenNetworkAddress(bytes([1] * 20)))
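
Since this fixture lives in the tests/pathfinding/fixtures package, it is presumably registered as a pytest fixture. The sketch below shows how it might be declared and consumed; the pathfinding_service.model import path and the consuming test are assumptions, not code from the repository.

import pytest

from pathfinding_service.model import TokenNetwork  # assumed import path
from raiden.utils.typing import TokenNetworkAddress


@pytest.fixture
def token_network_model() -> TokenNetwork:
    # A fixed 20-byte address keeps the fixture deterministic across test runs.
    return TokenNetwork(TokenNetworkAddress(bytes([1] * 20)))


def test_token_network_fixture(token_network_model: TokenNetwork) -> None:
    # Hypothetical consumer: pytest injects the fixture by parameter name.
    assert isinstance(token_network_model, TokenNetwork)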
github raiden-network / raiden-services / tests / pathfinding / test_service.py
)
    event_log = format_to_hex(_logger=logger, _log_method=log_method, event_dict=dict(event=event))
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address) == event_log["event"]["token_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address2) == event_log["event"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        event_log["event"]["type_name"] == "ReceiveTokenNetworkCreatedEvent"
    )

    message = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=TokenNetworkAddress(address),
            channel_identifier=ChannelID(1),
        ),
        updating_participant=PARTICIPANT1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    message_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(message=message)
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address)
        == message_log["message"]["canonical_identifier"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        message_log["message"]["type_name"] == "PFSFeeUpdate"
github raiden-network / raiden-services / tests / pathfinding / test_service.py
def test_token_channel_opened(pathfinding_service_mock, token_network_model):
    setup_channel(pathfinding_service_mock, token_network_model)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1

    # Test invalid token network address
    channel_event = ReceiveChannelOpenedEvent(
        token_network_address=TokenNetworkAddress(bytes([2] * 20)),
        channel_identifier=ChannelID(1),
        participant1=PARTICIPANT1,
        participant2=PARTICIPANT2,
        settle_timeout=BlockTimeout(20),
        block_number=BlockNumber(1),
    )

    pathfinding_service_mock.handle_event(channel_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1

    # Check that presence of these addresses is followed
    pathfinding_service_mock.matrix_listener.follow_address_presence.assert_has_calls(
        [call(PARTICIPANT1, refresh=True), call(PARTICIPANT2, refresh=True)]
    )
github raiden-network / raiden-services / src / raiden_libs / blockchain.py
to_block=to_block,
        # When `to_block` == `from_block` we query one block, so add one
        num_blocks=to_block - from_block + 1,
    )

    # first check for new token networks and add to state
    registry_events = query_blockchain_events(
        web3=web3,
        contract_addresses=[chain_state.token_network_registry_address],
        from_block=from_block,
        to_block=to_block,
    )

    events: List[Event] = []
    for event_dict in registry_events:
        token_network_address = TokenNetworkAddress(
            to_canonical_address(event_dict["args"]["token_network_address"])
        )
        events.append(
            ReceiveTokenNetworkCreatedEvent(
                token_network_address=token_network_address,
                token_address=TokenAddress(
                    to_canonical_address(event_dict["args"]["token_address"])
                ),
                block_number=event_dict["blockNumber"],
            )
        )
        token_network_addresses.append(token_network_address)

    # then check all token networks
    network_events = query_blockchain_events(
        web3=web3,
github raiden-network / raiden-services / src / raiden_libs / blockchain.py
log.info("Querying new block(s)", from_block=from_block, end_block=to_block)

    # first check for new token networks and add to state
    registry_events = query_blockchain_events(
        web3=web3,
        contract_manager=contract_manager,
        contract_address=new_chain_state.token_network_registry_address,
        contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,
        topics=create_registry_event_topics(contract_manager),
        from_block=from_block,
        to_block=to_block,
    )

    events: List[Event] = []
    for event_dict in registry_events:
        token_network_address = TokenNetworkAddress(
            to_canonical_address(event_dict["args"]["token_network_address"])
        )
        events.append(
            ReceiveTokenNetworkCreatedEvent(
                token_network_address=token_network_address,
                token_address=to_canonical_address(event_dict["args"]["token_address"]),
                block_number=event_dict["blockNumber"],
            )
        )
        new_chain_state.token_network_addresses.append(token_network_address)

    # then check all token networks
    for token_network_address in new_chain_state.token_network_addresses:
        network_events = query_blockchain_events(
            web3=web3,
            contract_manager=contract_manager,
github raiden-network / raiden / raiden / raiden_service.py
msg = (
            f"The blockchain event handler has to be instantiated before the "
            f"alarm task is started. node:{self!r}"
        )
        assert self.blockchain_events, msg

        poll_result = ZERO_POLL_RESULT

        sync_start = datetime.now()

        while self.blockchain_events.last_fetched_block < target_block_number:
            self._log_sync_progress(target_block_number)

            poll_result = self.blockchain_events.fetch_logs_in_batch(target_block_number)
            pendingtokenregistration: Dict[
                TokenNetworkAddress, Tuple[TokenNetworkRegistryAddress, TokenAddress]
            ] = dict()

            state_changes: List[StateChange] = list()
            for event in poll_result.events:
                state_changes.extend(
                    blockchainevent_to_statechange(
                        self, event, poll_result.polled_block_number, pendingtokenregistration
                    )
                )

            # On restarts the node has to pick up all events generated since the
            # last run. To do this the node will set the filters' from_block to
            # the value of the latest block number known to have *all* events
            # processed.
            #
            # To guarantee the above the node must either:
github raiden-network / raiden / raiden / blockchain / state.py
def get_contractreceivechannelclosed_data_from_event(
    chain_state: "ChainState", event: DecodedEvent
) -> Optional[CanonicalIdentifier]:
    token_network_address = TokenNetworkAddress(event.originating_contract)
    data = event.event_data
    args = data["args"]
    channel_identifier = args["channel_identifier"]

    channel_state = views.get_channelstate_by_canonical_identifier(
        chain_state=chain_state,
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=chain_state.chain_id,
            token_network_address=token_network_address,
            channel_identifier=channel_identifier,
        ),
    )

    if channel_state:
        return channel_state.canonical_identifier
github raiden-network / raiden / raiden / transfer / identifiers.py
@dataclass(frozen=True)
class QueueIdentifier:
    recipient: Address
    canonical_identifier: CanonicalIdentifier

    def __str__(self) -> str:
        return (
            "QueueIdentifier("
            f"recipient={to_checksum_address(self.recipient)}, "
            f"canonical_identifier={self.canonical_identifier}"
            ")"
        )


CANONICAL_IDENTIFIER_UNORDERED_QUEUE = CanonicalIdentifier(
    ChainID(0), TokenNetworkAddress(EMPTY_ADDRESS), ChannelID(0)
)
github raiden-network / raiden-services / src / pathfinding_service / service.py
def on_capacity_update(self, message: PFSCapacityUpdate) -> Channel:
        token_network = self._validate_pfs_capacity_update(message)
        log.debug("Received Capacity Update", message=message)
        self.database.upsert_capacity_update(message)

        updating_capacity_partner, other_capacity_partner = self.database.get_capacity_updates(
            updating_participant=message.other_participant,
            token_network_address=TokenNetworkAddress(
                message.canonical_identifier.token_network_address
            ),
            channel_id=message.canonical_identifier.channel_identifier,
        )
        return token_network.handle_channel_balance_update_message(
            message=message,
            updating_capacity_partner=updating_capacity_partner,
            other_capacity_partner=other_capacity_partner,
        )