Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
the channel_description fixture. Tests all PFS methods w.r.t. to that topology
"""
    # NOTE(review): truncated fragment — the enclosing fixture/test `def` line and
    # the docstring opening lie outside this chunk; indentation reconstructed.
    clients = get_accounts(7)  # presumably seven accounts forming the test topology — TODO confirm
    token_network_address = TokenNetworkAddress(to_canonical_address(token_network.address))
    # Patch out the Matrix transport so no real homeserver connection is attempted.
    with patch("pathfinding_service.service.MatrixListener", new=Mock):
        pfs = PathfindingService(
            web3=web3,
            contracts={
                CONTRACT_TOKEN_NETWORK_REGISTRY: token_network_registry_contract,
                CONTRACT_USER_DEPOSIT: user_deposit_contract,
            },
            required_confirmations=BlockTimeout(1),
            db_filename=":memory:",  # throwaway in-memory DB for the test
            poll_interval=0.1,
            sync_start_block=BlockNumber(0),
            private_key=PrivateKey(
                decode_hex("3a1076bf45ab87712ad64ccb3b10217737f7faacbf2872e88fdd9a537d8fe266")
            ),
        )
    # greenlet needs to be started and context switched to
    pfs.start()
    pfs.updated.wait(timeout=5)
    # there should be one token network registered
    assert len(pfs.token_networks) == 1
    token_network_model = pfs.token_networks[token_network_address]
    graph = token_network_model.G
    channel_identifiers = []
    # NOTE(review): the statement below is cut off mid-line — the rest of the
    # loop lies outside this chunk.
    for (
def test_token_network_created(pathfinding_service_mock):
    """A ReceiveTokenNetworkCreatedEvent registers a new token network once.

    The mocked PFS starts out following one token network; handling the event
    adds a second one, and handling the identical event again must be a no-op
    (idempotency).
    """
    token_address = TokenAddress(bytes([1] * 20))
    # fix: was `bytes(bytes([2] * 20))` — the inner bytes() already produces
    # the 20-byte value, the outer call was redundant
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    network_event = ReceiveTokenNetworkCreatedEvent(
        token_address=token_address,
        token_network_address=token_network_address,
        block_number=BlockNumber(1),
    )

    # The mock already follows exactly one (different) token network.
    assert not pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 1

    pathfinding_service_mock.handle_event(network_event)
    assert pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 2

    # Test idempotency: the same event must not register the network twice.
    pathfinding_service_mock.handle_event(network_event)
    assert pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 2
def test_save_and_load_token_networks(pathfinding_service_mock_empty):
    """Token networks built from events survive a database round-trip.

    Feeds a TokenNetworkCreated and a ChannelOpened event into an empty PFS,
    then reloads the token networks from the database and checks that exactly
    one network comes back.
    """
    pfs = pathfinding_service_mock_empty

    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    participant_one = Address(bytes([3] * 20))
    participant_two = Address(bytes([4] * 20))

    network_created = ReceiveTokenNetworkCreatedEvent(
        token_address=token_address,
        token_network_address=token_network_address,
        block_number=BlockNumber(1),
    )
    channel_opened = ReceiveChannelOpenedEvent(
        token_network_address=token_network_address,
        channel_identifier=channel_id,
        participant1=participant_one,
        participant2=participant_two,
        # settle_timeout exceeds max_uint64 on purpose to exercise hex storage
        settle_timeout=BlockTimeout(2 ** 65),
        block_number=BlockNumber(2),
    )

    for event in (network_created, channel_opened):
        pfs.handle_event(event)
    assert len(pfs.token_networks) == 1

    loaded_networks = pfs._load_token_networks()  # pylint: disable=protected-access
    assert len(loaded_networks) == 1
    # NOTE(review): truncated fragment — the fixture's `def` line and its first
    # parameters (web3, ms_address, monitoring_service_contract, ...) lie
    # outside this chunk; indentation reconstructed.
    user_deposit_contract,
    token_network_registry_contract,
    ms_database: Database,
    get_private_key,
    service_registry,
):
    # Build a MonitoringService wired to the test contracts, syncing from genesis.
    ms = MonitoringService(
        web3=web3,
        private_key=get_private_key(ms_address),
        contracts={
            CONTRACT_TOKEN_NETWORK_REGISTRY: token_network_registry_contract,
            CONTRACT_MONITORING_SERVICE: monitoring_service_contract,
            CONTRACT_USER_DEPOSIT: user_deposit_contract,
            CONTRACT_SERVICE_REGISTRY: service_registry,
        },
        sync_start_block=BlockNumber(0),
        required_confirmations=BlockTimeout(0),  # for faster tests
        poll_interval=0.01,  # for faster tests
        db_filename=":memory:",
    )
    # We need a shared db between MS and RC so the MS can use MR saved by the RC
    ms.context.database = ms_database
    ms.database = ms_database
    return ms
def test_get_blockchain_events_returns_early_for_invalid_interval(
    web3: Web3, token_network_registry_contract: Contract
):
    """An inverted block range (from_block > to_block) must yield no events."""
    chain_state = BlockchainState(
        chain_id=ChainID(1),
        token_network_registry_address=to_canonical_address(
            token_network_registry_contract.address
        ),
        latest_committed_block=BlockNumber(4),
    )

    # from_block (10) lies beyond to_block (5): the query must short-circuit
    # without touching the chain.
    events = get_blockchain_events(
        web3=web3,
        token_network_addresses=[],
        chain_state=chain_state,
        from_block=BlockNumber(10),
        to_block=BlockNumber(5),
    )
    assert len(events) == 0
        # NOTE(review): truncated fragment — `events = [` and earlier per-block
        # event lists lie outside this chunk; indentation reconstructed.
        [
            ReceiveTokenNetworkCreatedEvent(
                token_address=token_address,
                token_network_address=token_network_address,
                block_number=BlockNumber(1),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(2))],
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_id,
                participant1=p1,
                participant2=p2,
                settle_timeout=BlockTimeout(1000),
                block_number=BlockNumber(3),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(4))],
    ]
    # Feed the scripted per-block events into the mocked chain.
    mockchain(events)

    server_private_key = PrivateKey(decode_hex(get_random_privkey()))
    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
    }

    # Factory for a PFS bound to the given DB file — presumably used to restart
    # the service against the same database; TODO confirm against the caller.
    def new_service(filename):
        service = PathfindingService(
            web3=Web3Mock(),
            private_key=server_private_key,
            # NOTE(review): call cut off here — remaining arguments lie outside this chunk.
    # NOTE(review): truncated fragment of a larger setup function — its `def`
    # line and the origin of `config`, `web3`, `privatekey`, `gas_price`,
    # `network_id` lie outside this chunk; indentation reconstructed.
    config["mediation_fees"] = fee_config
    config["blockchain"]["query_interval"] = blockchain_query_interval
    config["environment_type"] = environment_type

    contracts = load_deployed_contracts_data(config, network_id)
    rpc_client = JSONRPCClient(
        web3=web3,
        privkey=privatekey,
        gas_price_strategy=gas_price,
        block_num_confirmations=DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
    )

    # Use the registry's deployment block when known; otherwise fall back to a
    # per-network default start block so event filters don't scan from genesis.
    token_network_registry_deployed_at = None
    if "TokenNetworkRegistry" in contracts:
        token_network_registry_deployed_at = BlockNumber(
            contracts["TokenNetworkRegistry"]["block_number"]
        )
    if token_network_registry_deployed_at is None:
        smart_contracts_start_at = get_smart_contracts_start_at(network_id)
    else:
        smart_contracts_start_at = token_network_registry_deployed_at

    proxy_manager = ProxyManager(
        rpc_client=rpc_client,
        contract_manager=ContractManager(config["contracts_path"]),
        metadata=ProxyManagerMetadata(
            token_network_registry_deployed_at=token_network_registry_deployed_at,
            filters_start_at=smart_contracts_start_at,
        ),
    )
def _transform_snapshot(raw_snapshot: str, storage: SQLiteStorage, cache: BlockHashCache) -> str:
    """Upgrades a single snapshot by adding the blockhash to it and to any pending transactions"""
    # v18 -> v19 migration helper: decode the JSON snapshot and attach the hash
    # of its block, resolved through the (memoizing) BlockHashCache.
    snapshot = json.loads(raw_snapshot)
    block_number = BlockNumber(int(snapshot["block_number"]))
    snapshot["block_hash"] = cache.get(block_number)

    pending_transactions = snapshot["pending_transactions"]
    new_pending_transactions = []
    for transaction_data in pending_transactions:
        # Only ContractSend events may be pending; anything else means the DB
        # is corrupt and the upgrade must abort.
        if "raiden.transfer.events.ContractSend" not in transaction_data["_type"]:
            raise InvalidDBData(
                "Error during v18 -> v19 upgrade. Chain state's pending transactions "
                "should only contain ContractSend transactions"
            )

        # For each pending transaction find the corresponding DB event record.
        event_record = storage.get_latest_event_by_data_field(filters=transaction_data)
        if not event_record.data:
            raise InvalidDBData(
                "Error during v18 -> v19 upgrade. Could not find a database event "
                # NOTE(review): function cut off here — the rest of the message
                # and the remainder of the loop lie outside this chunk.
def get_blockchain_events(
    web3: Web3,
    contract_manager: ContractManager,
    chain_state: BlockchainState,
    to_block: BlockNumber,
) -> Tuple[BlockchainState, List[Event]]:
    """Poll the chain for new events from the block after the last committed one up to *to_block*.

    Returns a (new chain state, events) tuple; the input *chain_state* is not
    mutated (a deep copy is updated instead).
    """
    # increment by one, as `latest_committed_block` has been queried last time already
    from_block = BlockNumber(chain_state.latest_committed_block + 1)

    # Check if the current block was already processed
    if from_block > to_block:
        return chain_state, []

    new_chain_state = deepcopy(chain_state)
    log.info("Querying new block(s)", from_block=from_block, end_block=to_block)

    # first check for new token networks and add to state
    registry_events = query_blockchain_events(
        web3=web3,
        contract_manager=contract_manager,
        contract_address=new_chain_state.token_network_registry_address,
        contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,
        topics=create_registry_event_topics(contract_manager),
        from_block=from_block,
        # NOTE(review): call cut off here — remaining arguments and the rest of
        # the function lie outside this chunk.