Ms.new breaking changes (#1182)

1. The formula for computing iterations is simplified, so that only one division is necessary, and the (1-x) term is inverted into just x.
2. There are new timestamp rules. A block N must have a greater timestamp than block N-1. Also, a block's timestamp cannot be more than 5 minutes in the future.
3. A List[Tuple[uint16, str]] is added to the handshake. These are the capabilities that the node supports, to add new features to the protocol.
4. The message_id is now before the data in each message.
This commit is contained in:
Mariano Sorgente 2021-03-10 17:14:13 +09:00 committed by GitHub
parent d2e558835d
commit 1c6c372044
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 136 additions and 86 deletions

View File

@ -58,7 +58,7 @@ jobs:
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.20.0'
ref: '0.21.0'
fetch-depth: 1
- name: Link home directory

View File

@ -58,7 +58,7 @@ jobs:
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.20.0'
ref: '0.21.0'
fetch-depth: 1
- name: Link home directory

View File

@ -58,7 +58,7 @@ jobs:
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.20.0'
ref: '0.21.0'
fetch-depth: 1
- name: Link home directory

View File

@ -58,7 +58,7 @@ jobs:
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.20.0'
ref: '0.21.0'
fetch-depth: 1
- name: Link home directory

View File

@ -65,7 +65,7 @@ jobs:
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.20.0'
ref: '0.21.0'
fetch-depth: 1
- name: Link home directory

View File

@ -65,7 +65,7 @@ jobs:
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.20.0'
ref: '0.21.0'
fetch-depth: 1
- name: Link home directory

View File

@ -65,7 +65,7 @@ jobs:
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.20.0'
ref: '0.21.0'
fetch-depth: 1
- name: Link home directory

View File

@ -65,7 +65,7 @@ jobs:
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.20.0'
ref: '0.21.0'
fetch-depth: 1
- name: Link home directory

View File

@ -1,7 +1,7 @@
import dataclasses
import logging
import time
from typing import List, Optional, Tuple
from typing import Optional, Tuple
from blspy import AugSchemeMPL
@ -811,26 +811,16 @@ def validate_unfinished_header_block(
if header_block.foliage_transaction_block.filter_hash != std_hash(header_block.transactions_filter):
return None, ValidationError(Err.INVALID_TRANSACTIONS_FILTER_HASH)
# 26. The timestamp in Foliage Block must comply with the timestamp rules
if prev_b is not None:
last_timestamps: List[uint64] = []
curr_b = blocks.block_record(header_block.foliage_transaction_block.prev_transaction_block_hash)
assert curr_b.timestamp is not None
while len(last_timestamps) < constants.NUMBER_OF_TIMESTAMPS:
last_timestamps.append(curr_b.timestamp)
fetched: Optional[BlockRecord] = blocks.try_block_record(curr_b.prev_transaction_block_hash)
if not fetched:
break
curr_b = fetched
if len(last_timestamps) != constants.NUMBER_OF_TIMESTAMPS:
# For blocks 1 to 10, average timestamps of all previous blocks
assert curr_b.height == 0
prev_time: uint64 = uint64(int(sum(last_timestamps) // len(last_timestamps)))
if header_block.foliage_transaction_block.timestamp <= prev_time:
return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_PAST)
if header_block.foliage_transaction_block.timestamp > int(time.time() + constants.MAX_FUTURE_TIME):
return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_FUTURE)
# 26a. The timestamp in Foliage Block must not be over 5 minutes in the future
if header_block.foliage_transaction_block.timestamp > int(time.time() + constants.MAX_FUTURE_TIME):
return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_FUTURE)
if prev_b is not None:
# 26b. The timestamp must be greater than the previous transaction block timestamp
prev_transaction_b = blocks.block_record(header_block.foliage_transaction_block.prev_transaction_block_hash)
assert prev_transaction_b.timestamp is not None
if header_block.foliage_transaction_block.timestamp <= prev_transaction_b.timestamp:
return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_PAST)
return required_iters, None # Valid unfinished header block

View File

@ -24,7 +24,7 @@ testnet_kwargs = {
"MAX_PLOT_SIZE": 59,
"SUB_SLOT_TIME_TARGET": 600, # The target number of seconds per slot, mainnet 600
"NUM_SP_INTERVALS_EXTRA": 3, # The number of sp intervals to add to the signage point
"MAX_FUTURE_TIME": 7200, # The next block can have a timestamp of at most these many seconds more
"MAX_FUTURE_TIME": 5 * 60, # The next block can have a timestamp of at most these many seconds in the future
"NUMBER_OF_TIMESTAMPS": 11, # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
# Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
# We override this value based on the chain being run (testnet0, testnet1, mainnet, etc)

View File

@ -1,4 +1,3 @@
from src.types.blockchain_format.sized_bytes import bytes32
from src.util.ints import uint64
# The actual space in bytes of a plot, is _expected_plot_size(k) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
@ -16,17 +15,3 @@ def _expected_plot_size(k: int) -> uint64:
"""
return ((2 * k) + 1) * (2 ** (k - 1))
def quality_str_to_quality(quality_str: bytes32, k: int) -> uint64:
"""
Takes a 256 bit quality string, converts it to an integer between 0 and 2**256,
representing a decimal d=0.xxxxx..., where x are the bits of the quality.
Then we perform 1/d, and multiply by the plot size and the
This is a very good approximation for x when x is close to 1. However, we only
work with big ints, to avoid using decimals. Finally, we divide by the plot size,
to make bigger plots have a proportionally higher chance to win.
"""
t = pow(2, 256)
xt = t - int.from_bytes(quality_str, "big")
return t * _expected_plot_size(k) // xt

View File

@ -1,7 +1,7 @@
from src.consensus.constants import ConsensusConstants
from src.consensus.pos_quality import quality_str_to_quality
from src.types.blockchain_format.sized_bytes import bytes32
from src.util.hash import std_hash
from src.consensus.pos_quality import _expected_plot_size
from src.consensus.constants import ConsensusConstants
from src.util.ints import uint8, uint64, uint128
@ -51,12 +51,15 @@ def calculate_iterations_quality(
cc_sp_output_hash: bytes32,
) -> uint64:
"""
Calculates the number of iterations from the quality. The quality is converted to a number
between 0 and 1, then divided by expected plot size, and finally multiplied by the
difficulty.
Calculates the number of iterations from the quality. This is derived as the difficulty times the constant factor
times a random number between 0 and 1 (based on quality string), divided by plot size.
"""
sp_quality_string: bytes32 = std_hash(quality_string + cc_sp_output_hash)
iters = uint64(
uint128(int(difficulty) * int(difficulty_constant_factor)) // quality_str_to_quality(sp_quality_string, size)
int(difficulty)
* int(difficulty_constant_factor)
* int.from_bytes(sp_quality_string, "big", signed=False)
// (int(pow(2, 256)) * int(_expected_plot_size(size)))
)
return max(iters, uint64(1))

View File

@ -733,6 +733,16 @@ class FullNodeAPI:
required_iters,
)
# The block's timestamp must be greater than the previous transaction block's timestamp
timestamp = uint64(int(time.time()))
curr: Optional[BlockRecord] = prev_b
while curr is not None and not curr.is_transaction_block and curr.height != 0:
curr = self.full_node.blockchain.try_block_record(curr.prev_hash)
if curr is not None:
assert curr.timestamp is not None
if timestamp <= curr.timestamp:
timestamp = uint64(int(curr.timestamp + 1))
self.log.info("Starting to make the unfinished block")
unfinished_block: UnfinishedBlock = create_unfinished_block(
self.full_node.constants,
@ -748,7 +758,7 @@ class FullNodeAPI:
get_plot_sig,
get_pool_sig,
sp_vdfs,
uint64(int(time.time())),
timestamp,
self.full_node.blockchain,
b"",
spend_bundle,

View File

@ -1,6 +1,8 @@
from dataclasses import dataclass
from typing import Tuple, List
from src.types.blockchain_format.sized_bytes import bytes32
from enum import IntEnum
from src.util.ints import uint8, uint16
from src.util.streamable import Streamable, streamable
@ -12,6 +14,12 @@ Note: When changing this file, also change protocol_message_types.py
"""
# Capabilities can be added here when new features are added to the protocol
# These are passed in as uint16 into the Handshake
class Capability(IntEnum):
BASE = 1 # Base capability just means it supports the chia protocol at mainnet
@dataclass(frozen=True)
@streamable
class Handshake(Streamable):
@ -20,6 +28,7 @@ class Handshake(Streamable):
software_version: str
server_port: uint16
node_type: uint8
capabilities: List[Tuple[uint16, str]]
@dataclass(frozen=True)

View File

@ -35,11 +35,11 @@ class Delivery(IntEnum):
@streamable
class Message(Streamable):
type: uint8 # one of ProtocolMessageTypes
# Message data for that type
data: bytes
# message id
id: Optional[uint16]
# Message data for that type
data: bytes
def make_msg(msg_type: ProtocolMessageTypes, data: Any) -> Message:
return Message(uint8(msg_type.value), bytes(data), None)
return Message(uint8(msg_type.value), None, bytes(data))

View File

@ -512,7 +512,7 @@ class ChiaServer:
)
if response is not None:
response_message = Message(response.type, response.data, full_message.id)
response_message = Message(response.type, full_message.id, response.data)
await connection.reply_to_request(response_message)
except Exception as e:
if self.connection_close_task is None:

View File

@ -8,7 +8,7 @@ from aiohttp import WSCloseCode, WSMessage, WSMsgType
from src.cmds.init import chia_full_version_str
from src.protocols.protocol_message_types import ProtocolMessageTypes
from src.protocols.shared_protocol import Handshake
from src.protocols.shared_protocol import Handshake, Capability
from src.server.outbound_message import Message, NodeType, make_msg
from src.server.rate_limits import RateLimiter
from src.types.blockchain_format.sized_bytes import bytes32
@ -110,6 +110,7 @@ class WSChiaConnection:
chia_full_version_str(),
uint16(server_port),
uint8(local_type.value),
[(uint16(Capability.BASE.value), "1")],
),
)
assert outbound_handshake is not None
@ -147,6 +148,7 @@ class WSChiaConnection:
chia_full_version_str(),
uint16(server_port),
uint8(local_type.value),
[(uint16(Capability.BASE.value), "1")],
),
)
await self._send_message(outbound_handshake)
@ -244,7 +246,7 @@ class WSChiaConnection:
if attribute is None:
raise AttributeError(f"Node type {self.connection_type} does not have method {attr_name}")
msg = Message(uint8(getattr(ProtocolMessageTypes, attr_name).value), args[0], None)
msg = Message(uint8(getattr(ProtocolMessageTypes, attr_name).value), None, args[0])
request_start_t = time.time()
result = await self.create_request(msg, timeout)
self.log.debug(
@ -279,7 +281,7 @@ class WSChiaConnection:
request_id = self.request_nonce
self.request_nonce = uint16(self.request_nonce + 1) if self.request_nonce != (2 ** 16 - 1) else uint16(0)
message = Message(message_no_id.type, message_no_id.data, request_id)
message = Message(message_no_id.type, request_id, message_no_id.data)
self.pending_requests[message.id] = event
await self.outgoing_queue.put(message)

View File

@ -141,6 +141,7 @@ class BlockTools:
_, loaded_plots, _, _ = load_plots({}, {}, farmer_pubkeys, self.pool_pubkeys, None, False, root_path)
self.plots: Dict[Path, PlotInfo] = loaded_plots
self.local_sk_cache: Dict[bytes32, PrivateKey] = {}
self._config = load_config(self.root_path, "config.yaml")
self._config["logging"]["log_stdout"] = True
self._config["selected_network"] = "testnet0"
@ -223,15 +224,16 @@ class BlockTools:
"""
farmer_sk = master_sk_to_farmer_sk(self.all_sks[0])
for _, plot_info in self.plots.items():
# Look up local_sk from plot to save locked memory
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(plot_info.prover.get_memo())
local_sk = master_sk_to_local_sk(local_master_sk)
agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_public_key)
if agg_pk == plot_pk:
if plot_pk == plot_info.plot_public_key:
# Look up local_sk from plot to save locked memory
if plot_info.prover.get_id() in self.local_sk_cache:
local_master_sk = self.local_sk_cache[plot_info.prover.get_id()]
else:
_, _, local_master_sk = parse_plot_info(plot_info.prover.get_memo())
self.local_sk_cache[plot_info.prover.get_id()] = local_master_sk
local_sk = master_sk_to_local_sk(local_master_sk)
agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_sk.get_g1())
assert agg_pk == plot_pk
harv_share = AugSchemeMPL.sign(local_sk, m, agg_pk)
farm_share = AugSchemeMPL.sign(farmer_sk, m, agg_pk)
return AugSchemeMPL.aggregate([harv_share, farm_share])

View File

@ -10,18 +10,21 @@ import pytest
from blspy import AugSchemeMPL, G2Element
from src.consensus.blockchain import ReceiveBlockResult
from src.consensus.pot_iterations import is_overflow_block
from src.types.blockchain_format.classgroup import ClassgroupElement
from src.types.blockchain_format.sized_bytes import bytes32
from src.types.blockchain_format.slots import InfusedChallengeChainSubSlot
from src.types.blockchain_format.vdf import VDFInfo, VDFProof
from src.types.end_of_slot_bundle import EndOfSubSlotBundle
from src.types.full_block import FullBlock
from src.types.unfinished_block import UnfinishedBlock
from src.util.block_tools import get_vdf_info_and_proof
from src.types.blockchain_format.vdf import VDFInfo, VDFProof
from src.util.block_tools import get_vdf_info_and_proof, BlockTools
from src.util.errors import Err
from src.util.hash import std_hash
from src.util.ints import uint8, uint64
from src.util.recursive_replace import recursive_replace
from tests.core.fixtures import empty_blockchain, create_blockchain # noqa: F401
from tests.core.fixtures import default_1000_blocks # noqa: F401
from src.util.wallet_tools import WalletTool
from tests.core.fixtures import default_400_blocks # noqa: F401
from tests.core.fixtures import default_1000_blocks # noqa: F401
@ -443,8 +446,12 @@ class TestBlockHeaderValidation:
assert err == Err.SHOULD_NOT_HAVE_ICC
@pytest.mark.asyncio
async def test_invalid_icc_sub_slot_vdf(self, empty_blockchain):
blocks = bt.get_consecutive_blocks(10)
async def test_invalid_icc_sub_slot_vdf(self):
bt_high_iters = BlockTools(
constants=test_constants.replace(SUB_SLOT_ITERS_STARTING=(2 ** 12), DIFFICULTY_STARTING=(2 ** 14))
)
bc1, connection, db_path = await create_blockchain(bt_high_iters.constants)
blocks = bt_high_iters.get_consecutive_blocks(10)
for block in blocks:
if len(block.finished_sub_slots) > 0 and block.finished_sub_slots[-1].infused_challenge_chain is not None:
# Bad iters
@ -463,7 +470,7 @@ class TestBlockHeaderValidation:
block_bad = recursive_replace(
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss]
)
result, err, _ = await empty_blockchain.receive_block(block_bad)
result, err, _ = await bc1.receive_block(block_bad)
assert err == Err.INVALID_ICC_EOS_VDF
# Bad output
@ -479,10 +486,11 @@ class TestBlockHeaderValidation:
)
),
)
log.warning(f"Proof: {block.finished_sub_slots[-1].proofs}")
block_bad_2 = recursive_replace(
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_2]
)
result, err, _ = await empty_blockchain.receive_block(block_bad_2)
result, err, _ = await bc1.receive_block(block_bad_2)
assert err == Err.INVALID_ICC_EOS_VDF
# Bad challenge hash
@ -501,7 +509,7 @@ class TestBlockHeaderValidation:
block_bad_3 = recursive_replace(
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_3]
)
result, err, _ = await empty_blockchain.receive_block(block_bad_3)
result, err, _ = await bc1.receive_block(block_bad_3)
assert err == Err.INVALID_ICC_EOS_VDF
# Bad proof
@ -513,13 +521,17 @@ class TestBlockHeaderValidation:
block_bad_5 = recursive_replace(
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
)
result, err, _ = await empty_blockchain.receive_block(block_bad_5)
result, err, _ = await bc1.receive_block(block_bad_5)
assert err == Err.INVALID_ICC_EOS_VDF
result, err, _ = await empty_blockchain.receive_block(block)
result, err, _ = await bc1.receive_block(block)
assert err is None
assert result == ReceiveBlockResult.NEW_PEAK
await connection.close()
bc1.shut_down()
db_path.unlink()
@pytest.mark.asyncio
async def test_invalid_icc_into_cc(self, empty_blockchain):
blockchain = empty_blockchain
@ -668,6 +680,7 @@ class TestBlockHeaderValidation:
async def test_invalid_cc_sub_slot_vdf(self, empty_blockchain):
# 2q
blocks = bt.get_consecutive_blocks(10)
for block in blocks:
if len(block.finished_sub_slots):
# Bad iters
@ -1003,7 +1016,7 @@ class TestBlockHeaderValidation:
case_1 = True
block_bad = recursive_replace(blocks[-1], "reward_chain_block.signage_point_index", uint8(1))
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_SP_INDEX
else:
elif not is_overflow_block(test_constants, blocks[-1].reward_chain_block.signage_point_index):
case_2 = True
block_bad = recursive_replace(blocks[-1], "reward_chain_block.signage_point_index", uint8(0))
error_code = (await empty_blockchain.receive_block(block_bad))[1]
@ -1071,6 +1084,7 @@ class TestBlockHeaderValidation:
@pytest.mark.asyncio
async def test_bad_cc_sp_vdf(self, empty_blockchain):
# 13. Note: does not validate fully due to proof of space being validated first
blocks = bt.get_consecutive_blocks(1)
assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
@ -1344,6 +1358,19 @@ class TestBlockHeaderValidation:
block_bad = recursive_replace(block_bad, "foliage.foliage_transaction_block_signature", new_fbh_sig)
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.TIMESTAMP_TOO_FAR_IN_PAST
block_bad: FullBlock = recursive_replace(
blocks[-1],
"foliage_transaction_block.timestamp",
blocks[0].foliage_transaction_block.timestamp,
)
block_bad: FullBlock = recursive_replace(
block_bad, "foliage.foliage_transaction_block_hash", block_bad.foliage_transaction_block.get_hash()
)
new_m = block_bad.foliage.foliage_transaction_block_hash
new_fbh_sig = bt.get_plot_signature(new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key)
block_bad = recursive_replace(block_bad, "foliage.foliage_transaction_block_signature", new_fbh_sig)
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.TIMESTAMP_TOO_FAR_IN_PAST
block_bad: FullBlock = recursive_replace(
blocks[-1],
"foliage_transaction_block.timestamp",

View File

@ -21,6 +21,13 @@ from tests.time_out_assert import time_out_assert
log = logging.getLogger(__name__)
async def disconnect_all_and_reconnect(server: ChiaServer, reconnect_to: ChiaServer) -> bool:
cons = list(server.all_connections.values())[:]
for con in cons:
await con.close()
return await server.start_client(PeerInfo(self_hostname, uint16(reconnect_to._port)), None)
async def add_dummy_connection(server: ChiaServer, dummy_port: int) -> Tuple[asyncio.Queue, bytes32]:
timeout = aiohttp.ClientTimeout(total=10)
session = aiohttp.ClientSession(timeout=timeout)

View File

@ -1,10 +1,12 @@
import pickle
from os import path
from pathlib import Path
from typing import List
import aiosqlite
import pytest
from typing import List
from pathlib import Path
from src.consensus.constants import ConsensusConstants
from src.consensus.blockchain import Blockchain
from src.full_node.block_store import BlockStore
@ -14,20 +16,24 @@ from src.util.path import mkdir
from tests.setup_nodes import bt, test_constants
@pytest.fixture(scope="function")
async def empty_blockchain():
"""
Provides a list of 10 valid blocks, as well as a blockchain with 9 blocks added to it.
"""
async def create_blockchain(constants: ConsensusConstants):
db_path = Path("blockchain_test.db")
if db_path.exists():
db_path.unlink()
connection = await aiosqlite.connect(db_path)
coin_store = await CoinStore.create(connection)
store = await BlockStore.create(connection)
bc1 = await Blockchain.create(coin_store, store, test_constants)
bc1 = await Blockchain.create(coin_store, store, constants)
assert bc1.get_peak() is None
return bc1, connection, db_path
@pytest.fixture(scope="function")
async def empty_blockchain():
"""
Provides a list of 10 valid blocks, as well as a blockchain with 9 blocks added to it.
"""
bc1, connection, db_path = await create_blockchain(test_constants)
yield bc1
await connection.close()

View File

@ -398,7 +398,7 @@ class TestFullNodeStore:
sb = blockchain.block_record(blocks[-1].header_hash)
if sb.first_in_sub_slot:
break
assert len(blocks) >= 3
assert len(blocks) >= 2
dependant_sub_slots = blocks[-1].finished_sub_slots
for block in blocks[:-2]:
sb = blockchain.block_record(block.header_hash)

View File

@ -9,6 +9,7 @@ from src.simulator.simulator_protocol import FarmNewBlockProtocol
from src.types.peer_info import PeerInfo
from src.util.ints import uint16, uint32
from src.wallet.wallet_state_manager import WalletStateManager
from tests.connection_utils import disconnect_all_and_reconnect
from tests.core.fixtures import default_400_blocks, default_1000_blocks
from tests.setup_nodes import bt, self_hostname, setup_node_and_wallet, setup_simulators_and_wallets, test_constants
from tests.time_out_assert import time_out_assert
@ -63,6 +64,8 @@ class TestWalletSync:
for i in range(1, len(blocks_reorg)):
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks_reorg[i]))
await disconnect_all_and_reconnect(wallet_server, full_node_server)
await time_out_assert(
100, wallet_height_at_least, True, wallet_node, len(default_400_blocks) + num_blocks - 5 - 1
)
@ -81,12 +84,16 @@ class TestWalletSync:
# same tip at height num_blocks - 1.
await time_out_assert(600, wallet_height_at_least, True, wallet_node, len(default_400_blocks) - 1)
await disconnect_all_and_reconnect(wallet_server, full_node_server)
# Tests a long reorg
for block in default_1000_blocks:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(600, wallet_height_at_least, True, wallet_node, len(default_1000_blocks) - 1)
await disconnect_all_and_reconnect(wallet_server, full_node_server)
# Tests a short reorg
num_blocks = 30
blocks_reorg = bt.get_consecutive_blocks(num_blocks, block_list_input=default_1000_blocks[:-5])
@ -180,6 +187,8 @@ class TestWalletSync:
for block in blocks_reorg_2[-41:]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
await disconnect_all_and_reconnect(server_2, fn_server)
# Confirm we have the funds
funds = calculate_pool_reward(uint32(len(blocks_reorg_1))) + calculate_base_farmer_reward(
uint32(len(blocks_reorg_1))