Consensus fix, full node store fixes, change sync, more linting

Mariano Sorgente 2020-12-11 22:29:51 +09:00 committed by Yostra
parent 7e7aea0976
commit b7c131448b
12 changed files with 331 additions and 267 deletions

View File

@ -30,18 +30,14 @@ def main():
# If this is a beta dev release - get which beta it is
if "0b" in scm_minor_version:
orignial_minor_ver_list = scm_minor_version.split("0b")
major_release_number = str(
1 - int(scm_major_version)
) # decrement the major release for beta
major_release_number = str(1 - int(scm_major_version)) # decrement the major release for beta
minor_release_number = scm_major_version
patch_release_number = orignial_minor_ver_list[1]
if smc_patch_version and "dev" in smc_patch_version:
dev_release_number = "." + smc_patch_version
elif "0rc" in version[1]:
original_minor_ver_list = scm_minor_version.split("0rc")
major_release_number = str(
1 - int(scm_major_version)
) # decrement the major release for release candidate
major_release_number = str(1 - int(scm_major_version)) # decrement the major release for release candidate
minor_release_number = str(int(scm_major_version) + 1) # RC is 0.2.1 for RC 1
patch_release_number = original_minor_ver_list[1]
if smc_patch_version and "dev" in smc_patch_version:
@ -57,9 +53,7 @@ def main():
install_release_number += "." + patch_release_number
if len(dev_release_number) > 0:
if windows:
dev_release_number_digits = "".join(
[i for i in dev_release_number if i.isdigit()]
)
dev_release_number_digits = "".join([i for i in dev_release_number if i.isdigit()])
dev_release_number = dev_release_number_digits
install_release_number += dev_release_number
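The hunk above only re-wraps the beta/release-candidate handling in the installer version script onto single lines; the logic is unchanged. As a rough illustration of what that logic produces (an editor's sketch with invented inputs, not code from this repo), a setuptools_scm beta version is split on "0b", the major number is decremented, and the pieces are reassembled:

# Editor's sketch with hypothetical version strings, mirroring the beta branch above.
scm_major_version = "1"
scm_minor_version = "1.0b3"      # "0b" marks a beta dev release
smc_patch_version = "dev5"

parts = scm_minor_version.split("0b")                     # ["1.", "3"]
major_release_number = str(1 - int(scm_major_version))    # "0": decrement the major for beta
minor_release_number = scm_major_version                  # "1"
patch_release_number = parts[1]                           # "3"
dev_release_number = "." + smc_patch_version              # ".dev5"

install_release_number = major_release_number + "." + minor_release_number
install_release_number += "." + patch_release_number
install_release_number += dev_release_number
print(install_release_number)                             # 0.1.3.dev5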

View File

@ -421,6 +421,7 @@ async def validate_unfinished_header_block(
log.error(
f"Data: {genesis_block} {overflow} {skip_overflow_last_ss_validation} {header_block.total_iters} "
f"{header_block.reward_chain_sub_block.signage_point_index}"
f"Prev: {prev_sb}"
)
log.error(f"Challenge {challenge} provided {header_block.reward_chain_sub_block.pos_ss_cc_challenge_hash}")
return None, ValidationError(Err.INVALID_CC_CHALLENGE)
@ -717,6 +718,9 @@ async def validate_unfinished_header_block(
while not curr_sb.is_block:
curr_sb = sub_blocks[curr_sb.prev_hash]
if not header_block.foliage_block.prev_block_hash == curr_sb.header_hash:
log.error(
f"Prev BH: {header_block.foliage_block.prev_block_hash} {curr_sb.header_hash} curr sb: {curr_sb}"
)
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
# 25. The filter hash in the Foliage Block must be the hash of the filter
@ -907,11 +911,6 @@ async def validate_finished_header_block(
if header_block.reward_chain_sub_block.infused_challenge_chain_ip_vdf is None:
# If we don't have an ICC chain, deficit must be 4 or 5
if deficit < constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK - 1:
log.error(
"no icc vdf and deficit is lower than %d",
header_block.header_hash,
constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK - 1,
)
return None, ValidationError(Err.INVALID_ICC_VDF)
else:
assert header_block.infused_challenge_chain_ip_proof is not None

View File

@ -24,6 +24,7 @@ from src.full_node.block_store import BlockStore
from src.full_node.coin_store import CoinStore
from src.full_node.full_node_store import FullNodeStore
from src.full_node.mempool_manager import MempoolManager
from src.full_node.signage_point import SignagePoint
from src.full_node.sync_blocks_processor import SyncBlocksProcessor
from src.full_node.sync_peers_handler import SyncPeersHandler
from src.full_node.sync_store import SyncStore
@ -60,6 +61,7 @@ class FullNode:
mempool_manager: MempoolManager
connection: aiosqlite.Connection
sync_peers_handler: Optional[SyncPeersHandler]
_sync_task: Optional[asyncio.Task]
blockchain: Blockchain
config: Dict
server: Any
@ -107,6 +109,8 @@ class FullNode:
start_time = time.time()
self.blockchain = await Blockchain.create(self.coin_store, self.block_store, self.constants)
self.mempool_manager = MempoolManager(self.coin_store, self.constants)
self.weight_proof_handler = WeightProofHandler(self.constants, BlockCache(self.blockchain))
self._sync_task = None
time_taken = time.time() - start_time
if self.blockchain.get_peak() is None:
self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s")
@ -156,7 +160,7 @@ class FullNode:
if self.state_changed_callback is not None:
self.state_changed_callback(change)
async def _send_peak_to_timelords(self):
async def send_peak_to_timelords(self):
"""
Sends current peak to timelords
"""
@ -232,7 +236,7 @@ class FullNode:
)
await connection.send_message(Message("new_peak", request_wallet))
elif connection.connection_type is NodeType.TIMELORD:
await self._send_peak_to_timelords()
await self.send_peak_to_timelords()
async def _on_disconnect(self, connection: ws.WSChiaConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
@ -251,6 +255,11 @@ class FullNode:
async def _await_closed(self):
await self.connection.close()
try:
if self._sync_task is not None:
await asyncio.wait_for(self._sync_task, timeout=2)
except asyncio.exceptions.TimeoutError:
pass
async def _sync(self):
"""
@ -272,108 +281,117 @@ class FullNode:
highest_weight: uint128 = uint128(0)
target_peak_sb_height: uint32 = uint32(0)
sync_start_time = time.time()
try:
# Based on responses from peers about the current heads, see which head is the heaviest
# (similar to longest chain rule).
self.sync_store.waiting_for_peaks = False
# Based on responses from peers about the current heads, see which head is the heaviest
# (similar to longest chain rule).
self.sync_store.waiting_for_peaks = False
potential_peaks: List[Tuple[bytes32, FullBlock]] = self.sync_store.get_potential_peaks_tuples()
self.log.info(f"Have collected {len(potential_peaks)} potential peaks")
if self._shut_down:
return
for header_hash, potential_peak_block in potential_peaks:
if potential_peak_block.weight > highest_weight:
highest_weight = potential_peak_block.weight
target_peak_sb_height = potential_peak_block.sub_block_height
if self.blockchain.get_peak() is not None and highest_weight <= self.blockchain.get_peak().weight:
self.log.info("Not performing sync, already caught up.")
return
self.log.info(f"Peak height {target_peak_sb_height}")
peers: List[ws.WSChiaConnection] = self.server.get_full_node_connections()
# send weight proof message, continue on first response
# fork_point_height = await self._fetch_and_validate_weight_proof(peak_hash, peers, target_peak_sb_height)
# if fork_point_height is None:
# self.log.error("failed to validate weight proof")
# return
fork_point_height = -1
self.sync_peers_handler = SyncPeersHandler(
self.sync_store,
peers,
fork_point_height,
self.blockchain,
target_peak_sb_height,
self.server,
)
# Start processing blocks that we have received (no block yet)
block_processor = SyncBlocksProcessor(
self.sync_store,
fork_point_height,
uint32(target_peak_sb_height),
self.blockchain,
)
block_processor_task = asyncio.create_task(block_processor.process())
peak: Optional[SubBlockRecord] = self.blockchain.get_peak()
while not self.sync_peers_handler.done():
# Periodically checks for done, timeouts, shutdowns, new peers or disconnected peers.
potential_peaks: List[Tuple[bytes32, FullBlock]] = self.sync_store.get_potential_peaks_tuples()
self.log.info(f"Have collected {len(potential_peaks)} potential peaks")
if self._shut_down:
block_processor.shut_down()
break
if block_processor_task.done():
break
await self.sync_peers_handler.monitor_timeouts()
return
cur_peers: List[ws.WSChiaConnection] = [
con
for _, con in self.server.all_connections.items()
if (con.peer_node_id is not None and con.connection_type == NodeType.FULL_NODE)
]
for header_hash, potential_peak_block in potential_peaks:
if potential_peak_block.weight > highest_weight:
highest_weight = potential_peak_block.weight
target_peak_sb_height = potential_peak_block.sub_block_height
for node_id in cur_peers:
if node_id not in peers:
self.sync_peers_handler.new_node_connected(node_id)
for node_id in peers:
if node_id not in cur_peers:
# Disconnected peer, removes requests that are being sent to it
self.sync_peers_handler.node_disconnected(node_id)
peers = cur_peers
if self.blockchain.get_peak() is not None and highest_weight <= self.blockchain.get_peak().weight:
self.log.info("Not performing sync, already caught up.")
return
await self.sync_peers_handler.add_to_request_sets()
self.log.info(f"Peak height {target_peak_sb_height}")
peers: List[ws.WSChiaConnection] = self.server.get_full_node_connections()
new_peak = self.blockchain.get_peak()
if new_peak != peak:
msg = Message(
"new_peak",
wallet_protocol.NewPeak(
new_peak.header_hash,
new_peak.sub_block_height,
new_peak.weight,
new_peak.prev_hash,
),
)
await self.server.send_to_all([msg], NodeType.WALLET)
# send weight proof message, continue on first response
# fork_point_height = await self._fetch_and_validate_weight_proof(peak_hash, peers, target_peak_sb_height)
# if fork_point_height is None:
# self.log.error("failed to validate weight proof")
# return
fork_point_height = -1
self._state_changed("sub_block")
await asyncio.sleep(5)
self.sync_peers_handler = SyncPeersHandler(
self.sync_store,
peers,
fork_point_height,
self.blockchain,
target_peak_sb_height,
self.server,
)
# Waits for all blocks to be processed, a timeout to happen, or the node to shut down
await block_processor_task
block_processor_task.result() # If there was a timeout, this will raise TimeoutError
if self._shut_down:
return
# Start processing blocks that we have received (no block yet)
block_processor = SyncBlocksProcessor(
self.sync_store,
fork_point_height,
uint32(target_peak_sb_height),
self.blockchain,
)
block_processor_task = asyncio.create_task(block_processor.process())
peak: Optional[SubBlockRecord] = self.blockchain.get_peak()
while not self.sync_peers_handler.done():
# Periodically checks for done, timeouts, shutdowns, new peers or disconnected peers.
if self._shut_down:
block_processor.shut_down()
break
if block_processor_task.done():
break
await self.sync_peers_handler.monitor_timeouts()
# A successful sync will leave the height at least as high as peak_height
assert self.blockchain.get_peak().sub_block_height >= target_peak_sb_height
cur_peers: List[ws.WSChiaConnection] = [
con
for _, con in self.server.all_connections.items()
if (con.peer_node_id is not None and con.connection_type == NodeType.FULL_NODE)
]
self.log.info(
f"Finished sync up to height {target_peak_sb_height}. Total time: "
f"{round((time.time() - sync_start_time)/60, 2)} minutes."
)
for node_id in cur_peers:
if node_id not in peers:
self.sync_peers_handler.new_node_connected(node_id)
for node_id in peers:
if node_id not in cur_peers:
# Disconnected peer, removes requests that are being sent to it
self.sync_peers_handler.node_disconnected(node_id)
peers = cur_peers
await self.sync_peers_handler.add_to_request_sets()
new_peak = self.blockchain.get_peak()
if new_peak != peak:
msg = Message(
"new_peak",
wallet_protocol.NewPeak(
new_peak.header_hash,
new_peak.sub_block_height,
new_peak.weight,
new_peak.prev_hash,
),
)
await self.server.send_to_all([msg], NodeType.WALLET)
self._state_changed("sub_block")
await asyncio.sleep(5)
# Waits for all blocks to be processed, a timeout to happen, or the node to shut down
await block_processor_task
block_processor_task.result() # If there was a timeout, this will raise TimeoutError
if self._shut_down:
return
# A successful sync will leave the height at least as high as peak_height
assert self.blockchain.get_peak().sub_block_height >= target_peak_sb_height
self.log.info(
f"Finished sync up to height {target_peak_sb_height}. Total time: "
f"{round((time.time() - sync_start_time)/60, 2)} minutes."
)
except asyncio.CancelledError:
self.log.warning("Syncing failed, CancelledError")
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error with syncing: {type(e)}{tb}")
finally:
if self._shut_down:
return
await self._finish_sync()
async def _fetch_and_validate_weight_proof(self, peak_hash, peers, target_peak_sb_height) -> Optional[uint32]:
wpf = WeightProofHandler(self.constants, init_block_block_cache_mock())
@ -423,7 +441,7 @@ class FullNode:
await self.respond_sub_block(full_node_protocol.RespondSubBlock(block))
# Update timelords with most recent information
await self._send_peak_to_timelords()
await self.send_peak_to_timelords()
peak: SubBlockRecord = self.blockchain.get_peak()
if peak is not None:
@ -521,16 +539,8 @@ class FullNode:
f"We are too far behind this block. Our height is {peak_height} and block is at "
f"{sub_block.sub_block_height}"
)
try:
# Performs sync, catching exceptions so we don't close the connection
await self._sync()
except asyncio.CancelledError:
self.log.error("Syncing failed, CancelledError")
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error with syncing: {type(e)}{tb}")
finally:
await self._finish_sync()
# Performs sync, catching exceptions so we don't close the connection
self._sync_task = asyncio.create_task(self._sync())
elif sub_block.sub_block_height >= peak_height - 5:
# Allows shallow reorgs by simply requesting the previous height repeatedly
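With this change, a node that falls far behind no longer runs the sync inline in the message handler: the handler only creates self._sync_task, _sync() itself now carries the try/except/finally that used to live in the handler, and _await_closed() bounds the shutdown wait on that task to two seconds. A minimal sketch of that pattern, with placeholder names and timings (editor's illustration, not the project's code):

import asyncio


class Node:
    def __init__(self) -> None:
        self._sync_task = None  # background task handle, analogous to self._sync_task above

    async def _sync(self) -> None:
        try:
            await asyncio.sleep(10)  # stand-in for the long-running sync loop
        except asyncio.CancelledError:
            print("sync cancelled")
        except Exception as e:
            print(f"sync error: {e}")
        finally:
            print("finishing sync")  # stand-in for _finish_sync()

    def on_peak_too_far_ahead(self) -> None:
        # Fire and forget: the handler returns immediately, sync continues in the background.
        self._sync_task = asyncio.create_task(self._sync())

    async def close(self) -> None:
        # Bound the shutdown wait, as _await_closed() does with its 2 second timeout.
        if self._sync_task is not None:
            try:
                await asyncio.wait_for(self._sync_task, timeout=2)
            except asyncio.TimeoutError:
                pass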
@ -574,6 +584,19 @@ class FullNode:
fork_height != sub_block.sub_block_height - 1 and sub_block.sub_block_height != 0,
self.blockchain.sub_blocks,
)
# Ensure the signage point is also in the store, for consistency
self.full_node_store.new_signage_point(
new_peak.signage_point_index,
self.blockchain.sub_blocks,
new_peak,
new_peak.sub_slot_iters,
SignagePoint(
sub_block.reward_chain_sub_block.challenge_chain_sp_vdf,
sub_block.challenge_chain_sp_proof,
sub_block.reward_chain_sub_block.reward_chain_sp_vdf,
sub_block.reward_chain_sp_proof,
),
)
# TODO: maybe broadcast new SP/IPs as well?
# If there were pending end of slots that happen after this peak, broadcast them if they are added
@ -591,7 +614,7 @@ class FullNode:
# Occasionally clear the seen list to keep it small
self.full_node_store.clear_seen_unfinished_blocks()
await self._send_peak_to_timelords()
await self.send_peak_to_timelords()
# Tell full nodes about the new peak
msg = Message(

View File

@ -273,6 +273,8 @@ class FullNodeAPI:
respond_unfinished_sub_block: full_node_protocol.RespondUnfinishedSubBlock,
peer: ws.WSChiaConnection,
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_unfinished_sub_block(respond_unfinished_sub_block, peer)
return None
@ -326,6 +328,7 @@ class FullNodeAPI:
async def request_signage_point_or_end_of_sub_slot(
self, request: full_node_protocol.RequestSignagePointOrEndOfSubSlot
) -> Optional[Message]:
if request.index_from_challenge == 0:
sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = self.full_node.full_node_store.get_sub_slot(
request.challenge_hash
@ -369,87 +372,98 @@ class FullNodeAPI:
async def respond_signage_point(
self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSChiaConnection
) -> Optional[Message]:
peak = self.full_node.blockchain.get_peak()
if peak is not None and peak.sub_block_height > self.full_node.constants.MAX_SUB_SLOT_SUB_BLOCKS:
sub_slot_iters = peak.sub_slot_iters
difficulty = uint64(peak.weight - self.full_node.blockchain.sub_blocks[peak.prev_hash].weight)
next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True)
next_difficulty = self.full_node.blockchain.get_next_difficulty(peak.header_hash, True)
sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
assert sub_slots_for_peak is not None
ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1]
else:
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
difficulty = self.full_node.constants.DIFFICULTY_STARTING
next_sub_slot_iters = sub_slot_iters
next_difficulty = difficulty
ip_sub_slot = None
added = self.full_node.full_node_store.new_signage_point(
request.index_from_challenge,
self.full_node.blockchain.sub_blocks,
self.full_node.blockchain.get_peak(),
next_sub_slot_iters,
SignagePoint(
request.challenge_chain_vdf,
request.challenge_chain_proof,
request.reward_chain_vdf,
request.reward_chain_proof,
),
)
if added:
self.log.info(
f"⏲️ Finished signage point {request.index_from_challenge}/"
f"{self.full_node.constants.NUM_SPS_SUB_SLOT}: "
f"{request.challenge_chain_vdf.output.get_hash()} "
)
sub_slot_tuple = self.full_node.full_node_store.get_sub_slot(request.challenge_chain_vdf.challenge)
if sub_slot_tuple is not None:
prev_challenge = sub_slot_tuple[0].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
else:
prev_challenge = None
# Notify nodes of the new signage point
broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
prev_challenge,
request.challenge_chain_vdf.challenge,
request.index_from_challenge,
request.reward_chain_vdf.challenge,
)
msg = Message("new_signage_point_or_end_of_sub_slot", broadcast)
await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
# Already have signage point
if (
self.full_node.full_node_store.get_signage_point(request.challenge_chain_vdf.output.get_hash())
is not None
):
return None
peak = self.full_node.blockchain.get_peak()
if peak is not None and peak.sub_block_height > self.full_node.constants.MAX_SUB_SLOT_SUB_BLOCKS:
# Makes sure to potentially update the difficulty if we are past the peak (into a new sub-slot)
assert ip_sub_slot is not None
if request.challenge_chain_vdf.challenge != ip_sub_slot.challenge_chain.get_hash():
difficulty = next_difficulty
sub_slot_iters = next_sub_slot_iters
sub_slot_iters = peak.sub_slot_iters
difficulty = uint64(peak.weight - self.full_node.blockchain.sub_blocks[peak.prev_hash].weight)
next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True)
next_difficulty = self.full_node.blockchain.get_next_difficulty(peak.header_hash, True)
sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
assert sub_slots_for_peak is not None
ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1]
else:
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
difficulty = self.full_node.constants.DIFFICULTY_STARTING
next_sub_slot_iters = sub_slot_iters
next_difficulty = difficulty
ip_sub_slot = None
# Notify farmers of the new signage point
broadcast_farmer = farmer_protocol.NewSignagePoint(
request.challenge_chain_vdf.challenge,
request.challenge_chain_vdf.output.get_hash(),
request.reward_chain_vdf.output.get_hash(),
difficulty,
sub_slot_iters,
added = self.full_node.full_node_store.new_signage_point(
request.index_from_challenge,
)
msg = Message("new_signage_point", broadcast_farmer)
await self.server.send_to_all([msg], NodeType.FARMER)
else:
self.log.warning(
f"Signage point {request.index_from_challenge} not added, CC challenge: "
f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
self.full_node.blockchain.sub_blocks,
self.full_node.blockchain.get_peak(),
next_sub_slot_iters,
SignagePoint(
request.challenge_chain_vdf,
request.challenge_chain_proof,
request.reward_chain_vdf,
request.reward_chain_proof,
),
)
return None
if added:
self.log.info(
f"⏲️ Finished signage point {request.index_from_challenge}/"
f"{self.full_node.constants.NUM_SPS_SUB_SLOT}: "
f"{request.challenge_chain_vdf.output.get_hash()} "
)
sub_slot_tuple = self.full_node.full_node_store.get_sub_slot(request.challenge_chain_vdf.challenge)
if sub_slot_tuple is not None:
prev_challenge = sub_slot_tuple[0].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
else:
prev_challenge = None
# Notify nodes of the new signage point
broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
prev_challenge,
request.challenge_chain_vdf.challenge,
request.index_from_challenge,
request.reward_chain_vdf.challenge,
)
msg = Message("new_signage_point_or_end_of_sub_slot", broadcast)
await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
if peak is not None and peak.sub_block_height > self.full_node.constants.MAX_SUB_SLOT_SUB_BLOCKS:
# Makes sure to potentially update the difficulty if we are past the peak (into a new sub-slot)
assert ip_sub_slot is not None
if request.challenge_chain_vdf.challenge != ip_sub_slot.challenge_chain.get_hash():
difficulty = next_difficulty
sub_slot_iters = next_sub_slot_iters
# Notify farmers of the new signage point
broadcast_farmer = farmer_protocol.NewSignagePoint(
request.challenge_chain_vdf.challenge,
request.challenge_chain_vdf.output.get_hash(),
request.reward_chain_vdf.output.get_hash(),
difficulty,
sub_slot_iters,
request.index_from_challenge,
)
msg = Message("new_signage_point", broadcast_farmer)
await self.server.send_to_all([msg], NodeType.FARMER)
else:
self.log.warning(
f"Signage point {request.index_from_challenge} not added, CC challenge: "
f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
)
return None
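One detail worth noting in the reordered respond_signage_point: the current difficulty is recovered as peak.weight minus the parent's weight, which works because weight accumulates difficulty along the chain. A toy illustration with invented numbers (editor's sketch):

# Editor's sketch: weight is cumulative, so the difference between a block's weight
# and its parent's weight is the difficulty that block was farmed at.
weights = {"genesis": 100, "block_1": 250, "block_2": 400}  # hypothetical cumulative weights
parents = {"block_1": "genesis", "block_2": "block_1"}

peak = "block_2"
difficulty = weights[peak] - weights[parents[peak]]
print(difficulty)  # 150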
@peer_required
@api_request
async def respond_end_of_sub_slot(
self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
msg, _ = await self.full_node.respond_end_of_sub_slot(request, peer)
return msg
@ -570,62 +584,64 @@ class FullNodeAPI:
def get_pool_sig(_1, _2) -> G2Element:
return request.pool_signature
# Get the previous sub block at the signage point
if peak is not None:
curr = peak
while curr.total_iters > (total_iters_pos_slot + sp_iters) and curr.sub_block_height > 0:
curr = self.full_node.blockchain.sub_blocks[curr.prev_hash]
if curr.total_iters > (total_iters_pos_slot + sp_iters):
pool_target = PoolTarget(
self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
uint32(0),
)
prev_sb = None
prev_sb: Optional[SubBlockRecord] = self.full_node.blockchain.get_peak()
# Finds the previous sub block from the signage point, ensuring that the reward chain VDF is correct
if prev_sb is not None:
if request.signage_point_index == 0:
if pos_sub_slot is None:
self.log.warning("Pos sub slot is None")
return None
rc_challenge = pos_sub_slot[0].reward_chain.end_of_slot_vdf.challenge
else:
pool_target = request.pool_target
prev_sb = curr
else:
pool_target = PoolTarget(
self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
uint32(0),
)
prev_sb = None
assert sp_vdfs.rc_vdf is not None
rc_challenge = sp_vdfs.rc_vdf.challenge
# Backtrack through empty sub-slots
for eos, _, _ in reversed(self.full_node.full_node_store.finished_sub_slots):
if eos is not None and eos.reward_chain.get_hash() == rc_challenge:
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
found = False
attempts = 0
while prev_sb is not None and attempts < 10:
if prev_sb.reward_infusion_new_challenge == rc_challenge:
found = True
break
if prev_sb.finished_reward_slot_hashes is not None and len(prev_sb.finished_reward_slot_hashes) > 0:
if prev_sb.finished_reward_slot_hashes[-1] == rc_challenge:
# This sub-block includes a sub-slot which is where our SP vdf starts. Go back one more
# to find the prev sub block
prev_sb = self.full_node.blockchain.sub_blocks.get(prev_sb.prev_hash, None)
found = True
break
prev_sb = self.full_node.blockchain.sub_blocks.get(prev_sb.prev_hash, None)
attempts += 1
if not found:
self.log.warning("Did not find a previous block with the correct reward chain hash")
return None
try:
finished_sub_slots: List[EndOfSubSlotBundle] = self.full_node.full_node_store.get_finished_sub_slots(
prev_sb, self.full_node.blockchain.sub_blocks, cc_challenge_hash
)
if (
len(finished_sub_slots) > 0
and pos_sub_slot is not None
and finished_sub_slots[-1] != pos_sub_slot[0]
):
self.log.error("Have different sub-slots than is required to farm this sub-block")
return None
except ValueError as e:
self.log.warning(f"Value Error: {e}")
return None
if len(finished_sub_slots) == 0:
if prev_sb is not None:
if request.signage_point_index == 0:
# No need to get correct block since SP RC is not validated for this sub block
pass
else:
assert sp_vdfs.rc_vdf is not None
found = False
attempts = 0
while prev_sb is not None and attempts < 10:
if prev_sb.reward_infusion_new_challenge == sp_vdfs.rc_vdf.challenge:
found = True
break
if (
prev_sb.finished_reward_slot_hashes is not None
and len(prev_sb.finished_reward_slot_hashes) > 0
):
if prev_sb.finished_reward_slot_hashes[-1] == sp_vdfs.rc_vdf.challenge:
prev_sb = self.full_node.blockchain.sub_blocks.get(prev_sb.prev_hash, None)
found = True
break
prev_sb = self.full_node.blockchain.sub_blocks.get(prev_sb.prev_hash, None)
attempts += 1
if not found:
self.log.info("Did not find a previous block with the correct reward chain hash")
return None
elif request.signage_point_index > 0:
assert sp_vdfs.rc_vdf is not None
assert finished_sub_slots[-1].reward_chain.get_hash() == sp_vdfs.rc_vdf.challenge
if prev_sb is None:
pool_target = PoolTarget(
self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
uint32(0),
)
else:
pool_target = request.pool_target
unfinished_block: UnfinishedBlock = create_unfinished_block(
self.full_node.constants,
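The rewritten search above starts from the peak rather than from the signage point: the target reward-chain challenge is first rolled back through any cached empty sub-slots, and then at most ten ancestors are checked by following prev_hash links. A stripped-down sketch of that bounded walk, using simplified records (editor's illustration):

# Editor's sketch of the bounded ancestor search shown above, with simplified records.
def find_prev_block(peak, sub_blocks, rc_challenge, max_attempts=10):
    """Walk prev_hash links until a block whose reward-chain infusion matches
    rc_challenge is found, or give up after max_attempts steps."""
    curr = peak
    attempts = 0
    while curr is not None and attempts < max_attempts:
        if curr.reward_infusion_new_challenge == rc_challenge:
            return curr
        if curr.finished_reward_slot_hashes and curr.finished_reward_slot_hashes[-1] == rc_challenge:
            # The matching sub-slot ended right after this block, so the real
            # previous block is one step further back.
            return sub_blocks.get(curr.prev_hash)
        curr = sub_blocks.get(curr.prev_hash)
        attempts += 1
    return None  # the caller logs a warning and drops the request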
@ -711,6 +727,8 @@ class FullNodeAPI:
# TIMELORD PROTOCOL
@api_request
async def new_infusion_point_vdf(self, request: timelord_protocol.NewInfusionPointVDF) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
# Lookup unfinished blocks
return await self.full_node.new_infusion_point_vdf(request)
@ -719,6 +737,9 @@ class FullNodeAPI:
async def new_signage_point_vdf(
self, request: timelord_protocol.NewSignagePointVDF, peer: ws.WSChiaConnection
) -> None:
if self.full_node.sync_store.get_sync_mode():
return None
full_node_message = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
request.challenge_chain_sp_vdf,
@ -733,6 +754,13 @@ class FullNodeAPI:
async def new_end_of_sub_slot_vdf(
self, request: timelord_protocol.NewEndOfSubSlotVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_sub_slot(request.end_of_sub_slot_bundle.challenge_chain.get_hash())
is not None
):
return None
# Calls our own internal message to handle the end of sub slot, and potentially broadcasts to other peers.
full_node_message = full_node_protocol.RespondEndOfSubSlot(request.end_of_sub_slot_bundle)
msg, added = await self.full_node.respond_end_of_sub_slot(full_node_message, peer)
@ -742,7 +770,7 @@ class FullNodeAPI:
f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}. "
f"Re-sending new-peak to timelord"
)
await self.full_node._send_peak_to_timelords()
await self.full_node.send_peak_to_timelords()
return None
else:
return msg

View File

@ -176,7 +176,7 @@ class FullNodeStore:
assert len(self.finished_sub_slots) >= 1
if len(self.finished_sub_slots) == 0:
log.warning("no fini sub slots")
log.warning("no finished sub slots")
return None
last_slot, _, last_slot_iters = self.finished_sub_slots[-1]
@ -465,7 +465,6 @@ class FullNodeStore:
reorg: bool,
sub_blocks: Dict[bytes32, SubBlockRecord],
) -> Tuple[Optional[EndOfSubSlotBundle], List[SignagePoint], List[timelord_protocol.NewInfusionPointVDF]]:
"""
If the peak is an overflow block, must provide two sub-slots: one for the current sub-slot and one for
the prev sub-slot (since we still might get more sub-blocks with an sp in the previous sub-slot)
@ -476,12 +475,18 @@ class FullNodeStore:
if not reorg:
# This is a new peak that adds to the last peak. We can clear data in old sub-slots. (and new ones)
for index, (sub_slot, sps, total_iters) in enumerate(self.finished_sub_slots):
if peak.overflow:
if sub_slot == sp_sub_slot:
# In the case of a peak overflow sub-block, the previous sub-slot is added
if sub_slot == sp_sub_slot:
# In the case of a peak overflow sub-block (or first ss), the previous sub-slot is added
if sp_sub_slot is None:
if (
ip_sub_slot is not None
and ip_sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
) == self.constants.FIRST_CC_CHALLENGE:
new_finished_sub_slots.append((sub_slot, sps, total_iters))
continue
else:
new_finished_sub_slots.append((sub_slot, sps, total_iters))
continue
if sub_slot == ip_sub_slot:
new_finished_sub_slots.append((sub_slot, sps, total_iters))
self.finished_sub_slots = new_finished_sub_slots
@ -489,7 +494,7 @@ class FullNodeStore:
# This is either a reorg, which means some sub-blocks are reverted, or this sub slot is not in our current
# cache, delete the entire cache and add this sub slot.
self.clear_slots()
if peak.overflow and sp_sub_slot is not None:
if peak.overflow:
prev_sub_slot_total_iters = peak.sp_sub_slot_total_iters(self.constants)
assert total_iters_peak != prev_sub_slot_total_iters
self.finished_sub_slots = [
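The store change above relaxes the pruning that happens when a new peak arrives: the previous sub-slot is now also kept when sp_sub_slot is missing at the very start of the chain, and the reorg path no longer requires sp_sub_slot before treating an overflow peak specially. Setting those special cases aside, the non-reorg intent is simply to keep only the cached sub-slots the new peak still refers to. A simplified sketch of that pruning (editor's illustration, not the store's exact logic):

# Editor's sketch: keep only the cache entries the new peak still needs, namely the
# sub-slot containing its signage point and the one containing its infusion point.
def prune_finished_sub_slots(finished_sub_slots, sp_sub_slot, ip_sub_slot):
    kept = []
    for sub_slot, sps, total_iters in finished_sub_slots:
        if sub_slot == sp_sub_slot or sub_slot == ip_sub_slot:
            kept.append((sub_slot, sps, total_iters))
    return kept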

View File

@ -1,9 +1,7 @@
from dataclasses import dataclass
from typing import List, Optional
from blspy import PrependSignature, PublicKey
from src.types.challenge import Challenge
from src.types.coinbase import CoinbaseInfo
from src.types.proof_of_space import ProofOfSpace
from src.util.cbor_message import cbor_message
@ -20,7 +18,7 @@ Protocol between farmer and pool.
@streamable
class SignedCoinbase:
coinbase: CoinbaseInfo
coinbase_signature: PrependSignature
# coinbase_signature: PrependSignature
@dataclass(frozen=True)
@ -34,7 +32,7 @@ class RequestData:
@cbor_message
class RespondData:
posting_url: str
pool_public_key: PublicKey
# pool_public_key: PublicKey
partials_threshold: uint64
coinbase_info: List[SignedCoinbase]
@ -42,11 +40,11 @@ class RespondData:
@dataclass(frozen=True)
@cbor_message
class Partial:
challenge: Challenge
# challenge: Challenge
proof_of_space: ProofOfSpace
farmer_target: str
# Signature of the challenge + farmer target hash
signature: PrependSignature
# signature: PrependSignature
@dataclass(frozen=True)

View File

@ -49,12 +49,14 @@ class FullNodeRpcApi:
# return payloads
return []
#
# async def get_blockchain_state(self, request: Dict):
# """
# Returns a summary of the node's view of the blockchain.
# """
# tips: List[Header] = self.service.blockchain.get_current_tips()
# lca: Header = self.service.blockchain.lca_block
# peak: Optional[SubBlockRecord] = self.service.blockchain.get_peak()
# if peak is not None and peak.height > 0:
# difficulty = uint64(peak.weight - self.service.blockchain.sub_blocks[peak.prev_hash].weight)
# sync_mode: bool = self.service.sync_store.get_sync_mode()
# difficulty: uint64 = self.service.blockchain.get_next_difficulty(lca)
# lca_block = await self.service.block_store.get_block(lca.header_hash)
@ -105,6 +107,7 @@ class FullNodeRpcApi:
# }
# self.cached_blockchain_state = dict(response["blockchain_state"])
# return response
#
# async def get_block(self, request: Dict) -> Optional[Dict]:
# if "header_hash" not in request:

View File

@ -2,7 +2,7 @@ import asyncio
import logging
import ssl
from pathlib import Path
from typing import Any, List, Dict, Tuple, Callable, Optional, Set
from typing import Any, List, Dict, Callable, Optional, Set
from aiohttp.web_app import Application
from aiohttp.web_runner import TCPSite
@ -91,7 +91,7 @@ class ChiaServer:
self.root_path = root_path
self.config = config
self.on_connect: Optional[Callable] = None
self.incoming_messages: asyncio.Queue[Tuple[Payload, WSChiaConnection]] = asyncio.Queue()
self.incoming_messages: asyncio.Queue = asyncio.Queue()
self.shut_down_event = asyncio.Event()
if self._local_type is NodeType.INTRODUCER:
@ -213,7 +213,12 @@ class ChiaServer:
url = f"wss://{target_node.host}:{target_node.port}/ws"
self.log.info(f"Connecting: {url}, Peer info: {target_node}")
ws = await session.ws_connect(url, autoclose=False, autoping=True, ssl=ssl_context)
try:
ws = await session.ws_connect(url, autoclose=False, autoping=True, ssl=ssl_context)
except asyncio.exceptions.TimeoutError as e:
self.log.warning(f"Timeout error {e} connecting to {url}")
await session.close()
return False
if ws is not None:
connection = WSChiaConnection(
self._local_type,
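The hunk above wraps the outbound ws_connect in a try/except so that a dial that times out is logged and the client session closed, instead of the exception escaping the connect path and leaving the session open. The general shape of that defensive pattern, with placeholder names (editor's sketch):

import asyncio

async def connect_with_timeout(open_connection, url, log, timeout=30):
    # Editor's sketch: treat a timed-out dial as an ordinary failure rather than a crash.
    try:
        return await asyncio.wait_for(open_connection(url), timeout=timeout)
    except asyncio.TimeoutError:
        log.warning(f"Timeout connecting to {url}")
        return None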

View File

@ -4,7 +4,7 @@ import asyncio
import traceback
from secrets import token_bytes
from typing import Any, AsyncGenerator, Callable, Optional, List, Tuple, Dict
from typing import Any, AsyncGenerator, Callable, Optional, List, Dict
from aiohttp import WSMessage, WSMsgType
@ -74,8 +74,8 @@ class WSChiaConnection:
self.last_message_time: float = 0
# Messaging
self.incoming_queue: asyncio.Queue[Tuple[Payload, WSChiaConnection]] = incoming_queue
self.outgoing_queue: asyncio.Queue[Payload] = asyncio.Queue()
self.incoming_queue: asyncio.Queue = incoming_queue
self.outgoing_queue: asyncio.Queue = asyncio.Queue()
self.inbound_task = None
self.outbound_task = None
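The queue annotations here and in the server hunk above are reduced to a bare asyncio.Queue, presumably because asyncio.Queue only became subscriptable at runtime in Python 3.9, so the subscripted annotations can fail with TypeError on the older interpreters the project still supports (editor's reading of the change, not stated in the commit). If the element type is worth keeping for type checkers, a quoted annotation avoids the runtime evaluation:

import asyncio

# Editor's sketch: a string annotation (or `from __future__ import annotations`) keeps
# the element type visible to type checkers without evaluating the subscript at runtime.
incoming_queue: "asyncio.Queue[int]" = asyncio.Queue()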

View File

@ -57,9 +57,18 @@ class SubSlotData(Streamable):
rc_slot_end_info: Optional[VDFInfo]
def is_challenge(self):
if self.proof_of_space is not None:
return True
return False
if self.cc_slot_end is not None:
return False
if self.cc_sp_sig is None:
return False
if self.cc_signage_point is None:
return False
if self.cc_infusion_point is None:
return False
if self.icc_slot_end_info is None:
return False
return True
@dataclass(frozen=True)

View File

@ -162,7 +162,7 @@ full_node:
host: *self_hostname
port: 8446
introducer_peer:
host: introducer.beta.chia.net # Chia AWS introducer IPv4/IPv6
host: introducer-ms.new_consensus_rebased.chiatechlab.com # Chia AWS introducer IPv4/IPv6
port: 8444
wallet_peer:
host: *self_hostname
@ -226,7 +226,7 @@ wallet:
recent_peer_threshold: 6000
introducer_peer:
host: introducer.beta.chia.net # Chia AWS introducer IPv4/IPv6
host: introducer-ms.new_consensus_rebased.chiatechlab.com # Chia AWS introducer IPv4/IPv6
port: 8444
ssl:

View File

@ -12,7 +12,7 @@ test_constants_modified = test_constants.replace(
"DISCRIMINANT_SIZE_BITS": 1024,
"SUB_EPOCH_SUB_BLOCKS": 140,
"MAX_SUB_SLOT_SUB_BLOCKS": 50,
"NUM_SPS_SUB_SLOT": 64, # Must be a power of 2
"NUM_SPS_SUB_SLOT": 32, # Must be a power of 2
"EPOCH_SUB_BLOCKS": 280,
"SUB_SLOT_ITERS_STARTING": 2 ** 20,
"NUMBER_ZERO_BITS_PLOT_FILTER": 5,
@ -31,7 +31,7 @@ class TestSimulation:
node1, node2, _, _, _, _, _, server1 = simulation
await server1.start_client(PeerInfo("localhost", uint16(21238)))
# Use node2 to test node communication, since only node1 extends the chain.
await time_out_assert(500, node_height_at_least, True, node2, 10)
await time_out_assert(500, node_height_at_least, True, node2, 20)
# Wait additional 2 minutes to get a compact block.
# max_height = node1.full_node.blockchain.lca_block.height