Add a 1024-byte limit for AGG_SIG condition messages. Catch any exception thrown during mempool block creation, and catch any exception thrown while making a block, then make an empty block. (#2013)

Mariano Sorgente 2021-04-18 15:57:07 +09:00 committed by GitHub
parent f1be1714dc
commit 615d8af00d
8 changed files with 106 additions and 26 deletions
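
The changes below share one defensive pattern: if anything goes wrong while assembling a transaction block, the error is logged and the node falls back to an empty (non-transaction) block instead of producing nothing. A minimal standalone sketch of that pattern follows; the function and type names are hypothetical stand-ins, not the project's real API.

from typing import Callable, Optional
import logging

log = logging.getLogger(__name__)

SpendBundle = bytes  # hypothetical stand-in for the real spend bundle type


def build_block_payload(
    create_bundle: Callable[[bytes], Optional[SpendBundle]], peak_hash: bytes
) -> Optional[SpendBundle]:
    """Try to build the transaction payload; on any error, log it and farm an empty block."""
    try:
        return create_bundle(peak_hash)
    except Exception as e:
        # A failure in mempool bundle creation must not stop block creation:
        # returning None here means the block is farmed without transactions.
        log.error(f"Error making spend bundle {e} peak: {peak_hash.hex()}")
        return None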

View File

@@ -193,7 +193,10 @@ async def validate_block_body(
npc_list = npc_result.npc_list
# 8. Check that cost <= MAX_BLOCK_COST_CLVM
log.warning(f"Cost: {cost} max: {constants.MAX_BLOCK_COST_CLVM}")
log.debug(
f"Cost: {cost} max: {constants.MAX_BLOCK_COST_CLVM} "
f"percent full: {cost / constants.MAX_BLOCK_COST_CLVM}"
)
if cost > constants.MAX_BLOCK_COST_CLVM:
return Err.BLOCK_COST_EXCEEDS_MAX, None
if npc_result.error is not None:

View File

@@ -567,7 +567,10 @@ class FullNodeAPI:
# FARMER PROTOCOL
@api_request
async def declare_proof_of_space(self, request: farmer_protocol.DeclareProofOfSpace) -> Optional[Message]:
@peer_required
async def declare_proof_of_space(
self, request: farmer_protocol.DeclareProofOfSpace, peer: ws.WSChiaConnection
) -> Optional[Message]:
"""
Creates a block body and header, with the proof of space, coinbase, and fee targets provided
by the farmer, and sends the hash of the header data back to the farmer.
@@ -629,7 +632,13 @@ class FullNodeAPI:
async with self.full_node.blockchain.lock:
peak: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
if peak is not None:
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(peak.header_hash)
try:
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(
peak.header_hash
)
except Exception as e:
self.full_node.log.error(f"Error making spend bundle {e} peak: {peak}")
mempool_bundle = None
if mempool_bundle is not None:
spend_bundle = mempool_bundle[0]
additions = mempool_bundle[1]
@@ -807,22 +816,58 @@ class FullNodeAPI:
foliage_sb_data_hash,
foliage_transaction_block_hash,
)
return make_msg(ProtocolMessageTypes.request_signed_values, message)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
# Adds backup in case the first one fails
if unfinished_block.is_transaction_block() and unfinished_block.transactions_generator is not None:
unfinished_block_backup = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
None,
G2Element(),
None,
None,
prev_b,
finished_sub_slots,
)
self.full_node.full_node_store.add_candidate_block(
quality_string, height, unfinished_block_backup, backup=True
)
return None
@api_request
async def signed_values(self, farmer_request: farmer_protocol.SignedValues) -> Optional[Message]:
@peer_required
async def signed_values(
self, farmer_request: farmer_protocol.SignedValues, peer: ws.WSChiaConnection
) -> Optional[Message]:
"""
Signature of header hash, by the harvester. This is enough to create an unfinished
block, which only needs a Proof of Time to be finished. If the signature is valid,
we call the unfinished_block routine.
"""
candidate: Optional[UnfinishedBlock] = self.full_node.full_node_store.get_candidate_block(
candidate_tuple: Optional[Tuple[uint32, UnfinishedBlock]] = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string
)
if candidate is None:
if candidate_tuple is None:
self.log.warning(f"Quality string {farmer_request.quality_string} not found in database")
return None
height, candidate = candidate_tuple
if not AugSchemeMPL.verify(
candidate.reward_chain_block.proof_of_space.plot_public_key,
@@ -848,8 +893,25 @@ class FullNodeAPI:
# Propagate to ourselves (which validates and does further propagations)
request = full_node_protocol.RespondUnfinishedBlock(new_candidate)
try:
await self.full_node.respond_unfinished_block(request, None, True)
except Exception as e:
# If we have an error with this block, try making an empty block
self.full_node.log.error(f"Error farming block {e} {request}")
candidate_tuple = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string, backup=True
)
if candidate_tuple is not None:
height, unfinished_block = candidate_tuple
self.full_node.full_node_store.add_candidate_block(
farmer_request.quality_string, height, unfinished_block, False
)
message = farmer_protocol.RequestSignedValues(
farmer_request.quality_string,
unfinished_block.foliage.foliage_block_data.get_hash(),
unfinished_block.foliage.foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
return None
# TIMELORD PROTOCOL
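
In the flow above, declare_proof_of_space now stores two candidates per quality string — the full transaction block and an empty backup — and signed_values retries with the backup if farming the primary raises. The sketch below models that retry logic with simplified, hypothetical types; it illustrates the control flow and is not an excerpt of FullNodeAPI.

from typing import Callable, Dict, Optional, Tuple

UnfinishedBlock = str  # hypothetical stand-in for the real unfinished block type


class CandidateStore:
    """Keeps a primary and an empty backup candidate per quality string."""

    def __init__(self) -> None:
        self._primary: Dict[bytes, Tuple[int, UnfinishedBlock]] = {}
        self._backup: Dict[bytes, Tuple[int, UnfinishedBlock]] = {}

    def add(self, qs: bytes, height: int, block: UnfinishedBlock, backup: bool = False) -> None:
        (self._backup if backup else self._primary)[qs] = (height, block)

    def get(self, qs: bytes, backup: bool = False) -> Optional[Tuple[int, UnfinishedBlock]]:
        return (self._backup if backup else self._primary).get(qs)


def handle_signed_values(
    store: CandidateStore, qs: bytes, farm: Callable[[UnfinishedBlock], None]
) -> bool:
    """Farm the primary candidate; on any error, promote the empty backup for the next round."""
    candidate = store.get(qs)
    if candidate is None:
        return False
    _, block = candidate
    try:
        farm(block)
        return True
    except Exception:
        fallback = store.get(qs, backup=True)
        if fallback is None:
            return False
        height, empty_block = fallback
        # Promote the backup so the farmer's next signed_values round farms the empty block.
        store.add(qs, height, empty_block, backup=False)
        return False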

View File

@@ -28,6 +28,7 @@ class FullNodeStore:
# Blocks which we have created, but don't have plot signatures yet, so not yet "unfinished blocks"
candidate_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]
candidate_backup_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]
# Header hashes of unfinished blocks that we have seen recently
seen_unfinished_blocks: set
@@ -60,6 +61,7 @@ class FullNodeStore:
def __init__(self):
self.candidate_blocks = {}
self.candidate_backup_blocks = {}
self.seen_unfinished_blocks = set()
self.unfinished_blocks = {}
self.finished_sub_slots = []
@@ -78,18 +80,20 @@ class FullNodeStore:
return self
def add_candidate_block(
self,
quality_string: bytes32,
height: uint32,
unfinished_block: UnfinishedBlock,
self, quality_string: bytes32, height: uint32, unfinished_block: UnfinishedBlock, backup: bool = False
):
if backup:
self.candidate_backup_blocks[quality_string] = (height, unfinished_block)
else:
self.candidate_blocks[quality_string] = (height, unfinished_block)
def get_candidate_block(self, quality_string: bytes32) -> Optional[UnfinishedBlock]:
result = self.candidate_blocks.get(quality_string, None)
if result is None:
return None
return result[1]
def get_candidate_block(
self, quality_string: bytes32, backup: bool = False
) -> Optional[Tuple[uint32, UnfinishedBlock]]:
if backup:
return self.candidate_backup_blocks.get(quality_string, None)
else:
return self.candidate_blocks.get(quality_string, None)
def clear_candidate_blocks_below(self, height: uint32) -> None:
del_keys = []
@@ -101,6 +105,15 @@ class FullNodeStore:
del self.candidate_blocks[key]
except KeyError:
pass
del_keys = []
for key, value in self.candidate_backup_blocks.items():
if value[0] < height:
del_keys.append(key)
for key in del_keys:
try:
del self.candidate_backup_blocks[key]
except KeyError:
pass
def seen_unfinished_block(self, object_hash: bytes32) -> bool:
if object_hash in self.seen_unfinished_blocks:
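
One detail worth noting in the store changes: clear_candidate_blocks_below now prunes the new candidate_backup_blocks dictionary as well; otherwise stale backups would accumulate. A self-contained illustration of that pruning rule, using plain dictionaries in place of the real store:

from typing import Dict, Tuple

# Simplified model of the two candidate dictionaries added in this file; the point is
# that pruning must cover the backup dict too.
candidate_blocks: Dict[bytes, Tuple[int, str]] = {}
candidate_backup_blocks: Dict[bytes, Tuple[int, str]] = {}


def clear_candidate_blocks_below(height: int) -> None:
    for table in (candidate_blocks, candidate_backup_blocks):
        for key in [k for k, (h, _) in table.items() if h < height]:
            table.pop(key, None)


candidate_blocks[b"qs1"] = (5, "tx block")
candidate_backup_blocks[b"qs1"] = (5, "empty block")
clear_candidate_blocks_below(8)
assert b"qs1" not in candidate_blocks and b"qs1" not in candidate_backup_blocks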

View File

@@ -5,7 +5,6 @@ import logging
import time
from concurrent.futures.process import ProcessPoolExecutor
from typing import Dict, List, Optional, Set, Tuple
from blspy import AugSchemeMPL, G1Element
from chiabip158 import PyBIP158
@@ -56,6 +55,7 @@ class MempoolManager:
self.coin_store = coin_store
self.limit_factor = 0.5
self.mempool_max_total_cost = int(self.constants.MAX_BLOCK_COST_CLVM * self.constants.MEMPOOL_BLOCK_BUFFER)
self.potential_cache_max_total_cost = int(self.constants.MAX_BLOCK_COST_CLVM * 5)
self.potential_cache_cost: int = 0
@@ -96,7 +96,7 @@ class MempoolManager:
for item in dic.values():
log.info(f"Cumulative cost: {cost_sum}")
if (
item.cost + cost_sum <= 0.5 * self.constants.MAX_BLOCK_COST_CLVM
item.cost + cost_sum <= self.limit_factor * self.constants.MAX_BLOCK_COST_CLVM
and item.fee + fee_sum <= self.constants.MAX_COIN_AMOUNT
):
spend_bundles.append(item.spend_bundle)
@@ -224,7 +224,7 @@ class MempoolManager:
log.debug(f"Cost: {cost}")
if cost > self.constants.MAX_BLOCK_COST_CLVM:
if cost > int(self.limit_factor * self.constants.MAX_BLOCK_COST_CLVM):
return None, MempoolInclusionStatus.FAILED, Err.BLOCK_COST_EXCEEDS_MAX
if npc_result.error is not None:
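
Both uses of the new limit_factor apply the same budget: a single spend bundle, and the running total of bundles picked for a block, must stay at or below limit_factor * MAX_BLOCK_COST_CLVM. A small self-contained illustration of that rule; the constant value and item shape are invented for the example.

from dataclasses import dataclass
from typing import List

MAX_BLOCK_COST_CLVM = 11_000_000_000  # hypothetical value for the example
LIMIT_FACTOR = 0.5


@dataclass
class MempoolItem:
    name: str
    cost: int


def admit(item: MempoolItem) -> bool:
    """Reject any single transaction that could not fit under the per-block cost budget."""
    return item.cost <= int(LIMIT_FACTOR * MAX_BLOCK_COST_CLVM)


def select_for_block(items: List[MempoolItem]) -> List[MempoolItem]:
    """Greedily take items while the cumulative cost stays within the budget."""
    chosen: List[MempoolItem] = []
    cost_sum = 0
    for item in items:
        if item.cost + cost_sum <= LIMIT_FACTOR * MAX_BLOCK_COST_CLVM:
            chosen.append(item)
            cost_sum += item.cost
    return chosen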

View File

@@ -62,7 +62,7 @@ class TimelordAPI:
last_ip_iters = self.timelord.last_state.get_last_ip()
if sp_iters > ip_iters:
self.timelord.overflow_blocks.append(new_unfinished_block)
log.warning(f"Overflow unfinished block, total {self.timelord.total_unfinished}")
log.debug(f"Overflow unfinished block, total {self.timelord.total_unfinished}")
elif ip_iters > last_ip_iters:
new_block_iters: Optional[uint64] = self.timelord._can_infuse_unfinished_block(new_unfinished_block)
if new_block_iters:
@@ -73,7 +73,7 @@ class TimelordAPI:
self.timelord.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
self.timelord.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
self.timelord.total_unfinished += 1
log.warning(f"Non-overflow unfinished block, total {self.timelord.total_unfinished}")
log.debug(f"Non-overflow unfinished block, total {self.timelord.total_unfinished}")
@api_request
async def request_compact_proof_of_time(self, vdf_info: timelord_protocol.RequestCompactProofOfTime):

View File

@@ -75,11 +75,13 @@ def pkm_pairs_for_conditions_dict(
for cwa in conditions_dict.get(ConditionOpcode.AGG_SIG_UNSAFE, []):
assert len(cwa.vars) == 2
assert len(cwa.vars[0]) == 48 and len(cwa.vars[1]) <= 1024
assert cwa.vars[0] is not None and cwa.vars[1] is not None
ret.append((G1Element.from_bytes(cwa.vars[0]), cwa.vars[1]))
for cwa in conditions_dict.get(ConditionOpcode.AGG_SIG_ME, []):
assert len(cwa.vars) == 2
assert len(cwa.vars[0]) == 48 and len(cwa.vars[1]) <= 1024
assert cwa.vars[0] is not None and cwa.vars[1] is not None
ret.append((G1Element.from_bytes(cwa.vars[0]), cwa.vars[1] + coin_name + additional_data))
return ret
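
The added asserts bound every AGG_SIG condition before a BLS public key is parsed: the key must be exactly 48 bytes and the message at most 1024 bytes. A tiny self-contained check in the same spirit, using plain (pubkey, message) tuples instead of the cwa objects above:

from typing import List, Tuple

MAX_AGG_SIG_MESSAGE = 1024  # byte limit added by this commit
BLS_PUBKEY_LEN = 48


def check_agg_sig_args(conditions: List[Tuple[bytes, bytes]]) -> None:
    """Reject AGG_SIG conditions whose pubkey or message is malformed or oversized."""
    for pubkey, message in conditions:
        assert len(pubkey) == BLS_PUBKEY_LEN, "AGG_SIG public key must be 48 bytes"
        assert len(message) <= MAX_AGG_SIG_MESSAGE, "AGG_SIG message exceeds 1024 bytes"


check_agg_sig_args([(b"\x00" * 48, b"hello")])        # passes
try:
    check_agg_sig_args([(b"\x00" * 48, b"x" * 2048)])  # oversized message
except AssertionError as err:
    print(err)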

View File

@@ -87,7 +87,7 @@ class Wallet:
self.cost_of_single_tx = cost_result
self.log.info(f"Cost of a single tx for standard wallet: {self.cost_of_single_tx}")
max_cost = self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM / 2 # avoid full block TXs
max_cost = self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM / 5 # avoid full block TXs
current_cost = 0
total_amount = 0
total_coin_count = 0

View File

@@ -66,7 +66,7 @@ class TestFullNodeStore:
for height, unf_block in enumerate(unfinished_blocks):
store.add_candidate_block(unf_block.get_hash(), height, unf_block)
assert store.get_candidate_block(unfinished_blocks[4].get_hash()) == unfinished_blocks[4]
assert store.get_candidate_block(unfinished_blocks[4].get_hash())[1] == unfinished_blocks[4]
store.clear_candidate_blocks_below(uint32(8))
assert store.get_candidate_block(unfinished_blocks[5].get_hash()) is None
assert store.get_candidate_block(unfinished_blocks[8].get_hash()) is not None