Fix a few bugs

Mariano Sorgente 2019-10-29 12:33:32 +09:00
parent 22a70166e7
commit 8bafbbaac7
8 changed files with 69 additions and 53 deletions

View File

@@ -23,8 +23,6 @@ sh install.sh
When running the servers on Mac OS, allow the application to accept incoming connections.
Run the servers in the following order (you can also use ipython):
```bash
./lib/chiavdf/fast_vdf/server 8889
./lib/chiavdf/fast_vdf/server 8890
python -m src.server.start_plotter
python -m src.server.start_timelord
python -m src.server.start_farmer

View File

@@ -239,18 +239,20 @@ class Blockchain:
# First epoch has a hardcoded vdf speed
return self.constants["VDF_IPS_STARTING"]
elif next_height % self.constants["DIFFICULTY_EPOCH"] != self.constants["DIFFICULTY_DELAY"]:
prev_block = await self.store.get_block(block.prev_header_hash)
if prev_block is None:
raise Exception("Previous block is invalid.")
proof_of_space = block.trunk_block.proof_of_space
challenge_hash = block.trunk_block.proof_of_time.output.challenge_hash
difficulty = await self.get_next_difficulty(prev_block.header_hash)
iterations = block.trunk_block.challenge.total_iters - prev_block.trunk_block.challenge.total_iters
prev_ips = calculate_ips_from_iterations(proof_of_space, challenge_hash, difficulty, iterations,
self.constants["MIN_BLOCK_TIME"])
if next_height % self.constants["DIFFICULTY_EPOCH"] != self.constants["DIFFICULTY_DELAY"]:
# Not at a point where ips would change, so return the previous ips
# TODO: cache this for efficiency
prev_block = await self.store.get_block(block.prev_header_hash)
if prev_block is None:
raise Exception("Previous block is invalid.")
proof_of_space = block.trunk_block.proof_of_space
challenge_hash = block.trunk_block.proof_of_time.output.challenge_hash
difficulty = await self.get_next_difficulty(prev_block.header_hash)
iterations = block.trunk_block.challenge.total_iters - prev_block.trunk_block.challenge.total_iters
return calculate_ips_from_iterations(proof_of_space, challenge_hash, difficulty, iterations,
self.constants["MIN_BLOCK_TIME"])
return prev_ips
# ips (along with difficulty) will change in this block, so we need to calculate the new one.
# The calculation is (iters_2 - iters_1) // (timestamp_2 - timestamp_1).
@@ -296,7 +298,13 @@ class Blockchain:
timestamp2 = block2.trunk_block.header.data.timestamp
iters2 = block2.trunk_block.challenge.total_iters
return uint64((iters2 - iters1) // (timestamp2 - timestamp1))
new_ips = uint64((iters2 - iters1) // (timestamp2 - timestamp1))
# Only change by a max factor, and must be at least 1
if new_ips >= prev_ips:
return min(new_ips, uint64(self.constants["IPS_FACTOR"] * prev_ips))
else:
return max([uint64(1), new_ips, uint64(prev_ips // self.constants["IPS_FACTOR"])])
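For readers following the new epoch-boundary math: the raw estimate is `(iters2 - iters1) // (timestamp2 - timestamp1)`, then truncated to the range `[prev_ips / IPS_FACTOR, prev_ips * IPS_FACTOR]` with a floor of 1, mirroring the existing `DIFFICULTY_FACTOR` rule. A minimal standalone sketch of that rule (plain ints instead of the repo's `uint64`; the helper name is made up for illustration):

```python
# Hedged sketch of the new ips update rule; IPS_FACTOR and the variable
# names come from the diff, the helper itself is illustrative only.
IPS_FACTOR = 3

def next_ips_estimate(prev_ips: int, iters1: int, iters2: int,
                      timestamp1: int, timestamp2: int) -> int:
    """Raw estimate (iters2 - iters1) // (timestamp2 - timestamp1), then
    truncated to [prev_ips // IPS_FACTOR, prev_ips * IPS_FACTOR], never < 1."""
    new_ips = (iters2 - iters1) // (timestamp2 - timestamp1)
    if new_ips >= prev_ips:
        return min(new_ips, prev_ips * IPS_FACTOR)
    return max(1, new_ips, prev_ips // IPS_FACTOR)

# e.g. with prev_ips=1000: an estimate of 10000 is capped at 3000,
# and an estimate of 100 is raised to 333.
assert next_ips_estimate(1000, 0, 10000, 0, 1) == 3000
assert next_ips_estimate(1000, 0, 100, 0, 1) == 333
```

Clamping against the previous value keeps a single bad timestamp from swinging the VDF rate estimate by more than a factor of IPS_FACTOR per epoch.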
async def receive_block(self, block: FullBlock) -> ReceiveBlockResult:
"""

View File

@@ -19,8 +19,8 @@ pool_sks:
# sha256(PrivateKey.from_seed(b'0').get_public_key().serialize()).digest()
pool_target: "9940b95222a1d19abb73c192f2c10dc65b32bcc7a703db1b40456f2dbf1e416e"
pool_share_threshold: 1000 # To send to pool, must be expected to take less than these seconds
propagate_threshold: 300 # To propagate to network, must be expected to take less than these seconds
pool_share_threshold: 600 # To send to pool, must be expected to take less than these seconds
propagate_threshold: 500 # To propagate to network, must be expected to take less than these seconds
plotter_peer:
host: "127.0.0.1"

View File

@@ -4,6 +4,7 @@ constants: Dict[str, Any] = {
"NUMBER_OF_HEADS": 3, # The number of tips each full node keeps track of and propagates
"DIFFICULTY_STARTING": 500, # These are in units of 2^32
"DIFFICULTY_FACTOR": 3, # The next difficulty is truncated to range [prev / FACTOR, prev * FACTOR]
"IPS_FACTOR": 3, # The next ips is truncated to range [prev / FACTOR, prev * FACTOR]
# These 3 constants must be changed at the same time
"DIFFICULTY_EPOCH": 12, # The number of blocks per epoch

View File

@@ -520,6 +520,7 @@ class FullNode:
timelord_request = timelord_protocol.ProofOfSpaceInfo(challenge_hash, iterations_needed)
log.warn(f"Sending pos info height {unfinished_block.block.height}")
yield OutboundMessage(NodeType.TIMELORD, Message("proof_of_space_info", timelord_request), Delivery.BROADCAST)
yield OutboundMessage(NodeType.FULL_NODE, Message("unfinished_block", unfinished_block),
Delivery.BROADCAST_TO_OTHERS)
@@ -584,20 +585,16 @@ class FullNode:
async with (await self.store.get_lock()):
log.info(f"\tUpdated heads, new heights: {[b.height for b in self.blockchain.get_current_heads()]}")
difficulty = await self.blockchain.get_next_difficulty(block.block.prev_header_hash)
old_ips = await self.store.get_proof_of_time_estimate_ips()
next_vdf_ips = await self.blockchain.get_next_ips(block.block.header_hash)
log.info(f"Difficulty {difficulty} IPS {old_ips}")
log.info(f"Difficulty {difficulty} IPS {next_vdf_ips}")
if next_vdf_ips != await self.store.get_proof_of_time_estimate_ips():
await self.store.set_proof_of_time_estimate_ips(next_vdf_ips)
ips_changed = True
if ips_changed:
if next_vdf_ips > old_ips:
# TODO: remove this for testnet/mainnet
# If rate dropped this much, don't send an update (for testing, blockchain offline, etc)
rate_update = farmer_protocol.ProofOfTimeRate(max(old_ips, next_vdf_ips))
log.error(f"Sending proof of time rate {max(old_ips, next_vdf_ips)}")
yield OutboundMessage(NodeType.FARMER, Message("proof_of_time_rate", rate_update),
Delivery.BROADCAST)
rate_update = farmer_protocol.ProofOfTimeRate(next_vdf_ips)
log.error(f"Sending proof of time rate {next_vdf_ips}")
yield OutboundMessage(NodeType.FARMER, Message("proof_of_time_rate", rate_update),
Delivery.BROADCAST)
assert block.block.trunk_block.proof_of_time
assert block.block.trunk_block.challenge
pos_quality = block.block.trunk_block.proof_of_space.verify_and_get_quality(
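Read together, the added lines above amount to: recompute the ips estimate for the new tip, persist it if it changed, and (unlike the old code, which only broadcast increases) always send the updated rate to the farmer. A condensed, hedged sketch of that flow using the names from the diff (locking and indentation simplified; not a drop-in replacement):

```python
# Sketch only: self, block, difficulty, and the store/blockchain interfaces
# are assumed to exist as in FullNode above.
async def _broadcast_ips_if_changed(self, block, difficulty):
    next_vdf_ips = await self.blockchain.get_next_ips(block.block.header_hash)
    log.info(f"Difficulty {difficulty} IPS {next_vdf_ips}")
    if next_vdf_ips != await self.store.get_proof_of_time_estimate_ips():
        await self.store.set_proof_of_time_estimate_ips(next_vdf_ips)
        rate_update = farmer_protocol.ProofOfTimeRate(next_vdf_ips)
        yield OutboundMessage(NodeType.FARMER,
                              Message("proof_of_time_rate", rate_update),
                              Delivery.BROADCAST)
```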

View File

@@ -1,22 +1,22 @@
ps -e | grep python | grep "start_" | awk '{print $1}' | xargs -L1 kill -9
ps -e | grep "fast_vdf/server" | awk '{print $1}' | xargs -L1 kill -9
_kill_servers() {
ps -e | grep python | grep "start_" | awk '{print $1}' | xargs -L1 kill -9
ps -e | grep "fast_vdf/server" | awk '{print $1}' | xargs -L1 kill -9
}
_kill_servers
./lib/chiavdf/fast_vdf/server 8889 &
P1=$!
./lib/chiavdf/fast_vdf/server 8890 &
P2=$!
python -m src.server.start_plotter &
P3=$!
P1=$!
python -m src.server.start_timelord &
P4=$!
P2=$!
python -m src.server.start_farmer &
P5=$!
P3=$!
python -m src.server.start_full_node "127.0.0.1" 8002 "-f" &
P6=$!
P4=$!
python -m src.server.start_full_node "127.0.0.1" 8004 "-t" &
P7=$!
P5=$!
python -m src.server.start_full_node "127.0.0.1" 8005 &
P8=$!
P6=$!
_term() {
echo "Caught SIGTERM signal, killing all servers."
@@ -26,11 +26,10 @@ _term() {
kill -TERM "$P4" 2>/dev/null
kill -TERM "$P5" 2>/dev/null
kill -TERM "$P6" 2>/dev/null
kill -TERM "$P7" 2>/dev/null
kill -TERM "$P8" 2>/dev/null
_kill_servers
}
trap _term SIGTERM
trap _term SIGINT
trap _term INT
wait $P1 $P2 $P3 $P4 $P5 $P6 $P7 $P8
wait $P1 $P2 $P3 $P4 $P5 $P6

View File

@@ -46,7 +46,7 @@ class Timelord:
disc: int = create_discriminant(challenge_start.challenge_hash, constants["DISCRIMINANT_SIZE_BITS"])
async with self.lock:
if (challenge_start.challenge_hash in self.seen_discriminants):
log.info("Already seen this one... Ignoring")
log.info("Already seen this challenge hash {challenge_start.challenge_hash}. Ignoring.")
return
self.seen_discriminants.append(challenge_start.challenge_hash)
self.active_heights.append(challenge_start.height)
@@ -58,7 +58,7 @@ class Timelord:
if (challenge_start.height <= max(self.active_heights) - 3):
self.done_discriminants.append(challenge_start.challenge_hash)
self.active_heights.remove(challenge_start.height)
log.info(f"Will not execute challenge at height {challenge_start.height}, too old")
log.info(f"Will not execute challenge at height {challenge_start}, too old")
return
assert(len(self.active_heights) > 0)
if (challenge_start.height == max(self.active_heights)):
@@ -71,7 +71,7 @@ class Timelord:
# Poll until a server becomes free.
if port == -1:
await asyncio.sleep(0.5)
await asyncio.sleep(1)
proc = await asyncio.create_subprocess_shell("./lib/chiavdf/fast_vdf/server " + str(port))
@@ -102,8 +102,8 @@ class Timelord:
async with self.lock:
if (challenge_start.challenge_hash in self.pending_iters):
log.info(f"Writing pending iters {challenge_start.challenge_hash}")
for iter in sorted(self.pending_iters[challenge_start.challenge_hash]):
log.info(f"Writing pending iters {challenge_start.challenge_hash}")
writer.write((str(len(str(iter))) + str(iter)).encode())
await writer.drain()
@@ -111,6 +111,7 @@ class Timelord:
while True:
data = await reader.readexactly(4)
if (data.decode() == "STOP"):
log.info("Stopped server")
# Server is now available.
async with self.lock:
writer.write(b"ACK")
@@ -121,7 +122,8 @@ class Timelord:
elif (data.decode() == "POLL"):
async with self.lock:
# If I have a newer discriminant... Free up the VDF server
if (len(self.active_heights) > 0 and challenge_start.height <= max(self.active_heights)):
if (len(self.active_heights) > 0 and challenge_start.height <= max(self.active_heights)
and challenge_start.challenge_hash in self.active_discriminants):
log.info("Got poll, stopping the challenge!")
writer.write(b'10')
await writer.drain()
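The reader loop in these hunks is one side of a small text handshake with the VDF server process: the server sends 4-byte commands ("STOP" when it has shut down, "POLL" to ask whether it should keep running), and the timelord replies with b"ACK" or the stop code b"10". A stripped-down sketch of that loop (only the messages visible in the hunks above; everything else, including the `should_stop` predicate, is an illustrative assumption):

```python
import asyncio

# Simplified sketch of the timelord <-> VDF-server handshake shown above.
# `should_stop` stands in for the "newer discriminant arrived" check.
async def handle_server_messages(reader: asyncio.StreamReader,
                                 writer: asyncio.StreamWriter,
                                 should_stop) -> None:
    while True:
        command = (await reader.readexactly(4)).decode()
        if command == "STOP":
            # Server finished or was told to stop; acknowledge and exit.
            writer.write(b"ACK")
            await writer.drain()
            break
        if command == "POLL" and should_stop():
            # A newer challenge is active: tell the server to stop early.
            writer.write(b"10")
            await writer.drain()
```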
@@ -145,7 +147,8 @@ class Timelord:
proof_blob = ClassGroup.from_ab_discriminant(y.a, y.b, disc).serialize() + proof_bytes
x = ClassGroup.from_ab_discriminant(2, 1, disc)
if (not check_proof_of_time_nwesolowski(disc, x, proof_blob, iterations_needed,
constants["DISCRIMINANT_SIZE_BITS"], self.config["n_wesolowski"])):
constants["DISCRIMINANT_SIZE_BITS"],
self.config["n_wesolowski"])):
log.error("My proof is incorrect!")
output = ProofOfTimeOutput(challenge_start.challenge_hash,
@@ -155,10 +158,14 @@ class Timelord:
response = timelord_protocol.ProofOfTimeFinished(proof_of_time)
async with self.lock:
time_taken = time.time() - self.active_discriminants_start_time[challenge_start.challenge_hash]
ips = int(iterations_needed / time_taken * 10)/10
log.info(f"Finished PoT, chall:{challenge_start.challenge_hash[:10].hex()}.. {iterations_needed}"
f" iters. {int(time_taken*1000)/1000}s, {ips} ips")
if challenge_start.challenge_hash in self.active_discriminants:
time_taken = time.time() - self.active_discriminants_start_time[challenge_start.challenge_hash]
ips = int(iterations_needed / time_taken * 10)/10
log.info(f"Finished PoT, chall:{challenge_start.challenge_hash[:10].hex()}.."
f" {iterations_needed} iters. {int(time_taken*1000)/1000}s, {ips} ips")
else:
log.info(f"Finished PoT chall:{challenge_start.challenge_hash[:10].hex()}.. {iterations_needed}"
f" iters. But challenge not active anymore")
yield OutboundMessage(NodeType.FULL_NODE, Message("proof_of_time_finished", response), Delivery.RESPOND)
@@ -178,7 +185,6 @@ class Timelord:
del self.active_discriminants[challenge_end.challenge_hash]
del self.active_discriminants_start_time[challenge_end.challenge_hash]
self.done_discriminants.append(challenge_end.challenge_hash)
await asyncio.sleep(0.5)
@api_request
async def proof_of_space_info(self, proof_of_space_info: timelord_protocol.ProofOfSpaceInfo):
@@ -187,8 +193,10 @@ class Timelord:
have a process for this challenge, we should communicate to the process to tell it how
many iterations to run for.
"""
async with self.lock:
log.info(f"{proof_of_space_info.challenge_hash in self.active_discriminants}")
log.info(f"{proof_of_space_info.challenge_hash in self.done_discriminants}")
log.info(f"{proof_of_space_info.challenge_hash in self.pending_iters}")
if (proof_of_space_info.challenge_hash in self.active_discriminants):
writer = self.active_discriminants[proof_of_space_info.challenge_hash]
writer.write(((str(len(str(proof_of_space_info.iterations_needed))) +
@@ -199,4 +207,4 @@ class Timelord:
return
elif (proof_of_space_info.challenge_hash not in self.pending_iters):
self.pending_iters[proof_of_space_info.challenge_hash] = []
self.pending_iters[proof_of_space_info.challenge_hash].append(proof_of_space_info.iterations_needed)
self.pending_iters[proof_of_space_info.challenge_hash].append(proof_of_space_info.iterations_needed)

View File

@@ -147,9 +147,14 @@ class BlockTools:
new_difficulty = max([uint64(1), new_difficulty,
uint64(curr_difficulty // test_constants["DIFFICULTY_FACTOR"])])
new_ips = uint64((iters3 - iters1)//(timestamp3 - timestamp1))
if new_ips >= curr_ips:
curr_ips = min(new_ips, uint64(test_constants["IPS_FACTOR"] * curr_ips))
else:
curr_ips = max([uint64(1), new_ips, uint64(curr_ips // test_constants["IPS_FACTOR"])])
prev_difficulty = curr_difficulty
curr_difficulty = new_difficulty
curr_ips = uint64((iters3 - iters1)//(timestamp3 - timestamp1))
time_taken = seconds_per_block
timestamp += time_taken
block_list.append(self.create_next_block(test_constants, block_list[-1], timestamp, curr_difficulty,