tests|github: Improve PlotManager tests + move them to test_plot_manager.py (#8875)

* tests: Improve `PlotManager` tests + move them to `test_plot_manager.py`

* github|tests: Enable `plotting` tests and generate the workflow files
dustinface 2021-11-17 15:54:25 +01:00 committed by GitHub
parent d2b6f28a3c
commit 7178d47d3a
6 changed files with 622 additions and 261 deletions


@@ -0,0 +1,102 @@
#
# THIS FILE IS GENERATED. SEE https://github.com/Chia-Network/chia-blockchain/tree/main/tests#readme
#
name: MacOS plotting Tests
on:
push:
branches:
- main
tags:
- '**'
pull_request:
branches:
- '**'
jobs:
build:
name: MacOS plotting Tests
runs-on: ${{ matrix.os }}
timeout-minutes: 30
strategy:
fail-fast: false
max-parallel: 4
matrix:
python-version: [3.8, 3.9]
os: [macOS-latest]
steps:
- name: Cancel previous runs on the same branch
if: ${{ github.ref != 'refs/heads/main' }}
uses: styfle/cancel-workflow-action@0.9.1
with:
access_token: ${{ github.token }}
- name: Checkout Code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Setup Python environment
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Create keychain for CI use
run: |
security create-keychain -p foo chiachain
security default-keychain -s chiachain
security unlock-keychain -p foo chiachain
security set-keychain-settings -t 7200 -u chiachain
- name: Get pip cache dir
id: pip-cache
run: |
echo "::set-output name=dir::$(pip cache dir)"
- name: Cache pip
uses: actions/cache@v2.1.6
with:
# Note that new runners may break this https://github.com/actions/cache/issues/292
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Checkout test blocks and plots
uses: actions/checkout@v2
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.27.0'
fetch-depth: 1
- name: Link home directory
run: |
cd $HOME
ln -s $GITHUB_WORKSPACE/.chia
echo "$HOME/.chia"
ls -al $HOME/.chia
- name: Run install script
env:
INSTALL_PYTHON_VERSION: ${{ matrix.python-version }}
BUILD_VDF_CLIENT: "N"
run: |
brew install boost
sh install.sh
# Omitted installing Timelord
- name: Install developer requirements
run: |
. ./activate
venv/bin/python -m pip install pytest pytest-asyncio pytest-xdist
- name: Test plotting code with pytest
run: |
. ./activate
./venv/bin/py.test tests/plotting/test_*.py -s -v --durations 0
#
# THIS FILE IS GENERATED. SEE https://github.com/Chia-Network/chia-blockchain/tree/main/tests#readme
#


@@ -0,0 +1,109 @@
#
# THIS FILE IS GENERATED. SEE https://github.com/Chia-Network/chia-blockchain/tree/main/tests#readme
#
name: Ubuntu plotting Test
on:
push:
branches:
- main
tags:
- '**'
pull_request:
branches:
- '**'
jobs:
build:
name: Ubuntu plotting Test
runs-on: ${{ matrix.os }}
timeout-minutes: 30
strategy:
fail-fast: false
max-parallel: 4
matrix:
python-version: [3.7, 3.8, 3.9]
os: [ubuntu-latest]
steps:
- name: Cancel previous runs on the same branch
if: ${{ github.ref != 'refs/heads/main' }}
uses: styfle/cancel-workflow-action@0.9.1
with:
access_token: ${{ github.token }}
- name: Checkout Code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Setup Python environment
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Cache npm
uses: actions/cache@v2.1.6
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Get pip cache dir
id: pip-cache
run: |
echo "::set-output name=dir::$(pip cache dir)"
- name: Cache pip
uses: actions/cache@v2.1.6
with:
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Checkout test blocks and plots
uses: actions/checkout@v2
with:
repository: 'Chia-Network/test-cache'
path: '.chia'
ref: '0.27.0'
fetch-depth: 1
- name: Link home directory
run: |
cd $HOME
ln -s $GITHUB_WORKSPACE/.chia
echo "$HOME/.chia"
ls -al $HOME/.chia
- name: Install ubuntu dependencies
run: |
sudo apt-get install software-properties-common
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt-get update
sudo apt-get install python${{ matrix.python-version }}-venv python${{ matrix.python-version }}-distutils git -y
- name: Run install script
env:
INSTALL_PYTHON_VERSION: ${{ matrix.python-version }}
run: |
sh install.sh
# Omitted installing Timelord
- name: Install developer requirements
run: |
. ./activate
venv/bin/python -m pip install pytest pytest-asyncio pytest-xdist pytest-monitor
- name: Test plotting code with pytest
run: |
. ./activate
./venv/bin/py.test tests/plotting/test_*.py -s -v --durations 0
#
# THIS FILE IS GENERATED. SEE https://github.com/Chia-Network/chia-blockchain/tree/main/tests#readme
#


@@ -1,8 +1,5 @@
import logging
from os import unlink
from pathlib import Path
from secrets import token_bytes
from shutil import copy, move
import time
import pytest
@@ -10,8 +7,7 @@ from blspy import AugSchemeMPL
from chiapos import DiskPlotter
from chia.consensus.coinbase import create_puzzlehash_for_pk
from chia.plotting.util import stream_plot_info_ph, stream_plot_info_pk, PlotRefreshResult, PlotRefreshEvents
from chia.plotting.manager import PlotManager
from chia.plotting.util import stream_plot_info_ph, stream_plot_info_pk
from chia.protocols import farmer_protocol
from chia.rpc.farmer_rpc_api import FarmerRpcApi
from chia.rpc.farmer_rpc_client import FarmerRpcClient
@@ -193,262 +189,6 @@ class TestRpc:
farmer_api.farmer.update_harvester_cache_interval = update_interval_before
farmer_api.farmer.harvester_cache = {}
expected_result: PlotRefreshResult = PlotRefreshResult()
expected_result_matched = True
# Note: We assign `expected_result_matched` in the callback and assert it in the test thread to avoid
# crashing the refresh thread of the plot manager with invalid assertions.
def test_refresh_callback(event: PlotRefreshEvents, refresh_result: PlotRefreshResult):
if event != PlotRefreshEvents.done:
# Only validate the final results for this test
return
def test_value(name: str, actual: PlotRefreshResult, expected: PlotRefreshResult):
nonlocal expected_result_matched
try:
actual_value = actual.__getattribute__(name)
expected_value = expected.__getattribute__(name)
if actual_value != expected_value:
log.error(f"{name} invalid: actual {actual_value} expected {expected_value}")
expected_result_matched = False
except AttributeError as error:
log.error(f"{error}")
expected_result_matched = False
test_value("loaded", refresh_result, expected_result)
test_value("removed", refresh_result, expected_result)
test_value("processed", refresh_result, expected_result)
test_value("remaining", refresh_result, expected_result)
harvester.plot_manager.set_refresh_callback(test_refresh_callback)
async def test_refresh_results(manager: PlotManager, start_refreshing: bool = False):
nonlocal expected_result_matched
expected_result_matched = True
if start_refreshing:
manager.start_refreshing()
else:
manager.trigger_refresh()
await time_out_assert(5, manager.needs_refresh, value=False)
assert expected_result_matched
async def test_case(
trigger,
expect_loaded,
expect_duplicates,
expect_removed,
expect_processed,
expected_directories,
expect_total_plots,
):
nonlocal expected_result_matched
expected_result.loaded = expect_loaded
expected_result.removed = expect_removed
expected_result.processed = expect_processed
await trigger
assert len(await client_2.get_plot_directories()) == expected_directories
await test_refresh_results(harvester.plot_manager)
result = await client_2.get_plots()
assert len(result["plots"]) == expect_total_plots
assert len(harvester.plot_manager.cache) == expect_total_plots
assert len(harvester.plot_manager.get_duplicates()) == expect_duplicates
assert len(harvester.plot_manager.failed_to_open_filenames) == 0
# Add plot_dir with two new plots
await test_case(
client_2.add_plot_directory(str(plot_dir)),
expect_loaded=2,
expect_removed=0,
expect_processed=num_plots + 2,
expect_duplicates=0,
expected_directories=2,
expect_total_plots=num_plots + 2,
)
# Add plot_dir_sub with one duplicate
await test_case(
client_2.add_plot_directory(str(plot_dir_sub)),
expect_loaded=0,
expect_removed=0,
expect_processed=num_plots + 3,
expect_duplicates=1,
expected_directories=3,
expect_total_plots=num_plots + 2,
)
assert plot_dir_sub.resolve() / filename_2 in harvester.plot_manager.get_duplicates()
# Delete one plot
await test_case(
client_2.delete_plot(str(plot_dir / filename)),
expect_loaded=0,
expect_removed=1,
expect_processed=num_plots + 2,
expect_duplicates=1,
expected_directories=3,
expect_total_plots=num_plots + 1,
)
# Remove directory with the duplicate
await test_case(
client_2.remove_plot_directory(str(plot_dir_sub)),
expect_loaded=0,
expect_removed=1,
expect_processed=num_plots + 1,
expect_duplicates=0,
expected_directories=2,
expect_total_plots=num_plots + 1,
)
assert plot_dir_sub.resolve() / filename_2 not in harvester.plot_manager.get_duplicates()
# Re-add the directory with the duplicate for other tests
await test_case(
client_2.add_plot_directory(str(plot_dir_sub)),
expect_loaded=0,
expect_removed=0,
expect_processed=num_plots + 2,
expect_duplicates=1,
expected_directories=3,
expect_total_plots=num_plots + 1,
)
# Remove the directory which has the duplicated plot loaded. This removes the duplicated plot from plot_dir
# and in the same run loads the plot from plot_dir_sub, which is no longer seen as a duplicate.
await test_case(
client_2.remove_plot_directory(str(plot_dir)),
expect_loaded=1,
expect_removed=1,
expect_processed=num_plots + 1,
expect_duplicates=0,
expected_directories=2,
expect_total_plots=num_plots + 1,
)
# Re-add the directory. Now the plot seen as a duplicate is from plot_dir, not from plot_dir_sub like before
await test_case(
client_2.add_plot_directory(str(plot_dir)),
expect_loaded=0,
expect_removed=0,
expect_processed=num_plots + 2,
expect_duplicates=1,
expected_directories=3,
expect_total_plots=num_plots + 1,
)
# Remove the duplicated plot
await test_case(
client_2.delete_plot(str(plot_dir / filename_2)),
expect_loaded=0,
expect_removed=1,
expect_processed=num_plots + 1,
expect_duplicates=0,
expected_directories=3,
expect_total_plots=num_plots + 1,
)
# Remove the directory with the loaded plot which is no longer a duplicate
await test_case(
client_2.remove_plot_directory(str(plot_dir_sub)),
expect_loaded=0,
expect_removed=1,
expect_processed=num_plots,
expect_duplicates=0,
expected_directories=2,
expect_total_plots=num_plots,
)
# Remove the directory which contains all other plots
await test_case(
client_2.remove_plot_directory(str(get_plot_dir())),
expect_loaded=0,
expect_removed=num_plots,
expect_processed=0,
expect_duplicates=0,
expected_directories=1,
expect_total_plots=0,
)
# Recover the plots to test caching
# First make sure cache gets written if required and new plots are loaded
await test_case(
client_2.add_plot_directory(str(get_plot_dir())),
expect_loaded=num_plots,
expect_removed=0,
expect_processed=num_plots,
expect_duplicates=0,
expected_directories=2,
expect_total_plots=num_plots,
)
assert harvester.plot_manager.cache.path().exists()
unlink(harvester.plot_manager.cache.path())
# Should not write the cache again on shutdown because it didn't change
assert not harvester.plot_manager.cache.path().exists()
harvester.plot_manager.stop_refreshing()
assert not harvester.plot_manager.cache.path().exists()
# Manually trigger `save_cache` and make sure it creates a new cache file
harvester.plot_manager.cache.save()
assert harvester.plot_manager.cache.path().exists()
expected_result.loaded = 20
expected_result.removed = 0
expected_result.processed = 20
expected_result.remaining = 0
plot_manager: PlotManager = PlotManager(harvester.root_path, test_refresh_callback)
plot_manager.cache.load()
assert len(harvester.plot_manager.cache) == len(plot_manager.cache)
await test_refresh_results(plot_manager, start_refreshing=True)
for path, plot_info in harvester.plot_manager.plots.items():
assert path in plot_manager.plots
assert plot_manager.plots[path].prover.get_filename() == plot_info.prover.get_filename()
assert plot_manager.plots[path].prover.get_id() == plot_info.prover.get_id()
assert plot_manager.plots[path].prover.get_memo() == plot_info.prover.get_memo()
assert plot_manager.plots[path].prover.get_size() == plot_info.prover.get_size()
assert plot_manager.plots[path].pool_public_key == plot_info.pool_public_key
assert plot_manager.plots[path].pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash
assert plot_manager.plots[path].plot_public_key == plot_info.plot_public_key
assert plot_manager.plots[path].file_size == plot_info.file_size
assert plot_manager.plots[path].time_modified == plot_info.time_modified
assert harvester.plot_manager.plot_filename_paths == plot_manager.plot_filename_paths
assert harvester.plot_manager.failed_to_open_filenames == plot_manager.failed_to_open_filenames
assert harvester.plot_manager.no_key_filenames == plot_manager.no_key_filenames
plot_manager.stop_refreshing()
# Modify the content of the plot_manager.dat
with open(harvester.plot_manager.cache.path(), "r+b") as file:
file.write(b"\xff\xff") # Sets Cache.version to 65535
# Make sure it just loads the plots normally if it fails to load the cache
plot_manager = PlotManager(harvester.root_path, test_refresh_callback)
plot_manager.cache.load()
assert len(plot_manager.cache) == 0
plot_manager.set_public_keys(
harvester.plot_manager.farmer_public_keys, harvester.plot_manager.pool_public_keys
)
expected_result.loaded = 20
expected_result.removed = 0
expected_result.processed = 20
expected_result.remaining = 0
await test_refresh_results(plot_manager, start_refreshing=True)
assert len(plot_manager.plots) == len(harvester.plot_manager.plots)
plot_manager.stop_refreshing()
# Test re-trying if processing a plot failed
# First save the plot
retry_test_plot = Path(plot_dir_sub / filename_2).resolve()
retry_test_plot_save = Path(plot_dir_sub / "save").resolve()
copy(retry_test_plot, retry_test_plot_save)
# Invalidate the plot
with open(plot_dir_sub / filename_2, "r+b") as file:
file.write(bytes(100))
# Add it and validate it fails to load
await harvester.add_plot_directory(str(plot_dir_sub))
expected_result.loaded = 0
expected_result.removed = 0
expected_result.processed = num_plots + 1
expected_result.remaining = 0
await test_refresh_results(harvester.plot_manager, start_refreshing=True)
assert retry_test_plot in harvester.plot_manager.failed_to_open_filenames
# Make sure the file stays in `failed_to_open_filenames` and doesn't get loaded or processed in the next
# update round
expected_result.loaded = 0
expected_result.processed = num_plots + 1
await test_refresh_results(harvester.plot_manager)
assert retry_test_plot in harvester.plot_manager.failed_to_open_filenames
# Now decrease the re-try timeout, restore the valid plot file and make sure it properly loads now
harvester.plot_manager.refresh_parameter.retry_invalid_seconds = 0
move(retry_test_plot_save, retry_test_plot)
expected_result.loaded = 1
expected_result.processed = num_plots + 1
await test_refresh_results(harvester.plot_manager)
assert retry_test_plot not in harvester.plot_manager.failed_to_open_filenames
targets_1 = await client.get_reward_targets(False)
assert "have_pool_sk" not in targets_1
assert "have_farmer_sk" not in targets_1

tests/plotting/config.py

@@ -0,0 +1 @@
install_timelord = False
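The single setting above is what drives the differences between the generated workflows (for example the "# Omitted installing Timelord" step in the YAML files above). As a rough illustration only, not the repository's actual generator referenced in the generated-file headers, a script could pick up such per-directory settings roughly like this:
# Hypothetical sketch of a workflow generator reading a per-directory config.py
# such as tests/plotting/config.py; names and defaults here are assumptions.
import importlib.util
from pathlib import Path
from typing import Any, Dict
def load_test_config(test_dir: Path) -> Dict[str, Any]:
    settings: Dict[str, Any] = {"install_timelord": True}  # assumed default
    config_path = test_dir / "config.py"
    if config_path.exists():
        spec = importlib.util.spec_from_file_location("test_config", config_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        # Pick up simple module-level assignments like `install_timelord = False`.
        settings.update({k: v for k, v in vars(module).items() if not k.startswith("_")})
    return settings
if __name__ == "__main__":
    config = load_test_config(Path("tests/plotting"))
    if not config["install_timelord"]:
        print("Skipping the Timelord install step for this workflow")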


@@ -0,0 +1,409 @@
import logging
from os import unlink
from pathlib import Path
from shutil import copy, move
from typing import Callable, List
import pytest
from dataclasses import dataclass
from chia.plotting.util import (
PlotRefreshResult,
PlotRefreshEvents,
remove_plot,
get_plot_directories,
add_plot_directory,
remove_plot_directory,
)
from chia.util.path import mkdir
from chia.plotting.manager import PlotManager
from tests.block_tools import get_plot_dir
from tests.setup_nodes import bt
from tests.time_out_assert import time_out_assert
log = logging.getLogger(__name__)
expected_result: PlotRefreshResult = PlotRefreshResult()
expected_result_matched = True
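# Test helper: creates `path`, drops any files already in it, copies the given origin plots into it and tracks
# the copied paths. `drop` only forgets a tracked path; it does not delete the file on disk.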
class TestDirectory:
path: Path
plots: List[Path]
def __init__(self, path: Path, plots_origin: List[Path]):
self.path = path
mkdir(path)
# Drop the existing files in the test directories
for plot in path.iterdir():
unlink(plot)
# Copy over the original plots
for plot in plots_origin:
if not Path(path / plot.name).exists():
copy(plot, path)
# Adjust the paths to reflect the testing plots
self.plots = [path / plot.name for plot in plots_origin]
def __len__(self):
return len(self.plots)
def drop(self, path: Path):
assert self.path / path.name
del self.plots[self.plots.index(self.path / path.name)]
@dataclass
class TestEnvironment:
root_path: Path
plot_manager: PlotManager
dir_1: TestDirectory
dir_2: TestDirectory
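# Build a fresh test setup: split the pre-generated test plots into two test directories, clear all plot
# directories from the config and return a new `PlotManager` wired to `refresh_callback` below.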
def create_test_environment(*, dir_1_count: int, dir_2_count: int) -> TestEnvironment:
plots: List[Path] = list(sorted(get_plot_dir().glob("*.plot")))
assert dir_1_count > 0
assert len(plots) >= dir_1_count + dir_2_count
dir_1: TestDirectory = TestDirectory(get_plot_dir().resolve() / "1", plots[0:dir_1_count])
dir_2: TestDirectory = TestDirectory(get_plot_dir().resolve() / "2", plots[dir_1_count : dir_1_count + dir_2_count])
bt.refresh_plots()
assert len(bt.plot_manager.plots) >= len(dir_1) + len(dir_2)
bt.plot_manager.stop_refreshing()
for directory in get_plot_directories(bt.root_path):
remove_plot_directory(bt.root_path, directory)
plot_manager = PlotManager(bt.root_path, refresh_callback)
plot_manager.set_public_keys(bt.plot_manager.farmer_public_keys, bt.plot_manager.pool_public_keys)
return TestEnvironment(bt.root_path, plot_manager, dir_1, dir_2)
# Wrap `remove_plot` to give it the same interface as the other triggers, e.g. `add_plot_directory(Path, str)`.
def trigger_remove_plot(_: Path, plot_path: str):
remove_plot(Path(plot_path))
# Note: We assign `expected_result_matched` in the callback and assert it in the test thread to avoid
# crashing the refresh thread of the plot manager with invalid assertions.
def refresh_callback(event: PlotRefreshEvents, refresh_result: PlotRefreshResult):
global expected_result_matched
if event != PlotRefreshEvents.done:
# Only validate the final results for this test
return
expected_result_matched = validate_values(
["loaded", "removed", "processed", "remaining"], refresh_result, expected_result
)
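# Compare the listed attributes of `actual` and `expected`; log and return False on the first mismatch or
# missing attribute, True otherwise.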
def validate_values(names: List[str], actual: PlotRefreshResult, expected: PlotRefreshResult):
for name in names:
try:
actual_value = actual.__getattribute__(name)
expected_value = expected.__getattribute__(name)
if actual_value != expected_value:
log.error(f"{name} invalid: actual {actual_value} expected {expected_value}")
return False
except AttributeError as error:
log.error(f"{error}")
return False
return True
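# Start and trigger a refresh on `manager`, wait up to 5 seconds for it to finish and assert that the refresh
# callback saw the expected counts.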
async def run_refresh_test(manager: PlotManager):
global expected_result_matched
expected_result_matched = True
manager.start_refreshing()
manager.trigger_refresh()
await time_out_assert(5, manager.needs_refresh, value=False)
assert expected_result_matched
@pytest.mark.asyncio
async def test_plot_refreshing():
env: TestEnvironment = create_test_environment(dir_1_count=5, dir_2_count=7)
dir_duplicates: TestDirectory = TestDirectory(get_plot_dir().resolve() / "duplicates", env.dir_1.plots)
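# Apply `trigger` to `test_path`, run a refresh and verify the resulting directory, plot, cache and duplicate
# counts against the expectations.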
async def run_test_case(
*,
trigger: Callable,
test_path: Path,
expect_loaded: int,
expect_removed: int,
expect_processed: int,
expect_duplicates: int,
expected_directories: int,
expect_total_plots: int,
):
expected_result.loaded = expect_loaded
expected_result.removed = expect_removed
expected_result.processed = expect_processed
trigger(env.root_path, str(test_path))
assert len(get_plot_directories(env.root_path)) == expected_directories
await run_refresh_test(env.plot_manager)
assert len(env.plot_manager.plots) == expect_total_plots
assert len(env.plot_manager.cache) == expect_total_plots
assert len(env.plot_manager.get_duplicates()) == expect_duplicates
assert len(env.plot_manager.failed_to_open_filenames) == 0
# Add dir_1
await run_test_case(
trigger=add_plot_directory,
test_path=env.dir_1.path,
expect_loaded=len(env.dir_1),
expect_removed=0,
expect_processed=len(env.dir_1),
expect_duplicates=0,
expected_directories=1,
expect_total_plots=len(env.dir_1),
)
# Add dir_2
await run_test_case(
trigger=add_plot_directory,
test_path=env.dir_2.path,
expect_loaded=len(env.dir_2),
expect_removed=0,
expect_processed=len(env.dir_1) + len(env.dir_2),
expect_duplicates=0,
expected_directories=2,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Add dir_duplicates
await run_test_case(
trigger=add_plot_directory,
test_path=dir_duplicates.path,
expect_loaded=0,
expect_removed=0,
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
for item in dir_duplicates.path.iterdir():
assert item.is_file() and item in env.plot_manager.get_duplicates()
# Drop the duplicated plot we remove in the next test case from the test directory upfront so that the counts
# match the expected values below
drop_path = dir_duplicates.plots[0]
dir_duplicates.drop(drop_path)
# Delete one duplicated plot
await run_test_case(
trigger=trigger_remove_plot,
test_path=drop_path,
expect_loaded=0,
expect_removed=1,
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Drop the duplicated plot we remove in the next test case from the test directory upfront so that the counts
# match the expected values below
drop_path = env.dir_1.plots[0]
env.dir_1.drop(drop_path)
# Delete one duplicated plot
await run_test_case(
trigger=trigger_remove_plot,
test_path=drop_path,
expect_loaded=0,
expect_removed=1,
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Remove directory with the duplicates
await run_test_case(
trigger=remove_plot_directory,
test_path=dir_duplicates.path,
expect_loaded=0,
expect_removed=len(dir_duplicates),
expect_processed=len(env.dir_1) + len(env.dir_2),
expect_duplicates=0,
expected_directories=2,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
for item in dir_duplicates.path.iterdir():
assert item.is_file() and item not in env.plot_manager.get_duplicates()
# Re-add the directory with the duplicates for other tests
await run_test_case(
trigger=add_plot_directory,
test_path=dir_duplicates.path,
expect_loaded=0,
expect_removed=0,
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Remove dir_1, the directory from which the duplicated plots are currently loaded. This removes those plots
# and in the same run loads their copies from dir_duplicates instead.
await run_test_case(
trigger=remove_plot_directory,
test_path=env.dir_1.path,
expect_loaded=len(dir_duplicates),
expect_removed=len(env.dir_1),
expect_processed=len(env.dir_2) + len(dir_duplicates),
expect_duplicates=0,
expected_directories=2,
expect_total_plots=len(env.dir_2) + len(dir_duplicates),
)
# Re-add the directory. Now the plots seen as duplicates are from dir_1, not from dir_duplicates like before
await run_test_case(
trigger=add_plot_directory,
test_path=env.dir_1.path,
expect_loaded=len(env.dir_1) - len(dir_duplicates),
expect_removed=0,
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Drop the duplicated plot we remove in the next test case from the test directory upfront so that the counts
# match the expected values below
drop_path = env.dir_1.plots[2]
env.dir_1.drop(drop_path)
# Remove the duplicated plot
await run_test_case(
trigger=trigger_remove_plot,
test_path=drop_path,
expect_loaded=0,
expect_removed=1,
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(env.dir_1),
expected_directories=3,
expect_total_plots=len(env.dir_2) + len(dir_duplicates),
)
# Remove dir_duplicates, this drops the duplicates and loads all plots from dir_1
await run_test_case(
trigger=remove_plot_directory,
test_path=dir_duplicates.path,
expect_loaded=len(env.dir_1),
expect_removed=len(dir_duplicates),
expect_processed=len(env.dir_1) + len(env.dir_2),
expect_duplicates=0,
expected_directories=2,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Remove dir_2
await run_test_case(
trigger=remove_plot_directory,
test_path=env.dir_2.path,
expect_loaded=0,
expect_removed=len(env.dir_2),
expect_processed=len(env.dir_1),
expect_duplicates=0,
expected_directories=1,
expect_total_plots=len(env.dir_1),
)
# Remove dir_1
await run_test_case(
trigger=remove_plot_directory,
test_path=env.dir_1.path,
expect_loaded=0,
expect_removed=len(env.dir_1),
expect_processed=0,
expect_duplicates=0,
expected_directories=0,
expect_total_plots=0,
)
env.plot_manager.stop_refreshing()
@pytest.mark.asyncio
async def test_invalid_plots():
env: TestEnvironment = create_test_environment(dir_1_count=3, dir_2_count=0)
# Test re-trying if processing a plot failed
# First create a backup of the plot
retry_test_plot = list(env.dir_1.path.iterdir())[0].resolve()
retry_test_plot_save = Path(env.dir_1.path / ".backup").resolve()
copy(retry_test_plot, retry_test_plot_save)
# Invalidate the plot
with open(retry_test_plot, "r+b") as file:
file.write(bytes(100))
# Add it and validate it fails to load
add_plot_directory(env.root_path, str(env.dir_1.path))
expected_result.loaded = len(env.dir_1) - 1
expected_result.removed = 0
expected_result.processed = len(env.dir_1)
expected_result.remaining = 0
await run_refresh_test(env.plot_manager)
assert len(env.plot_manager.failed_to_open_filenames) == 1
assert retry_test_plot in env.plot_manager.failed_to_open_filenames
# Make sure the file stays in `failed_to_open_filenames` and doesn't get loaded in the next refresh cycle
expected_result.loaded = 0
expected_result.processed = len(env.dir_1)
await run_refresh_test(env.plot_manager)
assert len(env.plot_manager.failed_to_open_filenames) == 1
assert retry_test_plot in env.plot_manager.failed_to_open_filenames
# Now decrease the re-try timeout, restore the valid plot file and make sure it properly loads now
env.plot_manager.refresh_parameter.retry_invalid_seconds = 0
move(retry_test_plot_save, retry_test_plot)
expected_result.loaded = 1
expected_result.processed = len(env.dir_1)
await run_refresh_test(env.plot_manager)
assert len(env.plot_manager.failed_to_open_filenames) == 0
assert retry_test_plot not in env.plot_manager.failed_to_open_filenames
env.plot_manager.stop_refreshing()
@pytest.mark.asyncio
async def test_plot_info_caching():
env: TestEnvironment = create_test_environment(dir_1_count=5, dir_2_count=0)
add_plot_directory(env.root_path, str(env.dir_1.path))
expected_result.loaded = len(env.dir_1)
expected_result.removed = 0
expected_result.processed = len(env.dir_1)
expected_result.remaining = 0
await run_refresh_test(env.plot_manager)
assert env.plot_manager.cache.path().exists()
unlink(env.plot_manager.cache.path())
# Should not write the cache again on shutdown because it didn't change
assert not env.plot_manager.cache.path().exists()
env.plot_manager.stop_refreshing()
assert not env.plot_manager.cache.path().exists()
# Manually trigger `save_cache` and make sure it creates a new cache file
env.plot_manager.cache.save()
assert env.plot_manager.cache.path().exists()
expected_result.loaded = len(env.dir_1)
expected_result.removed = 0
expected_result.processed = len(env.dir_1)
expected_result.remaining = 0
plot_manager: PlotManager = PlotManager(env.root_path, refresh_callback)
plot_manager.cache.load()
assert len(env.plot_manager.cache) == len(plot_manager.cache)
await run_refresh_test(plot_manager)
for path, plot_info in env.plot_manager.plots.items():
assert path in plot_manager.plots
assert plot_manager.plots[path].prover.get_filename() == plot_info.prover.get_filename()
assert plot_manager.plots[path].prover.get_id() == plot_info.prover.get_id()
assert plot_manager.plots[path].prover.get_memo() == plot_info.prover.get_memo()
assert plot_manager.plots[path].prover.get_size() == plot_info.prover.get_size()
assert plot_manager.plots[path].pool_public_key == plot_info.pool_public_key
assert plot_manager.plots[path].pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash
assert plot_manager.plots[path].plot_public_key == plot_info.plot_public_key
assert plot_manager.plots[path].file_size == plot_info.file_size
assert plot_manager.plots[path].time_modified == plot_info.time_modified
assert env.plot_manager.plot_filename_paths == plot_manager.plot_filename_paths
assert env.plot_manager.failed_to_open_filenames == plot_manager.failed_to_open_filenames
assert env.plot_manager.no_key_filenames == plot_manager.no_key_filenames
plot_manager.stop_refreshing()
# Modify the content of the plot_manager.dat
with open(plot_manager.cache.path(), "r+b") as file:
file.write(b"\xff\xff") # Sets Cache.version to 65535
# Make sure it just loads the plots normally if it fails to load the cache
plot_manager = PlotManager(env.root_path, refresh_callback)
plot_manager.cache.load()
assert len(plot_manager.cache) == 0
plot_manager.set_public_keys(bt.plot_manager.farmer_public_keys, bt.plot_manager.pool_public_keys)
expected_result.loaded = len(env.dir_1)
expected_result.removed = 0
expected_result.processed = len(env.dir_1)
expected_result.remaining = 0
await run_refresh_test(plot_manager)
assert len(plot_manager.plots) == len(env.plot_manager.plots)
plot_manager.stop_refreshing()