Mirror of https://github.com/Chia-Network/chia-blockchain.git (synced 2025-01-08 18:34:27 +03:00)

Merge branch 'catchup/long_lived_vault_from_main_84f3e3d9f2f1e1a7a3db3e3b630fbd507c607fe7' into quex.signer_protocol

This commit is contained in: 533935dbc1

.github/actions/install/action.yml: 2 changes (vendored)
@@ -63,7 +63,7 @@ runs:

     - name: Upload constraints file
       if: inputs.constraints-file-artifact-name != ''
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
       with:
         name: ${{ inputs.constraints-file-artifact-name }}
         path: venv/constraints.txt

.github/workflows/benchmarks.yml: 2 changes (vendored)
@@ -115,7 +115,7 @@ jobs:

       - name: Publish JUnit results
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: junit-data
           path: junit-data/*

@@ -191,7 +191,7 @@ jobs:
           sh build_linux_deb-2-installer.sh ${{ matrix.os.arch }}

       - name: Upload Linux artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: chia-installers-linux-deb-${{ matrix.os.arch }}
           path: ${{ github.workspace }}/build_scripts/final_installer/
@@ -233,7 +233,7 @@ jobs:
           directories: ${{ steps.create-venv.outputs.activate-venv-directories }}

       - name: Download constraints file
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: constraints-file-${{ matrix.os.arch }}
           path: venv
@@ -243,7 +243,7 @@ jobs:
           pip install --constraint venv/constraints.txt py3createtorrent

       - name: Download packages
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: chia-installers-linux-deb-${{ matrix.os.arch }}
           path: build_scripts/final_installer/
@@ -396,7 +396,7 @@ jobs:
       - uses: Chia-Network/actions/clean-workspace@main

       - name: Download packages
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         id: download
         with:
           name: chia-installers-linux-deb-${{ matrix.arch.artifact-name }}

.github/workflows/build-linux-installer-rpm.yml: 10 changes (vendored)
@@ -184,7 +184,7 @@ jobs:
           bash build_linux_rpm-2-installer.sh amd64

       - name: Upload fpm-generated rpm spec files
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           if-no-files-found: error
           name: spec
@@ -193,7 +193,7 @@ jobs:
             build_scripts/dist/gui.spec

       - name: Upload Linux artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: chia-installers-linux-rpm-intel
           path: ${{ github.workspace }}/build_scripts/final_installer/
@@ -229,7 +229,7 @@ jobs:
           directories: ${{ steps.create-venv.outputs.activate-venv-directories }}

       - name: Download constraints file
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: constraints-file-intel
           path: venv
@@ -239,7 +239,7 @@ jobs:
           pip install --constraint venv/constraints.txt py3createtorrent

       - name: Download packages
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: chia-installers-linux-rpm-intel
           path: build_scripts/final_installer/
@@ -394,7 +394,7 @@ jobs:
       - uses: Chia-Network/actions/clean-workspace@main

       - name: Download packages
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         id: download
         with:
           name: chia-installers-linux-rpm-intel

.github/workflows/build-macos-installers.yml: 8 changes (vendored)
@@ -230,7 +230,7 @@ jobs:
           sh build_macos-2-installer.sh

       - name: Upload MacOS artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: chia-installers-macos-dmg-${{ matrix.os.name }}
           path: ${{ github.workspace }}/build_scripts/final_installer/
@@ -284,7 +284,7 @@ jobs:
           directories: ${{ steps.create-venv.outputs.activate-venv-directories }}

       - name: Download constraints file
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: constraints-file-${{ matrix.os.name }}
           path: venv
@@ -294,7 +294,7 @@ jobs:
           pip install --constraint venv/constraints.txt py3createtorrent

       - name: Download packages
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: chia-installers-macos-dmg-${{ matrix.os.name }}
           path: build_scripts/final_installer/
@@ -437,7 +437,7 @@ jobs:
       - uses: Chia-Network/actions/clean-workspace@main

       - name: Download packages
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         id: download
         with:
           name: chia-installers-macos-dmg-${{ matrix.arch.artifact-name }}

@@ -258,7 +258,7 @@ jobs:
           .\build_windows-2-installer.ps1

       - name: Upload Installer to artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: chia-installers-windows-exe-intel
           path: ${{ github.workspace }}\chia-blockchain-gui\release-builds\
@@ -302,7 +302,7 @@ jobs:
           directories: ${{ steps.create-venv.outputs.activate-venv-directories }}

       - name: Download constraints file
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: constraints-file-intel
           path: venv
@@ -312,7 +312,7 @@ jobs:
           pip install --constraint venv/constraints.txt py3createtorrent

       - name: Download packages
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: chia-installers-windows-exe-intel
           path: chia-blockchain-gui/release-builds/
@@ -434,7 +434,7 @@ jobs:
       - uses: Chia-Network/actions/clean-workspace@main

       - name: Download packages
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: chia-installers-windows-exe-intel
           path: packages

.github/workflows/codeql-analysis.yml: 6 changes (vendored)
@@ -47,7 +47,7 @@ jobs:

       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v2
+        uses: github/codeql-action/init@v3
         with:
           languages: ${{ matrix.language }}
           # If you wish to specify custom queries, you can do so here or in a config file.
@@ -58,7 +58,7 @@ jobs:
       # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
       # If this step fails, then you should remove it and run the build manually (see below)
       - name: Autobuild
-        uses: github/codeql-action/autobuild@v2
+        uses: github/codeql-action/autobuild@v3

       # ℹ️ Command-line programs to run using the OS shell.
       # 📚 https://git.io/JvXDl
@@ -72,4 +72,4 @@ jobs:
       #   make release

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v2
+        uses: github/codeql-action/analyze@v3

.github/workflows/test-single.yml: 8 changes (vendored)
@@ -228,9 +228,9 @@ jobs:
           mv notchia/ chia/

       - name: Publish JUnit results
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: junit-data
+          name: junit-data-${{ env.JOB_FILE_NAME }}
           path: junit-data/*
           if-no-files-found: error

@@ -243,9 +243,9 @@ jobs:
           coverage report --rcfile=.coveragerc --show-missing

       - name: Publish coverage data
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: coverage-data
+          name: coverage-data-${{ env.JOB_FILE_NAME }}
           path: coverage-data/*
           if-no-files-found: error

.github/workflows/test.yml: 16 changes (vendored)
@@ -119,9 +119,10 @@ jobs:
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

       - name: Download Results
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
-          name: junit-data
+          merge-multiple: true
+          pattern: junit-data-*
           path: junit-data

       - name: Format JUnit data and prepare results
@@ -135,7 +136,7 @@ jobs:
           ls junit-results/*.xml | xargs --max-procs=10 --replace={} yq eval '.testsuites.testsuite |= sort_by(.+@name) | .testsuites.testsuite[].testcase |= sort_by(.+@classname, .+@name)' --inplace {}

       - name: Publish formatted JUnit data
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: junit-data
           path: junit-data/*
@@ -143,16 +144,17 @@ jobs:

       - name: Publish JUnit results
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: junit-results
           path: junit-results/*
           if-no-files-found: error

       - name: Download Coverage
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
-          name: coverage-data
+          merge-multiple: true
+          pattern: coverage-data-*
           path: coverage-data

       - name: Set up ${{ matrix.python.name }}
@@ -280,7 +282,7 @@ jobs:

       - name: Publish coverage reports
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: coverage-reports
           path: coverage-reports/*

.github/workflows/upload-pypi-source.yml: 2 changes (vendored)
@@ -196,7 +196,7 @@ jobs:
           python -m build --sdist --outdir dist .

       - name: Upload artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: dist
           path: ./dist

CHANGELOG.md: 14 changes
@@ -6,6 +6,20 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project does not yet adhere to [Semantic Versioning](https://semver.org/spec/v2.0.0.html)
 for setuptools_scm/PEP 440 reasons.

+## 2.1.4 Chia blockchain 2024-01-10
+
+### Fixed
+
+* Update chia_rs to 0.2.15 for AMD K10 architecture (fixes #16386)
+
+### Changed
+
+* improved CPU usage due to tight loop in `send_transaction()`
+* improve performance of `total_mempool_fees()` and `total_mempool_cost()`
+* reduced the default maximum peer count to 40 from 80 (only applies to new configs)
+* changed to `normal` SQLite db sync option (previously was `full`)
+* reduced the mempool size to 10 blocks from 50 blocks (improves performance)
+* improve performance of the mempool by batch fetching items from the db
+
 ## 2.1.3 Chia blockchain 2023-12-18

 ### Fixed
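
The `normal` SQLite sync entry above amounts to a single pragma on the connection. A minimal sketch with the standard sqlite3 module (the database filename is hypothetical, and this is not the project's actual db_wrapper code); in WAL mode, synchronous=NORMAL keeps the database corruption-safe while issuing far fewer fsyncs than FULL:

    import sqlite3

    conn = sqlite3.connect("blockchain_v2_mainnet.sqlite")  # hypothetical path
    conn.execute("PRAGMA journal_mode=WAL")
    # previously: PRAGMA synchronous=FULL
    conn.execute("PRAGMA synchronous=NORMAL")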

README.md: 12 changes
@@ -61,13 +61,13 @@ for consensus.
 ## Installing

 Install instructions are available in the
-[INSTALL](https://github.com/Chia-Network/chia-blockchain/wiki/INSTALL)
+[Installation Details](https://docs.chia.net/installation/)
 section of the
-[chia-blockchain repository wiki](https://github.com/Chia-Network/chia-blockchain/wiki).
+[Chia Docs](https://docs.chia.net/introduction/).

 ## Running

-Once installed, a
-[Quick Start Guide](https://github.com/Chia-Network/chia-blockchain/wiki/Quick-Start-Guide)
-is available from the repository
-[wiki](https://github.com/Chia-Network/chia-blockchain/wiki).
+Once installed, an
+[Introduction to Chia](https://docs.chia.net/introduction/)
+guide is available in the
+[Chia Docs](https://docs.chia.net/introduction/).

@@ -8,18 +8,7 @@ from pathlib import Path
 from time import monotonic
 from typing import List

-from benchmarks.utils import (
-    clvm_generator,
-    rand_bytes,
-    rand_class_group_element,
-    rand_g1,
-    rand_g2,
-    rand_hash,
-    rand_vdf,
-    rand_vdf_proof,
-    rewards,
-    setup_db,
-)
+from benchmarks.utils import setup_db
 from chia.consensus.block_record import BlockRecord
 from chia.full_node.block_store import BlockStore
 from chia.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
@@ -31,6 +20,17 @@ from chia.types.blockchain_format.sized_bytes import bytes32
 from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
 from chia.types.full_block import FullBlock
 from chia.util.ints import uint8, uint32, uint64, uint128
+from tests.util.benchmarks import (
+    clvm_generator,
+    rand_bytes,
+    rand_class_group_element,
+    rand_g1,
+    rand_g2,
+    rand_hash,
+    rand_vdf,
+    rand_vdf_proof,
+    rewards,
+)

 # to run this benchmark:
 # python -m benchmarks.coin_store
@@ -8,11 +8,12 @@ from pathlib import Path
 from time import monotonic
 from typing import List, Tuple

-from benchmarks.utils import rand_hash, rewards, setup_db
+from benchmarks.utils import setup_db
 from chia.full_node.coin_store import CoinStore
 from chia.types.blockchain_format.coin import Coin
 from chia.types.blockchain_format.sized_bytes import bytes32
 from chia.util.ints import uint32, uint64
+from tests.util.benchmarks import rand_hash, rewards

 # to run this benchmark:
 # python -m benchmarks.coin_store
@@ -10,11 +10,12 @@ from typing import Any, Callable, Dict, List, Optional, TextIO, Tuple, Type, Union

 import click

-from benchmarks.utils import EnumType, get_commit_hash, rand_bytes, rand_full_block, rand_hash
+from benchmarks.utils import EnumType, get_commit_hash
 from chia.types.blockchain_format.sized_bytes import bytes32
 from chia.types.full_block import FullBlock
 from chia.util.ints import uint8, uint64
 from chia.util.streamable import Streamable, streamable
+from tests.util.benchmarks import rand_bytes, rand_full_block, rand_hash

 # to run this benchmark:
 # python -m benchmarks.streamable
@@ -3,36 +3,14 @@ from __future__ import annotations

 import contextlib
 import enum
 import os
-import random
 import subprocess
 import sys
 from pathlib import Path
-from typing import Any, AsyncIterator, Generic, Optional, Tuple, Type, TypeVar, Union
+from typing import Any, AsyncIterator, Generic, Optional, Type, TypeVar, Union

 import click
-from chia_rs import AugSchemeMPL, G1Element, G2Element

-from chia.consensus.coinbase import create_farmer_coin, create_pool_coin
-from chia.consensus.default_constants import DEFAULT_CONSTANTS
-from chia.types.blockchain_format.classgroup import ClassgroupElement
-from chia.types.blockchain_format.coin import Coin
-from chia.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
-from chia.types.blockchain_format.pool_target import PoolTarget
-from chia.types.blockchain_format.proof_of_space import ProofOfSpace
-from chia.types.blockchain_format.reward_chain_block import RewardChainBlock
-from chia.types.blockchain_format.serialized_program import SerializedProgram
-from chia.types.blockchain_format.sized_bytes import bytes32, bytes100
-from chia.types.blockchain_format.vdf import VDFInfo, VDFProof
-from chia.types.full_block import FullBlock
 from chia.util.db_wrapper import DBWrapper2
-from chia.util.ints import uint8, uint32, uint64, uint128
-
-# farmer puzzle hash
-ph = bytes32(b"a" * 32)
-
-with open(Path(os.path.realpath(__file__)).parent / "clvm_generator.bin", "rb") as f:
-    clvm_generator = f.read()
-

 _T_Enum = TypeVar("_T_Enum", bound=enum.Enum)

@@ -48,136 +26,6 @@ class EnumType(click.Choice, Generic[_T_Enum]):
         return self.__enum(converted_str)


-def rewards(height: uint32) -> Tuple[Coin, Coin]:
-    farmer_coin = create_farmer_coin(height, ph, uint64(250000000), DEFAULT_CONSTANTS.GENESIS_CHALLENGE)
-    pool_coin = create_pool_coin(height, ph, uint64(1750000000), DEFAULT_CONSTANTS.GENESIS_CHALLENGE)
-    return farmer_coin, pool_coin
-
-
-def rand_bytes(num: int) -> bytes:
-    ret = bytearray(num)
-    for i in range(num):
-        ret[i] = random.getrandbits(8)
-    return bytes(ret)
-
-
-def rand_hash() -> bytes32:
-    return bytes32(rand_bytes(32))
-
-
-def rand_g1() -> G1Element:
-    sk = AugSchemeMPL.key_gen(rand_bytes(96))
-    return sk.get_g1()
-
-
-def rand_g2() -> G2Element:
-    sk = AugSchemeMPL.key_gen(rand_bytes(96))
-    return AugSchemeMPL.sign(sk, b"foobar")
-
-
-def rand_class_group_element() -> ClassgroupElement:
-    return ClassgroupElement(bytes100(rand_bytes(100)))
-
-
-def rand_vdf() -> VDFInfo:
-    return VDFInfo(rand_hash(), uint64(random.randint(100000, 1000000000)), rand_class_group_element())
-
-
-def rand_vdf_proof() -> VDFProof:
-    return VDFProof(
-        uint8(1),  # witness_type
-        rand_hash(),  # witness
-        bool(random.randint(0, 1)),  # normalized_to_identity
-    )
-
-
-def rand_full_block() -> FullBlock:
-    proof_of_space = ProofOfSpace(
-        rand_hash(),
-        rand_g1(),
-        None,
-        rand_g1(),
-        uint8(0),
-        rand_bytes(8 * 32),
-    )
-
-    reward_chain_block = RewardChainBlock(
-        uint128(1),
-        uint32(2),
-        uint128(3),
-        uint8(4),
-        rand_hash(),
-        proof_of_space,
-        None,
-        rand_g2(),
-        rand_vdf(),
-        None,
-        rand_g2(),
-        rand_vdf(),
-        rand_vdf(),
-        True,
-    )
-
-    pool_target = PoolTarget(
-        rand_hash(),
-        uint32(0),
-    )
-
-    foliage_block_data = FoliageBlockData(
-        rand_hash(),
-        pool_target,
-        rand_g2(),
-        rand_hash(),
-        rand_hash(),
-    )
-
-    foliage = Foliage(
-        rand_hash(),
-        rand_hash(),
-        foliage_block_data,
-        rand_g2(),
-        rand_hash(),
-        rand_g2(),
-    )
-
-    foliage_transaction_block = FoliageTransactionBlock(
-        rand_hash(),
-        uint64(0),
-        rand_hash(),
-        rand_hash(),
-        rand_hash(),
-        rand_hash(),
-    )
-
-    farmer_coin, pool_coin = rewards(uint32(0))
-
-    transactions_info = TransactionsInfo(
-        rand_hash(),
-        rand_hash(),
-        rand_g2(),
-        uint64(0),
-        uint64(1),
-        [farmer_coin, pool_coin],
-    )
-
-    full_block = FullBlock(
-        [],
-        reward_chain_block,
-        rand_vdf_proof(),
-        rand_vdf_proof(),
-        rand_vdf_proof(),
-        rand_vdf_proof(),
-        rand_vdf_proof(),
-        foliage,
-        foliage_transaction_block,
-        transactions_info,
-        SerializedProgram.from_bytes(clvm_generator),
-        [],
-    )
-
-    return full_block
-
-
 @contextlib.asynccontextmanager
 async def setup_db(name: Union[str, os.PathLike[str]], db_version: int) -> AsyncIterator[DBWrapper2]:
     db_filename = Path(name)

@@ -13,37 +13,6 @@ THIS_IS_MAC = platform.system().lower().startswith("darwin")
 ROOT = pathlib.Path(importlib.import_module("chia").__file__).absolute().parent.parent


-def solve_name_collision_problem(analysis):
-    """
-    There is a collision between the `chia` file name (which is the executable)
-    and the `chia` directory, which contains non-code resources like `english.txt`.
-    We move all the resources in the zipped area so there is no
-    need to create the `chia` directory, since the names collide.
-
-    Fetching data now requires going into a zip file, so it will be slower.
-    It's best if files that are used frequently are cached.
-
-    A sample large compressible file (1 MB of `/dev/zero`), seems to be
-    about eight times slower.
-
-    Note that this hack isn't documented, but seems to work.
-    """
-
-    zipped = []
-    datas = []
-    for data in analysis.datas:
-        if str(data[0]).startswith("chia/"):
-            zipped.append(data)
-        else:
-            datas.append(data)
-
-    # items in this field are included in the binary
-    analysis.zipped_data = zipped
-
-    # these items will be dropped in the root folder uncompressed
-    analysis.datas = datas
-
-
 keyring_imports = collect_submodules("keyring.backends")

 # keyring uses entrypoints to read keyring.backends from metadata file entry_points.txt.
@@ -176,8 +145,6 @@ def add_binary(name, path_to_script, collect_args):
         noarchive=False,
     )

-    solve_name_collision_problem(analysis)
-
     binary_pyz = PYZ(analysis.pure, analysis.zipped_data, cipher=block_cipher)

     binary_exe = EXE(

@@ -206,7 +206,7 @@ def print_min_max_derivation_for_wallets(derivation_paths: List[DerivationPath]):

 class WalletDBReader:
     db_wrapper: DBWrapper2  # TODO: Remove db_wrapper member
     config = {"db_readers": 1}
-    sql_log_path = None
+    sql_log_path: Optional[Path] = None
     verbose = False

     async def get_all_wallets(self) -> List[Wallet]:

@@ -104,10 +104,10 @@ def configure(
     if testnet == "true" or testnet == "t":
         print("Setting Testnet")
         testnet_port = "58444"
-        testnet_introducer = "introducer-testnet10.chia.net"
-        testnet_dns_introducer = "dns-introducer-testnet10.chia.net"
-        bootstrap_peers = ["testnet10-node.chia.net"]
-        testnet = "testnet10"
+        testnet_introducer = "introducer-testnet11.chia.net"
+        testnet_dns_introducer = "dns-introducer-testnet11.chia.net"
+        bootstrap_peers = ["testnet11-node-us-west-2.chia.net"]
+        testnet = "testnet11"
         config["full_node"]["port"] = int(testnet_port)
         if config["full_node"]["introducer_peer"] is None:
             config["full_node"]["introducer_peer"] = {}

@@ -121,7 +121,7 @@ async def print_block_from_hash(
         cost = str(full_block.transactions_info.cost)
         tx_filter_hash: Union[str, bytes32] = "Not a transaction block"
         if full_block.foliage_transaction_block:
-            tx_filter_hash = full_block.foliage_transaction_block.filter_hash
+            tx_filter_hash = bytes32(full_block.foliage_transaction_block.filter_hash)
         fees: Any = block.fees
     else:
         block_time_string = "Not a transaction block"

@@ -441,7 +441,6 @@ def add_token_cmd(wallet_rpc_port: Optional[int], asset_id: str, token_name: str):
     "-r",
     "--request",
     help="A wallet id of an asset to receive and the amount you wish to receive (formatted like wallet_id:amount)",
-    required=True,
     multiple=True,
 )
 @click.option("-p", "--filepath", help="The path to write the generated offer file to", required=True)
@@ -454,6 +453,7 @@ def add_token_cmd(wallet_rpc_port: Optional[int], asset_id: str, token_name: str):
     is_flag=True,
     default=False,
 )
+@click.option("-o", "--override", help="Creates offer without checking for unusual values", is_flag=True, default=False)
 def make_offer_cmd(
     wallet_rpc_port: Optional[int],
     fingerprint: int,
@@ -462,9 +462,14 @@ def make_offer_cmd(
     filepath: str,
     fee: str,
     reuse: bool,
+    override: bool,
 ) -> None:
     from .wallet_funcs import make_offer

+    if len(request) == 0 and not override:
+        print("Cannot make an offer without requesting something without --override")
+        return
+
     asyncio.run(
         make_offer(
             wallet_rpc_port=wallet_rpc_port,
@@ -873,14 +878,20 @@ def did_transfer_did(
     id: int,
     target_address: str,
     reset_recovery: bool,
-    fee: int,
+    fee: str,
     reuse: bool,
 ) -> None:
     from .wallet_funcs import transfer_did

     asyncio.run(
         transfer_did(
-            wallet_rpc_port, fingerprint, id, fee, target_address, reset_recovery is False, True if reuse else None
+            wallet_rpc_port,
+            fingerprint,
+            id,
+            Decimal(fee),
+            target_address,
+            reset_recovery is False,
+            True if reuse else None,
        )
    )
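
In the hunks above, `--request` loses `required=True` so that the new `--override` flag can force an offer that requests nothing; the check moves from click into the command body. A standalone sketch of that pattern (the command name and output are hypothetical, not the full chia CLI wiring):

    import click

    @click.command()
    @click.option("-r", "--request", multiple=True)  # no longer required=True
    @click.option("-o", "--override", is_flag=True, default=False)
    def make_offer(request: tuple, override: bool) -> None:
        # validation happens here so the unusual case can be forced through
        if len(request) == 0 and not override:
            print("Cannot make an offer without requesting something without --override")
            return
        click.echo(f"making offer with {len(request)} request(s)")

    if __name__ == "__main__":
        make_offer()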

@@ -1004,11 +1004,13 @@ async def transfer_did(
     wallet_rpc_port: Optional[int],
     fp: Optional[int],
     did_wallet_id: int,
-    fee: int,
+    d_fee: Decimal,
     target_address: str,
     with_recovery: bool,
     reuse_puzhash: Optional[bool],
 ) -> None:
+    fee: int = int(d_fee * units["chia"])
+
     async with get_wallet_client(wallet_rpc_port, fp) as (wallet_client, fingerprint, config):
         try:
             response = await wallet_client.did_transfer_did(
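
The fee now travels through the CLI as a string, is parsed into a Decimal, and is only converted to integer mojos inside transfer_did via `int(d_fee * units["chia"])`. A sketch of why Decimal matters here (the constant below is the assumed mojos-per-XCH value behind units["chia"]):

    from decimal import Decimal

    MOJO_PER_XCH = 1_000_000_000_000  # assumed value of units["chia"]

    def fee_to_mojos(d_fee: Decimal) -> int:
        # Decimal keeps one-mojo fees exact; routing the value through a
        # float could round it inexactly before truncation to int
        return int(d_fee * MOJO_PER_XCH)

    assert fee_to_mojos(Decimal("0.000000000001")) == 1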

@@ -109,12 +109,14 @@ class ForkInfo:
             self.removals_since_fork[bytes32(spend.coin_id)] = ForkRem(bytes32(spend.puzzle_hash), height)
             for puzzle_hash, amount, hint in spend.create_coin:
                 coin = Coin(bytes32(spend.coin_id), bytes32(puzzle_hash), uint64(amount))
-                self.additions_since_fork[coin.name()] = ForkAdd(coin, height, timestamp, hint, False)
+                self.additions_since_fork[coin.name()] = ForkAdd(
+                    coin, uint32(height), uint64(timestamp), hint, False
+                )
         for coin in block.get_included_reward_coins():
             assert block.foliage_transaction_block is not None
             timestamp = block.foliage_transaction_block.timestamp
             assert coin.name() not in self.additions_since_fork
-            self.additions_since_fork[coin.name()] = ForkAdd(coin, block.height, timestamp, None, True)
+            self.additions_since_fork[coin.name()] = ForkAdd(coin, uint32(block.height), uint64(timestamp), None, True)


 async def validate_block_body(
@@ -391,7 +393,7 @@ async def validate_block_body(
                     height,
                     height,
                     False,
-                    block.foliage_transaction_block.timestamp,
+                    uint64(block.foliage_transaction_block.timestamp),
                 )
                 removal_coin_records[new_unspent.name] = new_unspent
             else:
@@ -490,7 +492,7 @@ async def validate_block_body(
         block_timestamp: uint64
        if height < constants.SOFT_FORK2_HEIGHT:
             # this does not happen on mainnet. testnet10 only
-            block_timestamp = block.foliage_transaction_block.timestamp  # pragma: no cover
+            block_timestamp = uint64(block.foliage_transaction_block.timestamp)  # pragma: no cover
         else:
             block_timestamp = prev_transaction_block_timestamp

@@ -2,7 +2,6 @@ from __future__ import annotations

 import logging
 import random
-from dataclasses import replace
 from typing import Callable, Dict, List, Optional, Sequence, Tuple

 import chia_rs
@@ -501,10 +500,7 @@ def unfinished_block_to_full_block(
         is_transaction_block,
     )
     if prev_block is None:
-        new_foliage = replace(
-            unfinished_block.foliage,
-            reward_block_hash=reward_chain_block.get_hash(),
-        )
+        new_foliage = unfinished_block.foliage.replace(reward_block_hash=reward_chain_block.get_hash())
     else:
         if is_transaction_block:
             new_fbh = unfinished_block.foliage.foliage_transaction_block_hash
@@ -513,8 +509,7 @@ def unfinished_block_to_full_block(
             new_fbh = None
             new_fbs = None
         assert (new_fbh is None) == (new_fbs is None)
-        new_foliage = replace(
-            unfinished_block.foliage,
+        new_foliage = unfinished_block.foliage.replace(
             reward_block_hash=reward_chain_block.get_hash(),
             prev_block_hash=prev_block.header_hash,
             foliage_transaction_block_hash=new_fbh,
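
Across these consensus files, `replace(obj, field=x)` from dataclasses becomes a `.replace(field=x)` method on the object itself. A minimal sketch of what such a method can look like on a frozen dataclass (a stand-in for illustration, not the project's actual Streamable implementation):

    import dataclasses
    from typing import Any

    @dataclasses.dataclass(frozen=True)
    class Foliage:  # stand-in with one field; the real class has several
        reward_block_hash: bytes

        def replace(self, **kwargs: Any) -> "Foliage":
            # same result as dataclasses.replace(self, **kwargs), but the
            # call site no longer cares whether the class is a dataclass
            # or a Rust-backed type that merely offers the same method
            return dataclasses.replace(self, **kwargs)

    f = Foliage(reward_block_hash=b"\x00" * 32)
    f2 = f.replace(reward_block_hash=b"\x01" * 32)
    assert f2.reward_block_hash != f.reward_block_hash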

@@ -1,6 +1,5 @@
 from __future__ import annotations

-import dataclasses
 import logging
 import time
 from typing import Optional, Tuple
@@ -67,7 +66,7 @@ def validate_unfinished_header_block(
     if genesis_block and header_block.prev_header_hash != constants.GENESIS_CHALLENGE:
         return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)

-    overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)
+    overflow = is_overflow_block(constants, uint8(header_block.reward_chain_block.signage_point_index))
     if skip_overflow_last_ss_validation and overflow:
         if final_eos_is_already_included(header_block, blocks, expected_sub_slot_iters):
             skip_overflow_last_ss_validation = False
@@ -194,9 +193,9 @@ def validate_unfinished_header_block(
                         icc_iters_proof,
                         sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
                     )
-                    if sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
-                        target_vdf_info,
-                        number_of_iterations=icc_iters_committed,
+                    if (
+                        sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
+                        != target_vdf_info.replace(number_of_iterations=icc_iters_committed)
                     ):
                         return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
                     if not skip_vdf_is_valid:
@@ -338,9 +337,8 @@ def validate_unfinished_header_block(
             else:
                 cc_eos_vdf_info_iters = expected_sub_slot_iters
             # Check that the modified data is correct
-            if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
-                partial_cc_vdf_info,
-                number_of_iterations=cc_eos_vdf_info_iters,
+            if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf != partial_cc_vdf_info.replace(
+                number_of_iterations=cc_eos_vdf_info_iters
             ):
                 return None, ValidationError(Err.INVALID_CC_EOS_VDF, "wrong challenge chain end of slot vdf")

@@ -526,13 +524,13 @@ def validate_unfinished_header_block(
     sp_iters: uint64 = calculate_sp_iters(
         constants,
         expected_sub_slot_iters,
-        header_block.reward_chain_block.signage_point_index,
+        uint8(header_block.reward_chain_block.signage_point_index),
     )

     ip_iters: uint64 = calculate_ip_iters(
         constants,
         expected_sub_slot_iters,
-        header_block.reward_chain_block.signage_point_index,
+        uint8(header_block.reward_chain_block.signage_point_index),
         required_iters,
     )
     if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
@@ -659,9 +657,8 @@ def validate_unfinished_header_block(
             header_block.reward_chain_block.challenge_chain_sp_vdf.output,
         )

-        if header_block.reward_chain_block.challenge_chain_sp_vdf != dataclasses.replace(
-            target_vdf_info,
-            number_of_iterations=sp_iters,
+        if header_block.reward_chain_block.challenge_chain_sp_vdf != target_vdf_info.replace(
+            number_of_iterations=sp_iters
         ):
             return None, ValidationError(Err.INVALID_CC_SP_VDF)
         if not skip_vdf_is_valid:
@@ -879,7 +876,7 @@ def validate_finished_header_block(
     ip_iters: uint64 = calculate_ip_iters(
         constants,
         expected_sub_slot_iters,
-        header_block.reward_chain_block.signage_point_index,
+        uint8(header_block.reward_chain_block.signage_point_index),
         required_iters,
     )
     if not genesis_block:
@@ -944,14 +941,10 @@ def validate_finished_header_block(
         ip_vdf_iters,
         header_block.reward_chain_block.challenge_chain_ip_vdf.output,
     )
-    if header_block.reward_chain_block.challenge_chain_ip_vdf != dataclasses.replace(
-        cc_target_vdf_info,
-        number_of_iterations=ip_iters,
+    if header_block.reward_chain_block.challenge_chain_ip_vdf != cc_target_vdf_info.replace(
+        number_of_iterations=ip_iters
     ):
-        expected = dataclasses.replace(
-            cc_target_vdf_info,
-            number_of_iterations=ip_iters,
-        )
+        expected = cc_target_vdf_info.replace(number_of_iterations=ip_iters)
         log.error(f"{header_block.reward_chain_block.challenge_chain_ip_vdf }. expected {expected}")
         log.error(f"Block: {header_block}")
         return None, ValidationError(Err.INVALID_CC_IP_VDF)
@@ -990,7 +983,7 @@ def validate_finished_header_block(

     # 31. Check infused challenge chain infusion point VDF
     if not genesis_block:
-        overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)
+        overflow = is_overflow_block(constants, uint8(header_block.reward_chain_block.signage_point_index))
         deficit = calculate_deficit(
             constants,
             header_block.height,
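
The recurring `uint8(header_block.reward_chain_block.signage_point_index)` wrapping suggests the field now arrives as a plain int from the Rust-backed block classes, while helpers like is_overflow_block expect the sized type. chia's uint8 is an int subclass that range-checks on construction; a sketch of the idea (a stand-in, not chia.util.ints itself):

    class uint8(int):
        def __new__(cls, value: int) -> "uint8":
            # reject anything that does not fit in one byte
            if not 0 <= value < 2**8:
                raise ValueError(f"{value} does not fit in a uint8")
            return super().__new__(cls, value)

    signage_point_index = 7  # plain int, as returned by the Rust-backed class
    assert isinstance(uint8(signage_point_index), int)
    # uint8(300) would raise ValueError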

@@ -39,7 +39,7 @@ def block_to_block_record(
         sub_slot_iters, _ = get_next_sub_slot_iters_and_difficulty(
             constants, len(block.finished_sub_slots) > 0, prev_b, blocks
         )
-    overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
+    overflow = is_overflow_block(constants, uint8(block.reward_chain_block.signage_point_index))
     deficit = calculate_deficit(
         constants,
         block.height,
@@ -62,8 +62,8 @@ def block_to_block_record(
             blocks,
             block.height,
             blocks.block_record(prev_b.prev_hash),
-            block.finished_sub_slots[0].challenge_chain.new_difficulty,
-            block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters,
+            uint64.construct_optional(block.finished_sub_slots[0].challenge_chain.new_difficulty),
+            uint64.construct_optional(block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters),
         )
         if ses.get_hash() != found_ses_hash:
             raise ValueError(Err.INVALID_SUB_EPOCH_SUMMARY)
@@ -148,7 +148,7 @@ def header_block_to_sub_block_record(
         block.height,
         block.weight,
         block.total_iters,
-        block.reward_chain_block.signage_point_index,
+        uint8(block.reward_chain_block.signage_point_index),
         block.reward_chain_block.challenge_chain_ip_vdf.output,
         icc_output,
         block.reward_chain_block.get_hash(),
@@ -160,9 +160,9 @@ def header_block_to_sub_block_record(
         deficit,
         overflow,
         prev_transaction_block_height,
-        timestamp,
+        uint64.construct_optional(timestamp),
         prev_transaction_block_hash,
-        fees,
+        uint64.construct_optional(fees),
         reward_claims_incorporated,
         finished_challenge_slot_hashes,
         finished_infused_challenge_slot_hashes,
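
Judging by these call sites, `uint64.construct_optional` passes None through unchanged and wraps anything else in the sized type, replacing ad-hoc `None if x is None else uint64(x)` expressions at each caller. A sketch of that presumed behavior:

    from typing import Optional, Type, TypeVar

    _T = TypeVar("_T", bound=int)

    def construct_optional(cls: Type[_T], value: Optional[int]) -> Optional[_T]:
        # presumed equivalent of the uint64.construct_optional classmethod
        return None if value is None else cls(value)

    assert construct_optional(int, None) is None
    assert construct_optional(int, 5) == 5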

@@ -94,7 +94,7 @@ def next_sub_epoch_summary(
     Returns:
         object: the new sub-epoch summary
     """
-    signage_point_index = block.reward_chain_block.signage_point_index
+    signage_point_index = uint8(block.reward_chain_block.signage_point_index)
     prev_b: Optional[BlockRecord] = blocks.try_block_record(block.prev_header_hash)
     if prev_b is None or prev_b.height == 0:
         return None

@@ -32,7 +32,7 @@ from chia.util.block_cache import BlockCache
 from chia.util.condition_tools import pkm_pairs
 from chia.util.errors import Err, ValidationError
 from chia.util.generator_tools import get_block_header, tx_removals_and_additions
-from chia.util.ints import uint16, uint32, uint64
+from chia.util.ints import uint8, uint16, uint32, uint64
 from chia.util.streamable import Streamable, streamable

 log = logging.getLogger(__name__)
@@ -257,7 +257,7 @@ async def pre_validate_blocks_multiprocessing(
             constants, len(block.finished_sub_slots) > 0, prev_b, block_records
         )

-        overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
+        overflow = is_overflow_block(constants, uint8(block.reward_chain_block.signage_point_index))
         challenge = get_block_challenge(constants, block, BlockCache(recent_blocks), prev_b is None, overflow, False)
         if block.reward_chain_block.challenge_chain_sp_vdf is None:
             cc_sp_hash: bytes32 = challenge

@@ -31,7 +31,6 @@ from chia.plotters.plotters import get_available_plotters
 from chia.plotting.util import add_plot_directory
 from chia.server.server import ssl_context_for_server
 from chia.util.bech32m import encode_puzzle_hash
-from chia.util.beta_metrics import BetaMetricsLogger
 from chia.util.chia_logging import initialize_service_logging
 from chia.util.config import load_config
 from chia.util.errors import KeychainCurrentPassphraseIsInvalid
@@ -1534,8 +1533,10 @@ async def async_run_daemon(root_path: Path, wait_for_unlock: bool = False) -> int:
     with Lockfile.create(daemon_launch_lock_path(root_path), timeout=1):
         log.info(f"chia-blockchain version: {chia_full_version_str()}")

-        beta_metrics: Optional[BetaMetricsLogger] = None
+        beta_metrics = None
         if config.get("beta", {}).get("enabled", False):
+            from chia.util.beta_metrics import BetaMetricsLogger
+
             beta_metrics = BetaMetricsLogger(root_path)
             beta_metrics.start_logging()
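
The daemon change above swaps a module-level import of BetaMetricsLogger for one performed only when beta mode is enabled, so the common startup path never pays the import cost. The pattern in isolation (condensed from the hunk above; the function name is illustrative):

    from typing import Any, Optional

    def maybe_start_beta_metrics(config: dict, root_path: Any) -> Optional[Any]:
        metrics = None
        if config.get("beta", {}).get("enabled", False):
            # deferred import: the module is only loaded when the feature is on
            from chia.util.beta_metrics import BetaMetricsLogger

            metrics = BetaMetricsLogger(root_path)
            metrics.start_logging()
        return metrics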

@@ -43,6 +43,10 @@ def leaf_hash(key: bytes, value: bytes) -> bytes32:
     return Program.to((key, value)).get_tree_hash()  # type: ignore[no-any-return]


+def key_hash(key: bytes) -> bytes32:
+    return Program.to(key).get_tree_hash()  # type: ignore[no-any-return]
+
+
 async def _debug_dump(db: DBWrapper2, description: str = "") -> None:
     async with db.reader() as reader:
         cursor = await reader.execute("SELECT name FROM sqlite_master WHERE type='table';")
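
With `key_hash` joining `leaf_hash`, the data layer can index entries by two fixed 32-byte hashes instead of holding full keys and values in memory; the data_store hunks below change hint_keys_values from Dict[bytes, bytes] to Dict[bytes32, bytes32], mapping key_hash(key) to leaf_hash(key, value). The shape of that mapping, sketched with sha256 as a stand-in for the CLVM tree hash:

    import hashlib
    from typing import Dict

    def fake_key_hash(key: bytes) -> bytes:
        # stand-in for Program.to(key).get_tree_hash()
        return hashlib.sha256(b"key:" + key).digest()

    def fake_leaf_hash(key: bytes, value: bytes) -> bytes:
        # stand-in for Program.to((key, value)).get_tree_hash()
        return hashlib.sha256(b"leaf:" + key + value).digest()

    hints: Dict[bytes, bytes] = {}
    key, value = b"user/42", b"some large value..."
    hints[fake_key_hash(key)] = fake_leaf_hash(key, value)
    # membership checks now cost 64 bytes per entry, however large the
    # stored keys and values happen to be
    assert fake_key_hash(key) in hints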

@@ -28,6 +28,7 @@ from chia.data_layer.data_layer_util import (
     Subscription,
     TerminalNode,
     internal_hash,
+    key_hash,
     leaf_hash,
     row_to_node,
 )
@@ -672,35 +673,40 @@ class DataStore:

         return internal_nodes

+    async def get_keys_values_cursor(
+        self, reader: aiosqlite.Connection, root_hash: Optional[bytes32]
+    ) -> aiosqlite.Cursor:
+        return await reader.execute(
+            """
+            WITH RECURSIVE
+                tree_from_root_hash(hash, node_type, left, right, key, value, depth, rights) AS (
+                    SELECT node.*, 0 AS depth, 0 AS rights FROM node WHERE node.hash == :root_hash
+                    UNION ALL
+                    SELECT
+                        node.*,
+                        tree_from_root_hash.depth + 1 AS depth,
+                        CASE
+                            WHEN node.hash == tree_from_root_hash.right
+                            THEN tree_from_root_hash.rights + (1 << (62 - tree_from_root_hash.depth))
+                            ELSE tree_from_root_hash.rights
+                        END AS rights
+                    FROM node, tree_from_root_hash
+                    WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
+                )
+            SELECT * FROM tree_from_root_hash
+            WHERE node_type == :node_type
+            ORDER BY depth ASC, rights ASC
+            """,
+            {"root_hash": root_hash, "node_type": NodeType.TERMINAL},
+        )
+
     async def get_keys_values(self, tree_id: bytes32, root_hash: Optional[bytes32] = None) -> List[TerminalNode]:
         async with self.db_wrapper.reader() as reader:
             if root_hash is None:
                 root = await self.get_tree_root(tree_id=tree_id)
                 root_hash = root.node_hash
-            cursor = await reader.execute(
-                """
-                WITH RECURSIVE
-                    tree_from_root_hash(hash, node_type, left, right, key, value, depth, rights) AS (
-                        SELECT node.*, 0 AS depth, 0 AS rights FROM node WHERE node.hash == :root_hash
-                        UNION ALL
-                        SELECT
-                            node.*,
-                            tree_from_root_hash.depth + 1 AS depth,
-                            CASE
-                                WHEN node.hash == tree_from_root_hash.right
-                                THEN tree_from_root_hash.rights + (1 << (62 - tree_from_root_hash.depth))
-                                ELSE tree_from_root_hash.rights
-                            END AS rights
-                        FROM node, tree_from_root_hash
-                        WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
-                    )
-                SELECT * FROM tree_from_root_hash
-                WHERE node_type == :node_type
-                ORDER BY depth ASC, rights ASC
-                """,
-                {"root_hash": None if root_hash is None else root_hash, "node_type": NodeType.TERMINAL},
-            )
-
+            cursor = await self.get_keys_values_cursor(reader, root_hash)
             terminal_nodes: List[TerminalNode] = []
             async for row in cursor:
                 if row["depth"] > 62:
@@ -722,6 +728,26 @@ class DataStore:

         return terminal_nodes

+    async def get_keys_values_compressed(
+        self, tree_id: bytes32, root_hash: Optional[bytes32] = None
+    ) -> Dict[bytes32, bytes32]:
+        async with self.db_wrapper.reader() as reader:
+            if root_hash is None:
+                root = await self.get_tree_root(tree_id=tree_id)
+                root_hash = root.node_hash
+
+            cursor = await self.get_keys_values_cursor(reader, root_hash)
+            kv_compressed: Dict[bytes32, bytes32] = {}
+            async for row in cursor:
+                if row["depth"] > 62:
+                    raise Exception("Tree depth exceeded 62, unable to guarantee left-to-right node order.")
+                node = row_to_node(row=row)
+                if not isinstance(node, TerminalNode):
+                    raise Exception(f"Unexpected internal node found: {node.hash.hex()}")
+                kv_compressed[key_hash(node.key)] = leaf_hash(node.key, node.value)
+
+        return kv_compressed
+
     async def get_node_type(self, node_hash: bytes32) -> NodeType:
         async with self.db_wrapper.reader() as reader:
             cursor = await reader.execute(
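
Because the leaf hash stored in the compressed dict is also the terminal node's hash in the node table, delete() can locate a node without ever materializing its value, and upsert() fetches the node only when it needs the old value for comparison. A toy version of that flow (same sha256 stand-ins as the sketch above, with a plain dict playing the node table):

    import hashlib
    from typing import Dict

    def fake_key_hash(key: bytes) -> bytes:
        return hashlib.sha256(b"key:" + key).digest()

    def fake_leaf_hash(key: bytes, value: bytes) -> bytes:
        return hashlib.sha256(b"leaf:" + key + value).digest()

    nodes: Dict[bytes, bytes] = {}  # node_hash -> value (toy node table)
    hints: Dict[bytes, bytes] = {}  # key_hash -> leaf/node hash

    def insert(key: bytes, value: bytes) -> None:
        node_hash = fake_leaf_hash(key, value)
        nodes[node_hash] = value
        hints[fake_key_hash(key)] = node_hash

    def delete(key: bytes) -> None:
        kh = fake_key_hash(key)
        if kh not in hints:
            return  # unknown keys are ignored, as in DataStore.delete
        node_hash = hints.pop(kh)  # no value lookup needed to find the node
        del nodes[node_hash]

    insert(b"k", b"v")
    delete(b"k")
    assert not nodes and not hints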
@@ -795,7 +821,7 @@ class DataStore:
         key: bytes,
         value: bytes,
         tree_id: bytes32,
-        hint_keys_values: Optional[Dict[bytes, bytes]] = None,
+        hint_keys_values: Optional[Dict[bytes32, bytes32]] = None,
         use_optimized: bool = True,
         status: Status = Status.PENDING,
         root: Optional[Root] = None,
@@ -941,7 +967,7 @@ class DataStore:
         tree_id: bytes32,
         reference_node_hash: Optional[bytes32],
         side: Optional[Side],
-        hint_keys_values: Optional[Dict[bytes, bytes]] = None,
+        hint_keys_values: Optional[Dict[bytes32, bytes32]] = None,
         use_optimized: bool = True,
         status: Status = Status.PENDING,
         root: Optional[Root] = None,
@@ -959,7 +985,7 @@ class DataStore:
             if any(key == node.key for node in pairs):
                 raise Exception(f"Key already present: {key.hex()}")
         else:
-            if key in hint_keys_values:
+            if key_hash(key) in hint_keys_values:
                 raise Exception(f"Key already present: {key.hex()}")

         if reference_node_hash is None:
@@ -1015,14 +1041,14 @@ class DataStore:
         )

         if hint_keys_values is not None:
-            hint_keys_values[key] = value
+            hint_keys_values[key_hash(key)] = leaf_hash(key, value)
         return InsertResult(node_hash=new_terminal_node_hash, root=new_root)

     async def delete(
         self,
         key: bytes,
         tree_id: bytes32,
-        hint_keys_values: Optional[Dict[bytes, bytes]] = None,
+        hint_keys_values: Optional[Dict[bytes32, bytes32]] = None,
         use_optimized: bool = True,
         status: Status = Status.PENDING,
         root: Optional[Root] = None,
@@ -1031,17 +1057,17 @@ class DataStore:
         async with self.db_wrapper.writer():
             if hint_keys_values is None:
                 node = await self.get_node_by_key(key=key, tree_id=tree_id)
+                node_hash = node.hash
                 assert isinstance(node, TerminalNode)
             else:
-                if key not in hint_keys_values:
+                if key_hash(key) not in hint_keys_values:
                     log.debug(f"Request to delete an unknown key ignored: {key.hex()}")
                     return root
-                value = hint_keys_values[key]
-                node_hash = leaf_hash(key=key, value=value)
-                node = TerminalNode(node_hash, key, value)
-                del hint_keys_values[key]
+                node_hash = hint_keys_values[key_hash(key)]
+                del hint_keys_values[key_hash(key)]

             ancestors: List[InternalNode] = await self.get_ancestors_common(
-                node_hash=node.hash,
+                node_hash=node_hash,
                 tree_id=tree_id,
                 root_hash=root_hash,
                 use_optimized=use_optimized,
@@ -1056,7 +1082,7 @@ class DataStore:
             )

             parent = ancestors[0]
-            other_hash = parent.other_child_hash(hash=node.hash)
+            other_hash = parent.other_child_hash(hash=node_hash)

             if len(ancestors) == 1:
                 # the parent is the root so the other side will become the new root
@@ -1106,7 +1132,7 @@ class DataStore:
         key: bytes,
         new_value: bytes,
         tree_id: bytes32,
-        hint_keys_values: Optional[Dict[bytes, bytes]] = None,
+        hint_keys_values: Optional[Dict[bytes32, bytes32]] = None,
         use_optimized: bool = True,
         status: Status = Status.PENDING,
         root: Optional[Root] = None,
@@ -1134,7 +1160,7 @@ class DataStore:
                     return InsertResult(leaf_hash(key, new_value), root)
                 old_node_hash = old_node.hash
             else:
-                if key not in hint_keys_values:
+                if key_hash(key) not in hint_keys_values:
                     log.debug(f"Key not found: {key.hex()}. Doing an autoinsert instead")
                     return await self.autoinsert(
                         key=key,
@@ -1145,12 +1171,15 @@ class DataStore:
                         status=status,
                         root=root,
                     )
-                value = hint_keys_values[key]
+                node_hash = hint_keys_values[key_hash(key)]
+                node = await self.get_node(node_hash)
+                assert isinstance(node, TerminalNode)
+                value = node.value
                 if value == new_value:
                     log.debug(f"New value matches old value in upsert operation: {key.hex()}")
                     return InsertResult(leaf_hash(key, new_value), root)
                 old_node_hash = leaf_hash(key=key, value=value)
-                del hint_keys_values[key]
+                del hint_keys_values[key_hash(key)]

             # create new terminal node
             new_terminal_node_hash = await self._insert_terminal_node(key=key, value=new_value)
@@ -1192,7 +1221,7 @@ class DataStore:
         )

         if hint_keys_values is not None:
-            hint_keys_values[key] = new_value
+            hint_keys_values[key_hash(key)] = leaf_hash(key, new_value)
         return InsertResult(node_hash=new_terminal_node_hash, root=new_root)

     async def clean_node_table(self, writer: aiosqlite.Connection) -> None:
@@ -1229,7 +1258,7 @@ class DataStore:
         if old_root.node_hash is None:
             hint_keys_values = {}
         else:
-            hint_keys_values = await self.get_keys_values_dict(tree_id, root_hash=root_hash)
+            hint_keys_values = await self.get_keys_values_compressed(tree_id, root_hash=root_hash)

         intermediate_root: Optional[Root] = old_root
         for change in changelist:

@@ -23,7 +23,7 @@ async def generate_datastore(num_nodes: int, slow_mode: bool) -> None:
         os.remove(db_path)

     async with DataStore.managed(database=db_path) as data_store:
-        hint_keys_values: Dict[bytes, bytes] = {}
+        hint_keys_values: Dict[bytes32, bytes32] = {}

         tree_id = bytes32(b"0" * 32)
         await data_store.create_tree(tree_id)

@@ -94,7 +94,7 @@ from chia.util.ints import uint8, uint32, uint64, uint128
 from chia.util.limited_semaphore import LimitedSemaphore
 from chia.util.log_exceptions import log_exceptions
 from chia.util.path import path_from_root
-from chia.util.profiler import mem_profile_task, profile_task
+from chia.util.profiler import enable_profiler, mem_profile_task, profile_task
 from chia.util.safe_cancel_task import cancel_task_safe


@@ -232,7 +232,7 @@ class FullNode:
         async with DBWrapper2.managed(
             self.db_path,
             db_version=db_version,
-            reader_count=4,
+            reader_count=self.config.get("db_readers", 4),
             log_path=sql_log_path,
             synchronous=db_sync,
         ) as self._db_wrapper:
@@ -288,6 +288,13 @@ class FullNode:
         if self.config.get("enable_profiler", False):
             asyncio.create_task(profile_task(self.root_path, "node", self.log))

+        self.profile_block_validation = self.config.get("profile_block_validation", False)
+        if self.profile_block_validation:  # pragma: no cover
+            # this is not covered by any unit tests as it's essentially test code
+            # itself. It's exercised manually when investigating performance issues
+            profile_dir = path_from_root(self.root_path, "block-validation-profile")
+            profile_dir.mkdir(parents=True, exist_ok=True)
+
         if self.config.get("enable_memory_profiler", False):
             asyncio.create_task(mem_profile_task(self.root_path, "node", self.log))
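
The enable_profiler imported above is used later in this file as an async context manager that yields a cProfile.Profile when profiling is on and None otherwise. A sketch consistent with those call sites (an assumption about chia.util.profiler, not its verbatim source):

    import contextlib
    import cProfile
    from typing import AsyncIterator, Optional

    @contextlib.asynccontextmanager
    async def enable_profiler(profile: bool) -> AsyncIterator[Optional[cProfile.Profile]]:
        if not profile:
            yield None
            return
        pr = cProfile.Profile()
        pr.enable()
        try:
            yield pr
        finally:
            # the caller decides whether to create_stats()/dump_stats()
            pr.disable()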

@@ -466,7 +473,7 @@ class FullNode:
                 peer = entry.peer
                 try:
                     inc_status, err = await self.add_transaction(entry.transaction, entry.spend_name, peer, entry.test)
-                    entry.done.set_result((inc_status, err))
+                    entry.done.set((inc_status, err))
                 except asyncio.CancelledError:
                     error_stack = traceback.format_exc()
                     self.log.debug(f"Cancelling _handle_one_transaction, closing: {error_stack}")
@@ -1147,7 +1154,7 @@ class FullNode:
             hints_to_add, _ = get_hints_and_subscription_coin_ids(
                 state_change_summary,
                 self.subscriptions.has_coin_subscription,
-                self.subscriptions.has_ph_subscription,
+                self.subscriptions.has_puzzle_subscription,
             )
             await self.hint_store.add_hints(hints_to_add)
         # Note that end_height is not necessarily the peak at this
@@ -1387,8 +1394,8 @@ class FullNode:
         self.log.info(
             f"⏲️ Finished signage point {request.index_from_challenge}/"
             f"{self.constants.NUM_SPS_SUB_SLOT}: "
-            f"CC: {request.challenge_chain_vdf.output.get_hash()} "
-            f"RC: {request.reward_chain_vdf.output.get_hash()} "
+            f"CC: {request.challenge_chain_vdf.output.get_hash().hex()} "
+            f"RC: {request.reward_chain_vdf.output.get_hash().hex()} "
         )
         self.signage_point_times[request.index_from_challenge] = time.time()
         sub_slot_tuple = self.full_node_store.get_sub_slot(request.challenge_chain_vdf.challenge)
@@ -1456,7 +1463,7 @@ class FullNode:
             self.log.info(
                 f"🌱 Updated peak to height {record.height}, weight {record.weight}, "
                 f"hh {record.header_hash}, "
-                f"forked at {state_change_summary.fork_height}, rh: {record.reward_infusion_new_challenge}, "
+                f"forked at {state_change_summary.fork_height}, rh: {record.reward_infusion_new_challenge.hex()}, "
                 f"total iters: {record.total_iters}, "
                 f"overflow: {record.overflow}, "
                 f"deficit: {record.deficit}, "
@@ -1477,7 +1484,7 @@ class FullNode:
         hints_to_add, lookup_coin_ids = get_hints_and_subscription_coin_ids(
             state_change_summary,
             self.subscriptions.has_coin_subscription,
-            self.subscriptions.has_ph_subscription,
+            self.subscriptions.has_puzzle_subscription,
         )
         await self.hint_store.add_hints(hints_to_add)

@@ -1591,7 +1598,6 @@ class FullNode:

         if record.height % 1000 == 0:
             # Occasionally clear data in full node store to keep memory usage small
-            self.full_node_store.clear_seen_unfinished_blocks()
             self.full_node_store.clear_old_cache_entries()

         if self.sync_store.get_sync_mode() is False:
@@ -1706,7 +1712,9 @@ class FullNode:
             return await self.add_block(new_block, peer)
         state_change_summary: Optional[StateChangeSummary] = None
         ppp_result: Optional[PeakPostProcessingResult] = None
-        async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
+        async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high), enable_profiler(
+            self.profile_block_validation
+        ) as pr:
             # After acquiring the lock, check again, because another asyncio thread might have added it
             if self.blockchain.contains_block(header_hash):
                 return None
@@ -1800,9 +1808,16 @@ class FullNode:
                 f"pre_validation time: {pre_validation_time:0.2f} seconds, "
                 f"post-process time: {post_process_time:0.2f} seconds, "
                 f"cost: {block.transactions_info.cost if block.transactions_info is not None else 'None'}"
-                f"{percent_full_str} header_hash: {header_hash} height: {block.height}",
+                f"{percent_full_str} header_hash: {header_hash.hex()} height: {block.height}",
             )

+            # this is not covered by any unit tests as it's essentially test code
+            # itself. It's exercised manually when investigating performance issues
+            if validation_time > 2 and pr is not None:  # pragma: no cover
+                pr.create_stats()
+                profile_dir = path_from_root(self.root_path, "block-validation-profile")
+                pr.dump_stats(profile_dir / f"{block.height}-{validation_time:0.1f}.profile")
+
             # This code path is reached if added == ADDED_AS_ORPHAN or NEW_TIP
             peak = self.blockchain.get_peak()
             assert peak is not None
@ -1816,11 +1831,17 @@ class FullNode:
|
||||
"transaction_block": False,
|
||||
"k_size": block.reward_chain_block.proof_of_space.size,
|
||||
"header_hash": block.header_hash,
|
||||
"fork_height": None,
|
||||
"rolled_back_records": None,
|
||||
"height": block.height,
|
||||
"validation_time": validation_time,
|
||||
"pre_validation_time": pre_validation_time,
|
||||
}
|
||||
|
||||
if state_change_summary is not None:
|
||||
state_changed_data["fork_height"] = state_change_summary.fork_height
|
||||
state_changed_data["rolled_back_records"] = len(state_change_summary.rolled_back_records)
|
||||
|
||||
if block.transactions_info is not None:
|
||||
state_changed_data["transaction_block"] = True
|
||||
state_changed_data["block_cost"] = block.transactions_info.cost
|
||||
@ -1872,7 +1893,7 @@ class FullNode:
|
||||
if self.full_node_store.seen_unfinished_block(block.get_hash()):
|
||||
return None
|
||||
|
||||
block_hash = block.reward_chain_block.get_hash()
|
||||
block_hash = bytes32(block.reward_chain_block.get_hash())
|
||||
|
||||
# This searched for the trunk hash (unfinished reward hash). If we have already added a block with the same
|
||||
# hash, return
|
||||
@ -1954,10 +1975,6 @@ class FullNode:
|
||||
validation_start = time.monotonic()
|
||||
validate_result = await self.blockchain.validate_unfinished_block(block, npc_result)
|
||||
if validate_result.error is not None:
|
||||
if validate_result.error == Err.COIN_AMOUNT_NEGATIVE.value:
|
||||
# TODO: remove in the future, hotfix for 1.1.5 peers to not disconnect older peers
|
||||
self.log.info(f"Consensus error {validate_result.error}, not disconnecting")
|
||||
return
|
||||
raise ConsensusError(Err(validate_result.error))
|
||||
validation_time = time.monotonic() - validation_start
|
||||
|
||||
@ -2097,7 +2114,9 @@ class FullNode:
|
||||
# If not found, cache keyed on prev block
|
||||
if prev_b is None:
|
||||
self.full_node_store.add_to_future_ip(request)
|
||||
self.log.warning(f"Previous block is None, infusion point {request.reward_chain_ip_vdf.challenge}")
|
||||
self.log.warning(
|
||||
f"Previous block is None, infusion point {request.reward_chain_ip_vdf.challenge.hex()}"
|
||||
)
|
||||
return None
|
||||
|
||||
finished_sub_slots: Optional[List[EndOfSubSlotBundle]] = self.full_node_store.get_finished_sub_slots(
|
||||
@ -2128,7 +2147,7 @@ class FullNode:
|
||||
+ calculate_sp_iters(
|
||||
self.constants,
|
||||
sub_slot_iters,
|
||||
unfinished_block.reward_chain_block.signage_point_index,
|
||||
uint8(unfinished_block.reward_chain_block.signage_point_index),
|
||||
)
|
||||
)
|
||||
|
||||
@ -2210,9 +2229,9 @@ class FullNode:
|
||||
if new_infusions is not None:
|
||||
self.log.info(
|
||||
f"⏲️ Finished sub slot, SP {self.constants.NUM_SPS_SUB_SLOT}/{self.constants.NUM_SPS_SUB_SLOT}, "
|
||||
f"{end_of_slot_bundle.challenge_chain.get_hash()}, "
|
||||
f"{end_of_slot_bundle.challenge_chain.get_hash().hex()}, "
|
||||
f"number of sub-slots: {len(self.full_node_store.finished_sub_slots)}, "
|
||||
f"RC hash: {end_of_slot_bundle.reward_chain.get_hash()}, "
|
||||
f"RC hash: {end_of_slot_bundle.reward_chain.get_hash().hex()}, "
|
||||
f"Deficit {end_of_slot_bundle.reward_chain.deficit}"
|
||||
)
|
||||
# Reset farmer response timer for sub slot (SP 0)
|
||||
@ -2246,7 +2265,7 @@ class FullNode:
|
||||
else:
|
||||
self.log.info(
|
||||
f"End of slot not added CC challenge "
|
||||
f"{end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}"
|
||||
f"{end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge.hex()}"
|
||||
)
|
||||
return None, False
|
||||
|
||||
@ -2425,8 +2444,8 @@ class FullNode:
|
||||
if field_vdf == CompressibleVDFField.CC_EOS_VDF:
|
||||
for index, sub_slot in enumerate(block.finished_sub_slots):
|
||||
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
|
||||
new_proofs = dataclasses.replace(sub_slot.proofs, challenge_chain_slot_proof=vdf_proof)
|
||||
new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
|
||||
new_proofs = sub_slot.proofs.replace(challenge_chain_slot_proof=vdf_proof)
|
||||
new_subslot = sub_slot.replace(proofs=new_proofs)
|
||||
new_finished_subslots = block.finished_sub_slots
|
||||
new_finished_subslots[index] = new_subslot
|
||||
new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
|
||||
@ -2437,8 +2456,8 @@ class FullNode:
|
||||
sub_slot.infused_challenge_chain is not None
|
||||
and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
|
||||
):
|
||||
new_proofs = dataclasses.replace(sub_slot.proofs, infused_challenge_chain_slot_proof=vdf_proof)
|
||||
new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
|
||||
new_proofs = sub_slot.proofs.replace(infused_challenge_chain_slot_proof=vdf_proof)
|
||||
new_subslot = sub_slot.replace(proofs=new_proofs)
|
||||
new_finished_subslots = block.finished_sub_slots
|
||||
new_finished_subslots[index] = new_subslot
|
||||
new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
|
||||
|
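A recurring pattern in the hunks above (and throughout the rest of this diff) is swapping `dataclasses.replace(obj, field=value)` for the object's own `obj.replace(field=value)` method, consistent with these streamable types now being Rust-backed `chia_rs` classes rather than plain Python dataclasses. A minimal sketch of the equivalence, under that assumption; `SubSlotProofsSketch` is a stand-in, not the real chia type:

import dataclasses
from dataclasses import dataclass
from typing import Optional


@dataclass(frozen=True)
class SubSlotProofsSketch:
    challenge_chain_slot_proof: Optional[bytes]
    infused_challenge_chain_slot_proof: Optional[bytes]

    # chia_rs types expose an equivalent method directly; that is what the
    # new code in this diff calls instead of dataclasses.replace().
    def replace(self, **changes: object) -> "SubSlotProofsSketch":
        return dataclasses.replace(self, **changes)


proofs = SubSlotProofsSketch(b"cc", None)
updated_old = dataclasses.replace(proofs, challenge_chain_slot_proof=b"new")  # old style
updated_new = proofs.replace(challenge_chain_slot_proof=b"new")  # new style, as in the diff
assert updated_old == updated_new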
@ -576,7 +576,7 @@ class FullNodeAPI:
else:
if self.full_node.full_node_store.get_sub_slot(request.challenge_hash) is None:
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
self.log.info(f"Don't have challenge hash {request.challenge_hash}")
self.log.info(f"Don't have challenge hash {request.challenge_hash.hex()}")

sp: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point_by_index(
request.challenge_hash,
@ -651,7 +651,8 @@ class FullNodeAPI:
else:
self.log.debug(
f"Signage point {request.index_from_challenge} not added, CC challenge: "
f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
f"{request.challenge_chain_vdf.challenge.hex()}, "
f"RC challenge: {request.reward_chain_vdf.challenge.hex()}"
)

return None
@ -706,7 +707,7 @@ class FullNodeAPI:
if sp_vdfs.rc_vdf.output.get_hash() != request.reward_chain_sp:
self.log.debug(
f"Received proof of space for a potentially old signage point {request.challenge_chain_sp}. "
f"Current sp: {sp_vdfs.rc_vdf.output.get_hash()}"
f"Current sp: {sp_vdfs.rc_vdf.output.get_hash().hex()}"
)
return None

@ -879,9 +880,9 @@ class FullNodeAPI:
sub_slot_iters = peak.sub_slot_iters
for sub_slot in finished_sub_slots:
if sub_slot.challenge_chain.new_difficulty is not None:
difficulty = sub_slot.challenge_chain.new_difficulty
difficulty = uint64(sub_slot.challenge_chain.new_difficulty)
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
sub_slot_iters = sub_slot.challenge_chain.new_sub_slot_iters
sub_slot_iters = uint64(sub_slot.challenge_chain.new_sub_slot_iters)
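Many hunks above and below wrap fields such as `signage_point_index`, `new_difficulty`, and `new_sub_slot_iters` in explicit `uint8(...)`/`uint64(...)` constructors. A plausible reading is that the Rust-backed types hand these values back as plain `int`, so call sites that require the sized types from `chia.util.ints` now convert explicitly and get range checking for free. A small sketch of that behaviour (the literal values are illustrative only):

from chia.util.ints import uint8, uint64

raw_index: int = 5  # e.g. what a chia_rs field may return as a plain int
index = uint8(raw_index)     # typed, range-checked wrapper: 0 <= value < 2**8
iters = uint64(147_849_216)  # same idea for 64-bit quantities

# Constructing out of range raises, which is the point of converting early:
try:
    uint8(300)
except ValueError:
    pass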
required_iters: uint64 = calculate_iterations_quality(
self.full_node.constants.DIFFICULTY_CONSTANT_FACTOR,
@ -1012,14 +1013,9 @@ class FullNodeAPI:
self.log.warning("Signature not valid. There might be a collision in plots. Ignore this during tests.")
return None

fsb2 = dataclasses.replace(
candidate.foliage,
foliage_block_data_signature=farmer_request.foliage_block_data_signature,
)
fsb2 = candidate.foliage.replace(foliage_block_data_signature=farmer_request.foliage_block_data_signature)
if candidate.is_transaction_block():
fsb2 = dataclasses.replace(
fsb2, foliage_transaction_block_signature=farmer_request.foliage_transaction_block_signature
)
fsb2 = fsb2.replace(foliage_transaction_block_signature=farmer_request.foliage_transaction_block_signature)

new_candidate = dataclasses.replace(candidate, foliage=fsb2)
if not self.full_node.has_valid_pool_sig(new_candidate):
@ -1093,7 +1089,7 @@ class FullNodeAPI:
if not added:
self.log.error(
f"Was not able to add end of sub-slot: "
f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}. "
f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge.hex()}. "
f"Re-sending new-peak to timelord"
)
await self.full_node.send_peak_to_timelords(peer=peer)
@ -1276,7 +1272,7 @@ class FullNodeAPI:
await self.full_node.transaction_queue.put(queue_entry, peer_id=None, high_priority=True)
try:
with anyio.fail_after(delay=45):
status, error = await queue_entry.done
status, error = await queue_entry.done.wait()
except TimeoutError:
response = wallet_protocol.TransactionAck(spend_name, uint8(MempoolInclusionStatus.PENDING), None)
else:
@ -1496,7 +1492,7 @@ class FullNodeAPI:
# the returned puzzle hashes are the ones we ended up subscribing to.
# It will have filtered duplicates and ones exceeding the subscription
# limit.
puzzle_hashes = self.full_node.subscriptions.add_ph_subscriptions(
puzzle_hashes = self.full_node.subscriptions.add_puzzle_subscriptions(
peer.peer_node_id, request.puzzle_hashes, max_subscriptions
)

@ -45,8 +45,11 @@ class FullNodeStore:
candidate_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]
candidate_backup_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]

# Header hashes of unfinished blocks that we have seen recently
seen_unfinished_blocks: Set[bytes32]
# Block hashes of unfinished blocks that we have seen recently. This is
# effectively a Set[bytes32] but in order to evict the oldest items first,
# we use a Dict that preserves insertion order, and remove from the
# beginning
seen_unfinished_blocks: Dict[bytes32, None]

# Unfinished blocks, keyed from reward hash
unfinished_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]]
@ -86,10 +89,12 @@ class FullNodeStore:
serialized_wp_message: Optional[Message]
serialized_wp_message_tip: Optional[bytes32]

max_seen_unfinished_blocks: int

def __init__(self, constants: ConsensusConstants):
self.candidate_blocks = {}
self.candidate_backup_blocks = {}
self.seen_unfinished_blocks = set()
self.seen_unfinished_blocks = {}
self.unfinished_blocks = {}
self.finished_sub_slots = []
self.future_eos_cache = {}
@ -108,6 +113,7 @@ class FullNodeStore:
self.tx_fetch_tasks = {}
self.serialized_wp_message = None
self.serialized_wp_message_tip = None
self.max_seen_unfinished_blocks = 1000

def add_candidate_block(
self, quality_string: bytes32, height: uint32, unfinished_block: UnfinishedBlock, backup: bool = False
@ -148,12 +154,13 @@ class FullNodeStore:
def seen_unfinished_block(self, object_hash: bytes32) -> bool:
if object_hash in self.seen_unfinished_blocks:
return True
self.seen_unfinished_blocks.add(object_hash)
self.seen_unfinished_blocks[object_hash] = None
if len(self.seen_unfinished_blocks) > self.max_seen_unfinished_blocks:
# remove the least recently added hash
to_remove = next(iter(self.seen_unfinished_blocks))
del self.seen_unfinished_blocks[to_remove]
return False

def clear_seen_unfinished_blocks(self) -> None:
self.seen_unfinished_blocks.clear()
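The hunk above replaces a plain `Set[bytes32]` with a `Dict[bytes32, None]`: since Python 3.7 dicts preserve insertion order, so `next(iter(d))` yields the oldest key, giving a set with cheap first-in-first-out eviction. A self-contained sketch of the same idea:

from typing import Dict


class FifoSet:
    """A set that evicts its oldest entry once a size cap is exceeded."""

    def __init__(self, max_items: int) -> None:
        self._items: Dict[bytes, None] = {}  # insertion-ordered since Python 3.7
        self._max_items = max_items

    def seen(self, item: bytes) -> bool:
        """Return True if already present; otherwise record it, evicting the oldest."""
        if item in self._items:
            return True
        self._items[item] = None
        if len(self._items) > self._max_items:
            oldest = next(iter(self._items))  # first-inserted key
            del self._items[oldest]
        return False


cache = FifoSet(max_items=2)
assert cache.seen(b"a") is False
assert cache.seen(b"a") is True
cache.seen(b"b")
cache.seen(b"c")                   # evicts b"a"
assert cache.seen(b"a") is False   # b"a" was evicted, so it reads as new again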
def add_unfinished_block(
self, height: uint32, unfinished_block: UnfinishedBlock, result: PreValidationResult
) -> None:
@ -171,8 +178,9 @@ class FullNodeStore:
return None
return result[2]

def get_unfinished_blocks(self) -> Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]]:
return self.unfinished_blocks
# returns all unfinished blocks for the specified height
def get_unfinished_blocks(self, height: uint32) -> List[UnfinishedBlock]:
return [block for ub_height, block, _ in self.unfinished_blocks.values() if ub_height == height]
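`get_unfinished_blocks` changes from returning the store's whole internal dict to filtering by height, which keeps the dict private and matches the RPC change further down (`get_unfinished_block_headers` now passes `peak.height`). A hedged sketch of the filtering with simplified stand-in types for bytes32, UnfinishedBlock, and PreValidationResult:

from typing import Dict, List, Tuple

unfinished_blocks: Dict[bytes, Tuple[int, str, object]] = {
    b"r1": (10, "block-a", None),
    b"r2": (11, "block-b", None),
    b"r3": (11, "block-c", None),
}


def get_unfinished_blocks(height: int) -> List[str]:
    # Same shape as the new store method: keep only blocks at the given height.
    return [block for ub_height, block, _ in unfinished_blocks.values() if ub_height == height]


assert get_unfinished_blocks(11) == ["block-b", "block-c"]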
def clear_unfinished_blocks_below(self, height: uint32) -> None:
del_keys: List[bytes32] = []
@ -219,7 +227,7 @@ class FullNodeStore:

self.future_cache_key_times[signage_point.rc_vdf.challenge] = int(time.time())
self.future_sp_cache[signage_point.rc_vdf.challenge].append((index, signage_point))
log.info(f"Don't have rc hash {signage_point.rc_vdf.challenge}. caching signage point {index}.")
log.info(f"Don't have rc hash {signage_point.rc_vdf.challenge.hex()}. caching signage point {index}.")

def get_future_ip(self, rc_challenge_hash: bytes32) -> List[timelord_protocol.NewInfusionPointVDF]:
return self.future_ip_cache.get(rc_challenge_hash, [])
@ -287,7 +295,7 @@ class FullNodeStore:
# This prevents other peers from appending fake VDFs to our cache
log.error(
f"bad cc_challenge in new_finished_sub_slot, "
f"got {eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}"
f"got {eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge.hex()}"
f"expected {cc_challenge}"
)
return None
@ -310,7 +318,7 @@ class FullNodeStore:
log.debug("dont add slot, total_iters < peak.total_iters")
return None

rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
rc_challenge = bytes32(eos.reward_chain.end_of_slot_vdf.challenge)
cc_start_element = peak.challenge_vdf_output
iters = uint64(total_iters - peak.total_iters)
if peak.reward_infusion_new_challenge != rc_challenge:
@ -436,9 +444,8 @@ class FullNodeStore:
eos.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
# The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
if eos.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_cc_vdf_info,
number_of_iterations=sub_slot_iters,
if eos.challenge_chain.challenge_chain_end_of_slot_vdf != partial_cc_vdf_info.replace(
number_of_iterations=sub_slot_iters
):
return None
if not eos.proofs.challenge_chain_slot_proof.normalized_to_identity and not validate_vdf(
@ -487,9 +494,8 @@ class FullNodeStore:
eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
# The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
if eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_icc_vdf_info,
number_of_iterations=icc_iters,
if eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != partial_icc_vdf_info.replace(
number_of_iterations=icc_iters
):
return None
if not eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity and not validate_vdf(
@ -615,9 +621,7 @@ class FullNodeStore:
uint64(sp_total_iters - curr.total_iters),
signage_point.rc_vdf.output,
)
if not signage_point.cc_vdf == dataclasses.replace(
cc_vdf_info_expected, number_of_iterations=delta_iters
):
if not signage_point.cc_vdf == cc_vdf_info_expected.replace(number_of_iterations=delta_iters):
self.add_to_future_sp(signage_point, index)
return False
if check_from_start_of_ss:

@ -9,7 +9,7 @@ from chia.types.blockchain_format.sized_bytes import bytes32
def get_hints_and_subscription_coin_ids(
state_change_summary: StateChangeSummary,
has_coin_subscription: Callable[[bytes32], bool],
has_ph_subscription: Callable[[bytes32], bool],
has_puzzle_subscription: Callable[[bytes32], bool],
) -> Tuple[List[Tuple[bytes32, bytes]], List[bytes32]]:
# Precondition: all hints passed in are max 32 bytes long
# Returns the hints that we need to add to the DB, and the coin ids that need to be looked up
@ -26,7 +26,7 @@ def get_hints_and_subscription_coin_ids(
lookup_coin_ids.add(coin_id)

def add_if_ph_subscription(puzzle_hash: bytes32, coin_id: bytes32) -> None:
if has_ph_subscription(puzzle_hash):
if has_puzzle_subscription(puzzle_hash):
lookup_coin_ids.add(coin_id)

for spend_id, puzzle_hash in state_change_summary.removals:

@ -9,136 +9,196 @@ from chia.types.blockchain_format.sized_bytes import bytes32
log = logging.getLogger(__name__)


# The PeerSubscriptions class is essentially a multi-index container. It can be
# indexed by peer_id, coin_id and puzzle_hash.
@dataclass(frozen=True)
class PeerSubscriptions:
# TODO: use NewType all over to describe these various uses of the same types
# Puzzle Hash : Set[Peer ID]
_coin_subscriptions: Dict[bytes32, Set[bytes32]] = field(default_factory=dict, init=False)
# Puzzle Hash : Set[Peer ID]
_ph_subscriptions: Dict[bytes32, Set[bytes32]] = field(default_factory=dict, init=False)
# Peer ID: Set[Coin ids]
_peer_coin_ids: Dict[bytes32, Set[bytes32]] = field(default_factory=dict, init=False)
# Peer ID: Set[puzzle_hash]
_peer_puzzle_hash: Dict[bytes32, Set[bytes32]] = field(default_factory=dict, init=False)
# Peer ID: subscription count
_peer_sub_counter: Dict[bytes32, int] = field(default_factory=dict, init=False)
class SubscriptionSet:
_subscriptions_for_peer: Dict[bytes32, Set[bytes32]] = field(default_factory=dict, init=False)
_peers_for_subscription: Dict[bytes32, Set[bytes32]] = field(default_factory=dict, init=False)

def has_ph_subscription(self, ph: bytes32) -> bool:
return ph in self._ph_subscriptions
def add_subscription(self, peer_id: bytes32, item: bytes32) -> bool:
peers = self._peers_for_subscription.setdefault(item, set())

def has_coin_subscription(self, coin_id: bytes32) -> bool:
return coin_id in self._coin_subscriptions
if peer_id in peers:
return False

def add_ph_subscriptions(self, peer_id: bytes32, phs: List[bytes32], max_items: int) -> Set[bytes32]:
"""
returns the puzzle hashes that were actually subscribed to. These may be
fewer than requested in case:
* there are duplicate puzzle_hashes
* some puzzle hashes are already subscribed to
* the max_items limit is exceeded
"""
subscriptions = self._subscriptions_for_peer.setdefault(peer_id, set())
subscriptions.add(item)
peers.add(peer_id)

puzzle_hash_peers = self._peer_puzzle_hash.setdefault(peer_id, set())
existing_sub_count = self._peer_sub_counter.setdefault(peer_id, 0)
return True

ret: Set[bytes32] = set()
def remove_subscription(self, peer_id: bytes32, item: bytes32) -> bool:
subscriptions = self._subscriptions_for_peer.get(peer_id)

# if we've reached the limit on number of subscriptions, just bail
if existing_sub_count >= max_items:
log.info(
"peer_id: %s reached max number of puzzle-hash subscriptions. "
"Not all its coin states will be reported",
peer_id,
)
return ret
if subscriptions is None or item not in subscriptions:
return False

# decrement this counter as we go, to know if we've hit the limit of
# number of subscriptions
subscriptions_left = max_items - existing_sub_count
peers = self._peers_for_subscription[item]
peers.remove(peer_id)
subscriptions.remove(item)

for ph in phs:
ph_sub = self._ph_subscriptions.setdefault(ph, set())
if peer_id in ph_sub:
continue
if len(subscriptions) == 0:
self._subscriptions_for_peer.pop(peer_id)

ret.add(ph)
ph_sub.add(peer_id)
puzzle_hash_peers.add(ph)
self._peer_sub_counter[peer_id] += 1
subscriptions_left -= 1
if len(peers) == 0:
self._peers_for_subscription.pop(item)

if subscriptions_left == 0:
log.info(
"peer_id: %s reached max number of puzzle-hash subscriptions. "
"Not all its coin states will be reported",
peer_id,
)
break
return ret
return True

def add_coin_subscriptions(self, peer_id: bytes32, coin_ids: List[bytes32], max_items: int) -> None:
coin_id_peers = self._peer_coin_ids.setdefault(peer_id, set())
existing_sub_count = self._peer_sub_counter.setdefault(peer_id, 0)
def has_subscription(self, item: bytes32) -> bool:
return item in self._peers_for_subscription

# if we've reached the limit on number of subscriptions, just bail
if existing_sub_count >= max_items:
log.info(
"peer_id: %s reached max number of coin subscriptions. Not all its coin states will be reported",
peer_id,
)
return

# decrement this counter as we go, to know if we've hit the limit of
# number of subscriptions
subscriptions_left = max_items - existing_sub_count

for coin_id in coin_ids:
coin_sub = self._coin_subscriptions.setdefault(coin_id, set())
if peer_id in coin_sub:
continue

coin_sub.add(peer_id)
coin_id_peers.add(coin_id)
self._peer_sub_counter[peer_id] += 1
subscriptions_left -= 1

if subscriptions_left == 0:
log.info(
"peer_id: %s reached max number of coin subscriptions. Not all its coin states will be reported",
peer_id,
)
break
def count_subscriptions(self, peer_id: bytes32) -> int:
return len(self._subscriptions_for_peer.get(peer_id, {}))

def remove_peer(self, peer_id: bytes32) -> None:
counter = 0
puzzle_hashes = self._peer_puzzle_hash.get(peer_id)
if puzzle_hashes is not None:
for ph in puzzle_hashes:
subs = self._ph_subscriptions[ph]
subs.remove(peer_id)
counter += 1
if subs == set():
self._ph_subscriptions.pop(ph)
self._peer_puzzle_hash.pop(peer_id)
for item in self._subscriptions_for_peer.pop(peer_id, {}):
self._peers_for_subscription[item].remove(peer_id)

coin_ids = self._peer_coin_ids.get(peer_id)
if coin_ids is not None:
for coin_id in coin_ids:
subs = self._coin_subscriptions[coin_id]
subs.remove(peer_id)
counter += 1
if subs == set():
self._coin_subscriptions.pop(coin_id)
self._peer_coin_ids.pop(peer_id)
if len(self._peers_for_subscription[item]) == 0:
self._peers_for_subscription.pop(item)

if peer_id in self._peer_sub_counter:
num_subs = self._peer_sub_counter.pop(peer_id)
assert num_subs == counter
def subscriptions(self, peer_id: bytes32) -> Set[bytes32]:
return self._subscriptions_for_peer.get(peer_id, set())

def peers(self, item: bytes32) -> Set[bytes32]:
return self._peers_for_subscription.get(item, set())

def total_count(self) -> int:
return len(self._peers_for_subscription)
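The new `SubscriptionSet` keeps the peer-to-items and item-to-peers mappings in lockstep, so membership tests, per-peer listing, and reverse lookup are all O(1) dictionary operations, and empty buckets are pruned so stale keys do not accumulate. A condensed sketch of the invariant, using plain `bytes` in place of `bytes32`:

from typing import Dict, Set


class TwoWayIndex:
    def __init__(self) -> None:
        self.items_for_peer: Dict[bytes, Set[bytes]] = {}
        self.peers_for_item: Dict[bytes, Set[bytes]] = {}

    def add(self, peer: bytes, item: bytes) -> bool:
        peers = self.peers_for_item.setdefault(item, set())
        if peer in peers:
            return False  # duplicate; both sides already agree
        peers.add(peer)
        self.items_for_peer.setdefault(peer, set()).add(item)
        return True

    def remove(self, peer: bytes, item: bytes) -> bool:
        items = self.items_for_peer.get(peer)
        if items is None or item not in items:
            return False
        items.remove(item)
        self.peers_for_item[item].remove(peer)
        # Prune empty buckets so keys don't leak:
        if not items:
            self.items_for_peer.pop(peer)
        if not self.peers_for_item[item]:
            self.peers_for_item.pop(item)
        return True


idx = TwoWayIndex()
idx.add(b"peer-1", b"ph-1")
assert idx.peers_for_item[b"ph-1"] == {b"peer-1"}
idx.remove(b"peer-1", b"ph-1")
assert b"ph-1" not in idx.peers_for_item  # pruned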
@dataclass(frozen=True)
class PeerSubscriptions:
_puzzle_subscriptions: SubscriptionSet = field(default_factory=SubscriptionSet)
_coin_subscriptions: SubscriptionSet = field(default_factory=SubscriptionSet)

def has_puzzle_subscription(self, puzzle_hash: bytes32) -> bool:
return self._puzzle_subscriptions.has_subscription(puzzle_hash)

def has_coin_subscription(self, coin_id: bytes32) -> bool:
return self._coin_subscriptions.has_subscription(coin_id)

def peer_subscription_count(self, peer_id: bytes32) -> int:
puzzle_subscriptions = self._puzzle_subscriptions.count_subscriptions(peer_id)
coin_subscriptions = self._coin_subscriptions.count_subscriptions(peer_id)
return puzzle_subscriptions + coin_subscriptions

def add_puzzle_subscriptions(self, peer_id: bytes32, puzzle_hashes: List[bytes32], max_items: int) -> Set[bytes32]:
"""
Adds subscriptions until max_items is reached. Filters out duplicates and returns all additions.
"""

subscription_count = self.peer_subscription_count(peer_id)
added: Set[bytes32] = set()

def limit_reached() -> Set[bytes32]:
log.info(
"Peer %s attempted to exceed the subscription limit while adding puzzle subscriptions.",
peer_id,
)
return added

# If the subscription limit is reached, bail.
if subscription_count >= max_items:
return limit_reached()

# Decrement this counter to know if we've hit the subscription limit.
subscriptions_left = max_items - subscription_count

for puzzle_hash in puzzle_hashes:
if not self._puzzle_subscriptions.add_subscription(peer_id, puzzle_hash):
continue

subscriptions_left -= 1
added.add(puzzle_hash)

if subscriptions_left == 0:
return limit_reached()

return added

def add_coin_subscriptions(self, peer_id: bytes32, coin_ids: List[bytes32], max_items: int) -> Set[bytes32]:
"""
Adds subscriptions until max_items is reached. Filters out duplicates and returns all additions.
"""

subscription_count = self.peer_subscription_count(peer_id)
added: Set[bytes32] = set()

def limit_reached() -> Set[bytes32]:
log.info(
"Peer %s attempted to exceed the subscription limit while adding coin subscriptions.",
peer_id,
)
return added

# If the subscription limit is reached, bail.
if subscription_count >= max_items:
return limit_reached()

# Decrement this counter to know if we've hit the subscription limit.
subscriptions_left = max_items - subscription_count

for coin_id in coin_ids:
if not self._coin_subscriptions.add_subscription(peer_id, coin_id):
continue

subscriptions_left -= 1
added.add(coin_id)

if subscriptions_left == 0:
return limit_reached()

return added

def remove_puzzle_subscriptions(self, peer_id: bytes32, puzzle_hashes: List[bytes32]) -> Set[bytes32]:
"""
Removes subscriptions. Filters out duplicates and returns all removals.
"""

removed: Set[bytes32] = set()

for puzzle_hash in puzzle_hashes:
if not self._puzzle_subscriptions.remove_subscription(peer_id, puzzle_hash):
continue

removed.add(puzzle_hash)

return removed

def remove_coin_subscriptions(self, peer_id: bytes32, coin_ids: List[bytes32]) -> Set[bytes32]:
"""
Removes subscriptions. Filters out duplicates and returns all removals.
"""

removed: Set[bytes32] = set()

for coin_id in coin_ids:
if not self._coin_subscriptions.remove_subscription(peer_id, coin_id):
continue

removed.add(coin_id)

return removed

def remove_peer(self, peer_id: bytes32) -> None:
self._puzzle_subscriptions.remove_peer(peer_id)
self._coin_subscriptions.remove_peer(peer_id)

def coin_subscriptions(self, peer_id: bytes32) -> Set[bytes32]:
return self._coin_subscriptions.subscriptions(peer_id)

def puzzle_subscriptions(self, peer_id: bytes32) -> Set[bytes32]:
return self._puzzle_subscriptions.subscriptions(peer_id)

def peers_for_coin_id(self, coin_id: bytes32) -> Set[bytes32]:
return self._coin_subscriptions.get(coin_id, set())
return self._coin_subscriptions.peers(coin_id)

def peers_for_puzzle_hash(self, puzzle_hash: bytes32) -> Set[bytes32]:
return self._ph_subscriptions.get(puzzle_hash, set())
return self._puzzle_subscriptions.peers(puzzle_hash)

def coin_subscription_count(self) -> int:
return self._coin_subscriptions.total_count()

def puzzle_subscription_count(self) -> int:
return self._puzzle_subscriptions.total_count()
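Note that the refactored `PeerSubscriptions` shares one budget (`max_items`) across puzzle-hash and coin subscriptions, since `peer_subscription_count` sums both sets. A hedged usage sketch of the API shown in this diff; the peer id and hashes below are made-up values:

from chia.full_node.subscriptions import PeerSubscriptions  # path assumed from this diff
from chia.types.blockchain_format.sized_bytes import bytes32

subs = PeerSubscriptions()
peer = bytes32(b"\x01" * 32)
ph_a, ph_b = bytes32(b"\x02" * 32), bytes32(b"\x03" * 32)
coin = bytes32(b"\x04" * 32)

# Duplicates are filtered; only genuinely new items come back.
added = subs.add_puzzle_subscriptions(peer, [ph_a, ph_a, ph_b], max_items=100)
assert added == {ph_a, ph_b}

# Coin subscriptions draw from the same per-peer budget:
subs.add_coin_subscriptions(peer, [coin], max_items=100)
assert subs.peer_subscription_count(peer) == 3

subs.remove_peer(peer)
assert subs.peer_subscription_count(peer) == 0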
@ -442,7 +442,7 @@ class WeightProofHandler:
None,
None,
None,
curr.reward_chain_block.signage_point_index,
uint8(curr.reward_chain_block.signage_point_index),
None,
None,
None,
@ -549,7 +549,7 @@ class WeightProofHandler:
curr.challenge_chain_ip_proof,
icc_ip_proof,
cc_sp_info,
curr.reward_chain_block.signage_point_index,
uint8(curr.reward_chain_block.signage_point_index),
None,
None,
None,
@ -565,7 +565,7 @@ class WeightProofHandler:
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0)

peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
peak_height = uint32(weight_proof.recent_chain_data[-1].reward_chain_block.height)
log.info(f"validate weight proof peak height {peak_height}")
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
@ -707,10 +707,10 @@ def _create_sub_epoch_data(
) -> SubEpochData:
reward_chain_hash: bytes32 = sub_epoch_summary.reward_chain_hash
# Number of subblocks overflow in previous slot
previous_sub_epoch_overflows: uint8 = sub_epoch_summary.num_blocks_overflow  # total in sub epoch - expected
previous_sub_epoch_overflows = uint8(sub_epoch_summary.num_blocks_overflow)  # total in sub epoch - expected
# New work difficulty and iterations per sub-slot
sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
sub_slot_iters: Optional[int] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[int] = sub_epoch_summary.new_difficulty
return SubEpochData(reward_chain_hash, previous_sub_epoch_overflows, sub_slot_iters, new_difficulty)

@ -746,7 +746,7 @@ async def _challenge_block_vdfs(
header_block.challenge_chain_ip_proof,
None,
cc_sp_info,
header_block.reward_chain_block.signage_point_index,
uint8(header_block.reward_chain_block.signage_point_index),
None,
None,
None,
@ -886,7 +886,7 @@ def _map_sub_epoch_summaries(

# if new epoch update diff and iters
if data.new_difficulty is not None:
curr_difficulty = data.new_difficulty
curr_difficulty = uint64(data.new_difficulty)

# add to dict
summaries.append(ses)
@ -998,7 +998,7 @@ def _validate_segment(
return False, uint64(0), uint64(0), uint64(0), []
assert sub_slot_data.signage_point_index is not None
ip_iters = ip_iters + calculate_ip_iters(
constants, curr_ssi, sub_slot_data.signage_point_index, required_iters
constants, curr_ssi, uint8(sub_slot_data.signage_point_index), required_iters
)
vdf_list = _get_challenge_block_vdfs(constants, idx, segment.sub_slots, curr_ssi)
to_validate.extend(vdf_list)
@ -1025,7 +1025,7 @@ def _get_challenge_block_vdfs(
assert sub_slot_data.signage_point_index
sp_input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity and sub_slot_idx >= 1:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
is_overflow = is_overflow_block(constants, uint8(sub_slot_data.signage_point_index))
prev_ssd = sub_slots[sub_slot_idx - 1]
sp_input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
@ -1103,7 +1103,7 @@ def _validate_sub_slot_data(
assert sub_slot_data.cc_sp_vdf_info
input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
is_overflow = is_overflow_block(constants, uint8(sub_slot_data.signage_point_index))
input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
)
@ -1208,9 +1208,9 @@ def validate_recent_blocks(
last_blocks_to_validate = 100  # todo remove cap after benchmarks
for summary in summaries[:ses_idx]:
if summary.new_sub_slot_iters is not None:
ssi = summary.new_sub_slot_iters
ssi = uint64(summary.new_sub_slot_iters)
if summary.new_difficulty is not None:
diff = summary.new_difficulty
diff = uint64(summary.new_difficulty)

ses_blocks, sub_slots, transaction_blocks = 0, 0, 0
challenge, prev_challenge = recent_chain.recent_chain_data[0].reward_chain_block.pos_ss_cc_challenge_hash, None
@ -1226,18 +1226,18 @@ def validate_recent_blocks(
for sub_slot in block.finished_sub_slots:
prev_challenge = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
challenge = sub_slot.challenge_chain.get_hash()
deficit = sub_slot.reward_chain.deficit
deficit = uint8(sub_slot.reward_chain.deficit)
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
ses = True
assert summaries[ses_idx].get_hash() == sub_slot.challenge_chain.subepoch_summary_hash
ses_idx += 1
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
ssi = sub_slot.challenge_chain.new_sub_slot_iters
ssi = uint64(sub_slot.challenge_chain.new_sub_slot_iters)
if sub_slot.challenge_chain.new_difficulty is not None:
diff = sub_slot.challenge_chain.new_difficulty
diff = uint64(sub_slot.challenge_chain.new_difficulty)

if (challenge is not None) and (prev_challenge is not None):
overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
overflow = is_overflow_block(constants, uint8(block.reward_chain_block.signage_point_index))
if not adjusted:
assert prev_block_record is not None
prev_block_record = dataclasses.replace(
@ -1334,7 +1334,7 @@ def __validate_pospace(

sub_slot_data: SubSlotData = segment.sub_slots[idx]

if sub_slot_data.signage_point_index and is_overflow_block(constants, sub_slot_data.signage_point_index):
if sub_slot_data.signage_point_index and is_overflow_block(constants, uint8(sub_slot_data.signage_point_index)):
curr_slot = segment.sub_slots[idx - 1]
assert curr_slot.cc_slot_end_info
challenge = curr_slot.cc_slot_end_info.challenge
@ -1391,14 +1391,14 @@ def __get_rc_sub_slot(
slots_n = 1
assert first
assert first.signage_point_index is not None
if is_overflow_block(constants, first.signage_point_index):
if is_overflow_block(constants, uint8(first.signage_point_index)):
if idx >= 2 and slots[idx - 2].cc_slot_end is None:
slots_n = 2

new_diff = None if ses is None else ses.new_difficulty
new_ssi = None if ses is None else ses.new_sub_slot_iters
ses_hash: Optional[bytes32] = None if ses is None else ses.get_hash()
overflow = is_overflow_block(constants, first.signage_point_index)
overflow = is_overflow_block(constants, uint8(first.signage_point_index))
if overflow:
if idx >= 2 and slots[idx - 2].cc_slot_end is not None and slots[idx - 1].cc_slot_end is not None:
ses_hash = None
@ -1483,9 +1483,9 @@ def _get_curr_diff_ssi(
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
for ses in reversed(summaries[0:idx]):
if ses.new_sub_slot_iters is not None:
curr_ssi = ses.new_sub_slot_iters
curr_ssi = uint64(ses.new_sub_slot_iters)
assert ses.new_difficulty is not None
curr_difficulty = ses.new_difficulty
curr_difficulty = uint64(ses.new_difficulty)
break

return curr_difficulty, curr_ssi
@ -1521,7 +1521,7 @@ def _get_last_ses_hash(
if slot.challenge_chain.subepoch_summary_hash is not None:
return (
slot.challenge_chain.subepoch_summary_hash,
curr.reward_chain_block.height,
uint32(curr.reward_chain_block.height),
)
idx += 1
return None, uint32(0)
@ -1558,8 +1558,8 @@ def get_sp_total_iters(
assert sub_slot_data.cc_ip_vdf_info is not None
assert sub_slot_data.total_iters is not None
assert sub_slot_data.signage_point_index is not None
sp_iters: uint64 = calculate_sp_iters(constants, ssi, sub_slot_data.signage_point_index)
ip_iters: uint64 = sub_slot_data.cc_ip_vdf_info.number_of_iterations
sp_iters: uint64 = calculate_sp_iters(constants, ssi, uint8(sub_slot_data.signage_point_index))
ip_iters: uint64 = uint64(sub_slot_data.cc_ip_vdf_info.number_of_iterations)
sp_sub_slot_total_iters = uint128(sub_slot_data.total_iters - ip_iters)
if is_overflow:
sp_sub_slot_total_iters = uint128(sp_sub_slot_total_iters - ssi)
@ -1645,7 +1645,7 @@ async def validate_weight_proof_inner(
if len(weight_proof.sub_epochs) == 0:
return False, []

peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
peak_height = uint32(weight_proof.recent_chain_data[-1].reward_chain_block.height)
log.info(f"validate weight proof peak height {peak_height}")
seed = summaries[-2].get_hash()
rng = random.Random(seed)
chia/legacy/__init__.py (new empty file)
@ -138,6 +138,16 @@ class Cache:
cache_data: CacheDataV1 = CacheDataV1.from_bytes(stored_cache.blob)
self._data = {}
estimated_c2_sizes: Dict[int, int] = {}
measured_sizes: Dict[int, int] = {
32: 738,
33: 1083,
34: 1771,
35: 3147,
36: 5899,
37: 11395,
38: 22395,
39: 44367,
}
for path, cache_entry in cache_data.entries:
new_entry = CacheEntry(
DiskProver.from_bytes(cache_entry.prover_data),
@ -160,7 +170,14 @@ class Cache:
# static data: version(2) + table pointers (<=96) + id(32) + k(1) => ~130
# path: up to ~1870, all above will lead to false positive.
# See https://github.com/Chia-Network/chiapos/blob/3ee062b86315823dd775453ad320b8be892c7df3/src/prover_disk.hpp#L282-L287 # noqa: E501
if prover_size > (estimated_c2_sizes[k] + memo_size + 2000):

# Use experimental measurements if larger than the estimates
# https://github.com/Chia-Network/chia-blockchain/issues/16063
check_size = estimated_c2_sizes[k] + memo_size + 2000
if k in measured_sizes:
check_size = max(check_size, measured_sizes[k])

if prover_size > check_size:
log.warning(
"Suspicious cache entry dropped. Recommended: stop the harvester, remove "
f"{self._path}, restart. Entry: size {prover_size}, path {path}"
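The harvester cache hunk above tightens its "suspicious entry" heuristic: the threshold is still the analytic estimate plus slack, but for k-sizes where an empirical measurement exists the larger of the two wins, so legitimate plots near the estimate are not dropped. A small sketch of that decision (the estimate value below is illustrative):

from typing import Dict

estimated_c2_sizes: Dict[int, int] = {32: 700}   # illustrative per-k estimate
measured_sizes: Dict[int, int] = {32: 738}       # empirical value from the diff


def is_suspicious(prover_size: int, k: int, memo_size: int) -> bool:
    # Analytic estimate plus slack, as in the original check:
    check_size = estimated_c2_sizes[k] + memo_size + 2000
    # Prefer the measured upper bound when one exists for this k:
    if k in measured_sizes:
        check_size = max(check_size, measured_sizes[k])
    return prover_size > check_size


assert is_suspicious(50_000, k=32, memo_size=112) is True
assert is_suspicious(2_500, k=32, memo_size=112) is False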
@ -4,7 +4,7 @@ import logging
from typing import List, Optional, Tuple

from chia_rs import G1Element
from clvm.casts import int_from_bytes, int_to_bytes
from clvm.casts import int_from_bytes

from chia.clvm.singleton import SINGLETON_LAUNCHER
from chia.consensus.block_rewards import calculate_pool_reward
@ -93,7 +93,7 @@ def create_p2_singleton_puzzle(

def launcher_id_to_p2_puzzle_hash(launcher_id: bytes32, seconds_delay: uint64, delayed_puzzle_hash: bytes32) -> bytes32:
return create_p2_singleton_puzzle(
SINGLETON_MOD_HASH, launcher_id, int_to_bytes(seconds_delay), delayed_puzzle_hash
SINGLETON_MOD_HASH, launcher_id, seconds_delay, delayed_puzzle_hash
).get_tree_hash()

@ -102,6 +102,7 @@ class FullNodeRpcApi:
"/get_unfinished_block_headers": self.get_unfinished_block_headers,
"/get_network_space": self.get_network_space,
"/get_additions_and_removals": self.get_additions_and_removals,
"/get_aggsig_additional_data": self.get_aggsig_additional_data,
# this function is just here for backwards-compatibility. It will probably
# be removed in the future
"/get_initial_freeze_period": self.get_initial_freeze_period,
@ -556,18 +557,17 @@ class FullNodeRpcApi:
return {"headers": []}

response_headers: List[UnfinishedHeaderBlock] = []
for ub_height, block, _ in (self.service.full_node_store.get_unfinished_blocks()).values():
if ub_height == peak.height:
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
b"",
)
response_headers.append(unfinished_header_block)
for block in self.service.full_node_store.get_unfinished_blocks(peak.height):
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
b"",
)
response_headers.append(unfinished_header_block)
return {"headers": response_headers}

async def get_network_space(self, request: Dict[str, Any]) -> EndpointResult:
@ -806,6 +806,9 @@ class FullNodeRpcApi:
"removals": [coin_record_dict_backwards_compat(cr.to_json_dict()) for cr in removals],
}

async def get_aggsig_additional_data(self, _: Dict[str, Any]) -> EndpointResult:
return {"additional_data": self.service.constants.AGG_SIG_ME_ADDITIONAL_DATA.hex()}

async def get_all_mempool_tx_ids(self, _: Dict[str, Any]) -> EndpointResult:
ids = list(self.service.mempool_manager.mempool.all_item_ids())
return {"tx_ids": ids}

@ -159,6 +159,10 @@ class FullNodeRpcClient(RpcClient):
response = await self.fetch("get_coin_records_by_parent_ids", d)
return [CoinRecord.from_json_dict(coin_record_dict_backwards_compat(coin)) for coin in response["coin_records"]]

async def get_aggsig_additional_data(self) -> bytes32:
result = await self.fetch("get_aggsig_additional_data", {})
return bytes32.from_hexstr(result["additional_data"])
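The new `get_aggsig_additional_data` endpoint exposes the network's `AGG_SIG_ME` additional data over RPC, and the client wrapper parses the hex string back into a `bytes32`. A hedged usage sketch; the hostname, port, and config paths below are placeholder values for a typical local node:

import asyncio

from chia.rpc.full_node_rpc_client import FullNodeRpcClient
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.ints import uint16


async def main() -> None:
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
    client = await FullNodeRpcClient.create("localhost", uint16(8555), DEFAULT_ROOT_PATH, config)
    try:
        additional_data = await client.get_aggsig_additional_data()
        print(additional_data.hex())  # 32-byte network-specific AGG_SIG_ME data
    finally:
        client.close()
        await client.await_closed()


asyncio.run(main())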
async def get_coin_records_by_hint(
self,
hint: bytes32,

@ -11,6 +11,7 @@ from chia_rs import AugSchemeMPL, G1Element, G2Element, PrivateKey
from clvm_tools.binutils import assemble

from chia.consensus.block_rewards import calculate_base_farmer_reward
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.data_layer.data_layer_errors import LauncherCoinNotFoundError
from chia.data_layer.data_layer_wallet import DataLayerWallet
from chia.pools.pool_wallet import PoolWallet
@ -105,6 +106,7 @@ from chia.wallet.util.transaction_type import CLAWBACK_INCOMING_TRANSACTION_TYPE
from chia.wallet.util.tx_config import DEFAULT_TX_CONFIG, CoinSelectionConfig, CoinSelectionConfigLoader, TXConfig
from chia.wallet.util.wallet_sync_utils import fetch_coin_spend_for_coin_state
from chia.wallet.util.wallet_types import CoinType, WalletType
from chia.wallet.vault.vault_drivers import get_vault_hidden_puzzle_with_index
from chia.wallet.vc_wallet.cr_cat_drivers import ProofsChecker
from chia.wallet.vc_wallet.cr_cat_wallet import CRCATWallet
from chia.wallet.vc_wallet.vc_store import VCProofs
@ -282,6 +284,8 @@ class WalletRpcApi:
"/vc_revoke": self.vc_revoke,
# CR-CATs
"/crcat_approve_pending": self.crcat_approve_pending,
# VAULT
"/vault_create": self.vault_create,
}

def get_connections(self, request_node_type: Optional[NodeType]) -> List[Dict[str, Any]]:
@ -3494,7 +3498,7 @@ class WalletRpcApi:
full_puzzle = nft_puzzles.create_full_puzzle(
uncurried_nft.singleton_launcher_id,
metadata,
uncurried_nft.metadata_updater_hash,
bytes32(uncurried_nft.metadata_updater_hash.as_atom()),
inner_puzzle,
)

@ -4505,3 +4509,33 @@ class WalletRpcApi:
return {
"transactions": [tx.to_json_dict_convenience(self.service.config) for tx in txs],
}

##########################################################################################
# VAULT
##########################################################################################
@tx_endpoint(push=False)
async def vault_create(
self,
request: Dict[str, Any],
tx_config: TXConfig = DEFAULT_TX_CONFIG,
extra_conditions: Tuple[Condition, ...] = tuple(),
) -> EndpointResult:
"""
Create a new vault
"""
assert self.service.wallet_state_manager
secp_pk = bytes.fromhex(str(request.get("secp_pk")))
hp_index = request.get("hp_index", 0)
hidden_puzzle_hash = get_vault_hidden_puzzle_with_index(hp_index).get_tree_hash()
bls_pk = G1Element.from_bytes(bytes.fromhex(str(request.get("bls_pk"))))
timelock = uint64(request["timelock"])
fee = uint64(request.get("fee", 0))
genesis_challenge = DEFAULT_CONSTANTS.GENESIS_CHALLENGE

vault_record = await self.service.wallet_state_manager.create_vault_wallet(
secp_pk, hidden_puzzle_hash, bls_pk, timelock, genesis_challenge, tx_config, fee=fee
)

return {
"transactions": [vault_record.to_json_dict_convenience(self.service.config)],
}
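`vault_create` pulls its parameters out of the request dict: hex-encoded `secp_pk` and `bls_pk`, a required `timelock`, and optional `hp_index` and `fee`. A hedged sketch of the request shape; all key material below is fabricated placeholder data:

# Fabricated example request for the new vault_create endpoint.
vault_create_request = {
    "secp_pk": "02" + "11" * 32,  # hex-encoded SECP public key (placeholder)
    "bls_pk": "aa" * 48,          # hex-encoded BLS G1Element, 48 bytes (placeholder)
    "timelock": 3600,             # required; wrapped in uint64 by the handler
    "hp_index": 0,                # optional; selects the hidden puzzle index
    "fee": 100,                   # optional; defaults to 0
}
# The endpoint returns {"transactions": [...]} with the vault-creation
# transaction serialized via to_json_dict_convenience().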
File diff suppressed because it is too large
@ -981,8 +981,8 @@ class BlockTools:
pending_ses = True
ses_hash: Optional[bytes32] = sub_epoch_summary.get_hash()
# if the last block is the last block of the epoch, we set the new sub-slot iters and difficulty
new_sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
new_sub_slot_iters: Optional[uint64] = uint64.construct_optional(sub_epoch_summary.new_sub_slot_iters)
new_difficulty: Optional[uint64] = uint64.construct_optional(sub_epoch_summary.new_difficulty)

self.log.info(f"Sub epoch summary: {sub_epoch_summary} for block {latest_block.height+1}")
else:  # the previous block is not the last block of the sub-epoch or epoch
@ -1252,8 +1252,8 @@ class BlockTools:
num_empty_slots_added += 1

if new_sub_slot_iters is not None and new_difficulty is not None:  # new epoch
sub_slot_iters = new_sub_slot_iters
difficulty = new_difficulty
sub_slot_iters = uint64(new_sub_slot_iters)
difficulty = uint64(new_difficulty)

def create_genesis_block(
self,
@ -1350,7 +1350,7 @@ class BlockTools:
cc_challenge,
ip_iters,
)
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
cc_ip_vdf = cc_ip_vdf.replace(number_of_iterations=ip_iters)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
@ -1421,7 +1421,7 @@ class BlockTools:
+ calculate_sp_iters(
self.constants,
self.constants.SUB_SLOT_ITERS_STARTING,
unfinished_block.reward_chain_block.signage_point_index,
uint8(unfinished_block.reward_chain_block.signage_point_index),
)
)
return unfinished_block_to_full_block(
@ -1552,7 +1552,7 @@ def get_signage_point(
rc_vdf_challenge,
rc_vdf_iters,
)
cc_sp_vdf = replace(cc_sp_vdf, number_of_iterations=sp_iters)
cc_sp_vdf = cc_sp_vdf.replace(number_of_iterations=sp_iters)
if normalized_to_identity_cc_sp:
_, cc_sp_proof = get_vdf_info_and_proof(
constants,
@ -1597,7 +1597,7 @@ def finish_block(
cc_vdf_challenge,
new_ip_iters,
)
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
cc_ip_vdf = cc_ip_vdf.replace(number_of_iterations=ip_iters)
if normalized_to_identity_cc_ip:
_, cc_ip_proof = get_vdf_info_and_proof(
constants,
@ -1750,7 +1750,7 @@ def get_icc(
if len(finished_sub_slots) == 0:
prev_deficit = latest_block.deficit
else:
prev_deficit = finished_sub_slots[-1].reward_chain.deficit
prev_deficit = uint8(finished_sub_slots[-1].reward_chain.deficit)

if deficit == prev_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# new slot / overflow sb to new slot / overflow sb
@ -2051,7 +2051,7 @@ def create_block_tools(

def make_unfinished_block(block: FullBlock, constants: ConsensusConstants) -> UnfinishedBlock:
if is_overflow_block(constants, block.reward_chain_block.signage_point_index):
if is_overflow_block(constants, uint8(block.reward_chain_block.signage_point_index)):
finished_ss = block.finished_sub_slots[:-1]
else:
finished_ss = block.finished_sub_slots

@ -7,7 +7,7 @@ from chia.consensus.pot_iterations import calculate_ip_iters, calculate_iteratio
from chia.types.blockchain_format.proof_of_space import verify_and_get_quality_string
from chia.types.blockchain_format.reward_chain_block import RewardChainBlock, RewardChainBlockUnfinished
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint32, uint64
from chia.util.ints import uint8, uint32, uint64

def iters_from_block(
@ -40,11 +40,11 @@ def iters_from_block(
cc_sp,
)
return (
calculate_sp_iters(constants, sub_slot_iters, reward_chain_block.signage_point_index),
calculate_sp_iters(constants, sub_slot_iters, uint8(reward_chain_block.signage_point_index)),
calculate_ip_iters(
constants,
sub_slot_iters,
reward_chain_block.signage_point_index,
uint8(reward_chain_block.signage_point_index),
required_iters,
),
)

@ -256,7 +256,7 @@ class Timelord:
log.warning(f"Received invalid unfinished block: {e}.")
return None
block_sp_total_iters = self.last_state.total_iters - ip_iters + block_sp_iters
if is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
if is_overflow_block(self.constants, uint8(block.reward_chain_block.signage_point_index)):
block_sp_total_iters -= self.last_state.get_sub_slot_iters()
found_index = -1
for index, (rc, total_iters) in enumerate(self.last_state.reward_challenge_cache):
@ -279,7 +279,7 @@ class Timelord:
)
return None
if self.last_state.reward_challenge_cache[found_index][1] > block_sp_total_iters:
if not is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
if not is_overflow_block(self.constants, uint8(block.reward_chain_block.signage_point_index)):
log.error(
f"Will not infuse unfinished block {block.rc_prev}, sp total iters: {block_sp_total_iters}, "
f"because its iters are too low"
@ -485,13 +485,13 @@ class Timelord:
rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
if rc_info.challenge != rc_challenge:
assert rc_challenge is not None
log.warning(f"SP: Do not have correct challenge {rc_challenge.hex()} has {rc_info.challenge}")
log.warning(f"SP: Do not have correct challenge {rc_challenge.hex()} has {rc_info.challenge.hex()}")
# This proof is on an outdated challenge, so don't use it
continue
iters_from_sub_slot_start = uint64(cc_info.number_of_iterations + self.last_state.get_last_ip())
response = timelord_protocol.NewSignagePointVDF(
signage_point_index,
dataclasses.replace(cc_info, number_of_iterations=iters_from_sub_slot_start),
cc_info.replace(number_of_iterations=iters_from_sub_slot_start),
cc_proof,
rc_info,
rc_proof,
@ -584,7 +584,7 @@ class Timelord:
assert rc_challenge is not None
log.warning(
f"Do not have correct challenge {rc_challenge.hex()} "
f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
f"has {rc_info.challenge.hex()}, partial hash {block.reward_chain_block.get_hash()}"
)
# This proof is on an outdated challenge, so don't use it
continue
@ -593,13 +593,13 @@ class Timelord:
self.last_active_time = time.time()
log.debug(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")

overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)
overflow = is_overflow_block(self.constants, uint8(block.reward_chain_block.signage_point_index))

if not self.last_state.can_infuse_block(overflow):
log.warning("Too many blocks, or overflow in new epoch, cannot infuse, discarding")
return

cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
cc_info = cc_info.replace(number_of_iterations=ip_iters)
response = timelord_protocol.NewInfusionPointVDF(
challenge,
cc_info,
@ -628,7 +628,7 @@ class Timelord:
+ calculate_sp_iters(
self.constants,
block.sub_slot_iters,
block.reward_chain_block.signage_point_index,
uint8(block.reward_chain_block.signage_point_index),
)
- (block.sub_slot_iters if overflow else 0)
)
@ -755,14 +755,14 @@ class Timelord:
rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
if rc_vdf.challenge != rc_challenge:
assert rc_challenge is not None
log.warning(f"Do not have correct challenge {rc_challenge.hex()} has {rc_vdf.challenge}")
log.warning(f"Do not have correct challenge {rc_challenge.hex()} has {rc_vdf.challenge.hex()}")
# This proof is on an outdated challenge, so don't use it
return
log.debug("Collected end of subslot vdfs.")
self.iters_finished.add(iter_to_look_for)
self.last_active_time = time.time()
iters_from_sub_slot_start = uint64(cc_vdf.number_of_iterations + self.last_state.get_last_ip())
cc_vdf = dataclasses.replace(cc_vdf, number_of_iterations=iters_from_sub_slot_start)
cc_vdf = cc_vdf.replace(number_of_iterations=iters_from_sub_slot_start)
if icc_ip_vdf is not None:
if self.last_state.peak is not None:
total_iters = (
@ -778,7 +778,7 @@ class Timelord:
log.error(f"{self.last_state.subslot_end}")
assert False
assert iters_from_cb <= self.last_state.sub_slot_iters
icc_ip_vdf = dataclasses.replace(icc_ip_vdf, number_of_iterations=iters_from_cb)
icc_ip_vdf = icc_ip_vdf.replace(number_of_iterations=iters_from_cb)

icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = (
None if icc_ip_vdf is None else InfusedChallengeChainSubSlot(icc_ip_vdf)
@ -1118,7 +1118,7 @@ class Timelord:
ip,
reader,
writer,
info[1].new_proof_of_time.number_of_iterations,
uint64(info[1].new_proof_of_time.number_of_iterations),
info[1].header_hash,
info[1].height,
info[1].field_vdf,
@ -1170,7 +1170,7 @@ class Timelord:
bluebox_process_data = BlueboxProcessData(
picked_info.new_proof_of_time.challenge,
uint16(self.constants.DISCRIMINANT_SIZE_BITS),
picked_info.new_proof_of_time.number_of_iterations,
uint64(picked_info.new_proof_of_time.number_of_iterations),
)
proof = await asyncio.get_running_loop().run_in_executor(
pool,

@ -59,13 +59,13 @@ class LastState:
state.reward_chain_block,
state.sub_slot_iters,
state.difficulty,
state.reward_chain_block.height,
uint32(state.reward_chain_block.height),
)
self.deficit = state.deficit
self.sub_epoch_summary = state.sub_epoch_summary
self.last_weight = state.reward_chain_block.weight
self.last_height = state.reward_chain_block.height
self.total_iters = state.reward_chain_block.total_iters
self.last_weight = uint128(state.reward_chain_block.weight)
self.last_height = uint32(state.reward_chain_block.height)
self.total_iters = uint128(state.reward_chain_block.total_iters)
self.last_peak_challenge = state.reward_chain_block.get_hash()
self.difficulty = state.difficulty
self.sub_slot_iters = state.sub_slot_iters
@ -87,11 +87,11 @@ class LastState:
self.peak = None
self.subslot_end = state
self.last_ip = uint64(0)
self.deficit = state.reward_chain.deficit
self.deficit = uint8(state.reward_chain.deficit)
if state.challenge_chain.new_difficulty is not None:
assert state.challenge_chain.new_sub_slot_iters is not None
self.difficulty = state.challenge_chain.new_difficulty
self.sub_slot_iters = state.challenge_chain.new_sub_slot_iters
self.difficulty = uint64(state.challenge_chain.new_difficulty)
self.sub_slot_iters = uint64(state.challenge_chain.new_sub_slot_iters)
self.new_epoch = True
else:
self.new_epoch = False

@ -1,34 +1,5 @@
from __future__ import annotations

from dataclasses import dataclass
from chia_rs import ClassgroupElement

from chia.types.blockchain_format.sized_bytes import bytes100
from chia.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class ClassgroupElement(Streamable):
"""
Represents a classgroup element (a,b,c) where a, b, and c are 512 bit signed integers. However this is using
a compressed representation. VDF outputs are a single classgroup element. VDF proofs can also be one classgroup
element (or multiple).
"""

data: bytes100

@staticmethod
def create(data: bytes) -> ClassgroupElement:
if len(data) < 100:
data += b"\x00" * (100 - len(data))
return ClassgroupElement(bytes100(data))

@staticmethod
def get_default_element() -> ClassgroupElement:
# Bit 3 in the first byte of serialized compressed form indicates if
# it's the default generator element.
return ClassgroupElement.create(b"\x08")

@staticmethod
def get_size() -> int:
return 100
__all__ = ["ClassgroupElement"]
|
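ClassgroupElement is now re-exported from chia_rs; the removed Python class documents the semantics the Rust type is expected to keep (a 100-byte compressed form, zero-padded, with b"\x08" marking the default generator). A hedged sketch, assuming the chia_rs binding preserves the static methods shown above:

from chia.types.blockchain_format.classgroup import ClassgroupElement

default = ClassgroupElement.get_default_element()
assert ClassgroupElement.get_size() == 100
# per the removed Python implementation, the default element serializes as
# b"\x08" zero-padded to 100 bytes; the Rust type is assumed to match
assert bytes(default)[0] == 0x08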
@@ -1,61 +1,8 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import List, Optional
import chia_rs

from chia_rs import G2Element

from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint64
from chia.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class TransactionsInfo(Streamable):
    # Information that goes along with each transaction block
    generator_root: bytes32  # sha256 of the block generator in this block
    generator_refs_root: bytes32  # sha256 of the concatenation of the generator ref list entries
    aggregated_signature: G2Element
    fees: uint64  # This only includes user fees, not block rewards
    cost: uint64  # This is the total cost of this block, including CLVM cost, cost of program size and conditions
    reward_claims_incorporated: List[Coin]  # These can be in any order


@streamable
@dataclass(frozen=True)
class FoliageTransactionBlock(Streamable):
    # Information that goes along with each transaction block that is relevant for light clients
    prev_transaction_block_hash: bytes32
    timestamp: uint64
    filter_hash: bytes32
    additions_root: bytes32
    removals_root: bytes32
    transactions_info_hash: bytes32


@streamable
@dataclass(frozen=True)
class FoliageBlockData(Streamable):
    # Part of the block that is signed by the plot key
    unfinished_reward_block_hash: bytes32
    pool_target: PoolTarget
    pool_signature: Optional[G2Element]  # Iff ProofOfSpace has a pool pk
    farmer_reward_puzzle_hash: bytes32
    extension_data: bytes32  # Used for future updates. Can be any 32 byte value initially


@streamable
@dataclass(frozen=True)
class Foliage(Streamable):
    # The entire foliage block, containing signature and the unsigned back pointer
    # The hash of this is the "header hash". Note that for unfinished blocks, the prev_block_hash
    # Is the prev from the signage point, and can be replaced with a more recent block
    prev_block_hash: bytes32
    reward_block_hash: bytes32
    foliage_block_data: FoliageBlockData
    foliage_block_data_signature: G2Element
    foliage_transaction_block_hash: Optional[bytes32]
    foliage_transaction_block_signature: Optional[G2Element]
TransactionsInfo = chia_rs.TransactionsInfo
FoliageTransactionBlock = chia_rs.FoliageTransactionBlock
FoliageBlockData = chia_rs.FoliageBlockData
Foliage = chia_rs.Foliage
@@ -1,14 +1,5 @@
from __future__ import annotations

from dataclasses import dataclass
import chia_rs

from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint32
from chia.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class PoolTarget(Streamable):
    puzzle_hash: bytes32
    max_height: uint32  # A max height of 0 means it is valid forever
PoolTarget = chia_rs.PoolTarget
@@ -1,9 +1,9 @@
from __future__ import annotations

import logging
from dataclasses import dataclass
from typing import Optional, cast

import chia_rs
from bitstring import BitArray
from chia_rs import AugSchemeMPL, G1Element, PrivateKey
from chiapos import Verifier
@@ -11,23 +11,13 @@ from chiapos import Verifier
from chia.consensus.constants import ConsensusConstants
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint32
from chia.util.streamable import Streamable, streamable
from chia.util.ints import uint32

ProofOfSpace = chia_rs.ProofOfSpace

log = logging.getLogger(__name__)


@streamable
@dataclass(frozen=True)
class ProofOfSpace(Streamable):
    challenge: bytes32
    pool_public_key: Optional[G1Element]  # Only one of these two should be present
    pool_contract_puzzle_hash: Optional[bytes32]
    plot_public_key: G1Element
    size: uint8
    proof: bytes


def get_plot_id(pos: ProofOfSpace) -> bytes32:
    assert pos.pool_public_key is None or pos.pool_contract_puzzle_hash is None
    if pos.pool_public_key is None:
@@ -1,56 +1,6 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional
import chia_rs

from chia_rs import G2Element

from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.vdf import VDFInfo
from chia.util.ints import uint8, uint32, uint128
from chia.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class RewardChainBlockUnfinished(Streamable):
    total_iters: uint128
    signage_point_index: uint8
    pos_ss_cc_challenge_hash: bytes32
    proof_of_space: ProofOfSpace
    challenge_chain_sp_vdf: Optional[VDFInfo]  # Not present for first sp in slot
    challenge_chain_sp_signature: G2Element
    reward_chain_sp_vdf: Optional[VDFInfo]  # Not present for first sp in slot
    reward_chain_sp_signature: G2Element


@streamable
@dataclass(frozen=True)
class RewardChainBlock(Streamable):
    weight: uint128
    height: uint32
    total_iters: uint128
    signage_point_index: uint8
    pos_ss_cc_challenge_hash: bytes32
    proof_of_space: ProofOfSpace
    challenge_chain_sp_vdf: Optional[VDFInfo]  # Not present for first sp in slot
    challenge_chain_sp_signature: G2Element
    challenge_chain_ip_vdf: VDFInfo
    reward_chain_sp_vdf: Optional[VDFInfo]  # Not present for first sp in slot
    reward_chain_sp_signature: G2Element
    reward_chain_ip_vdf: VDFInfo
    infused_challenge_chain_ip_vdf: Optional[VDFInfo]  # Iff deficit < 16
    is_transaction_block: bool

    def get_unfinished(self) -> RewardChainBlockUnfinished:
        return RewardChainBlockUnfinished(
            self.total_iters,
            self.signage_point_index,
            self.pos_ss_cc_challenge_hash,
            self.proof_of_space,
            self.challenge_chain_sp_vdf,
            self.challenge_chain_sp_signature,
            self.reward_chain_sp_vdf,
            self.reward_chain_sp_signature,
        )
RewardChainBlock = chia_rs.RewardChainBlock
RewardChainBlockUnfinished = chia_rs.RewardChainBlockUnfinished
@@ -1,115 +1,5 @@
from __future__ import annotations

import io
from typing import Tuple
import chia_rs

from chia_rs import MEMPOOL_MODE, run_chia_program, serialized_length, tree_hash
from clvm import SExp
from clvm.SExp import CastableType

from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.byte_types import hexstr_to_bytes


def _serialize(node: object) -> bytes:
    if isinstance(node, list):
        serialized_list = bytearray()
        for a in node:
            serialized_list += b"\xff"
            serialized_list += _serialize(a)
        serialized_list += b"\x80"
        return bytes(serialized_list)
    if type(node) is SerializedProgram:
        return bytes(node)
    if type(node) is Program:
        return bytes(node)
    else:
        ret: bytes = SExp.to(node).as_bin()
        return ret


class SerializedProgram:
    """
    An opaque representation of a clvm program. It has a more limited interface than a full SExp
    """

    _buf: bytes

    def __init__(self, buf: bytes) -> None:
        assert isinstance(buf, bytes)
        self._buf = buf

    @staticmethod
    def parse(f: io.BytesIO) -> SerializedProgram:
        length = serialized_length(f.getvalue()[f.tell() :])
        return SerializedProgram.from_bytes(f.read(length))

    def stream(self, f: io.BytesIO) -> None:
        f.write(self._buf)

    @staticmethod
    def from_bytes(blob: bytes) -> SerializedProgram:
        assert serialized_length(blob) == len(blob)
        return SerializedProgram(bytes(blob))

    @staticmethod
    def fromhex(hexstr: str) -> SerializedProgram:
        return SerializedProgram.from_bytes(hexstr_to_bytes(hexstr))

    @staticmethod
    def from_program(p: Program) -> SerializedProgram:
        return SerializedProgram(bytes(p))

    @staticmethod
    def to(o: CastableType) -> SerializedProgram:
        return SerializedProgram(Program.to(o).as_bin())

    def to_program(self) -> Program:
        return Program.from_bytes(self._buf)

    def uncurry(self) -> Tuple[Program, Program]:
        return self.to_program().uncurry()

    def __bytes__(self) -> bytes:
        return self._buf

    def __str__(self) -> str:
        return bytes(self).hex()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({str(self)})"

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SerializedProgram):
            return False
        return self._buf == other._buf

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, SerializedProgram):
            return True
        return self._buf != other._buf

    def get_tree_hash(self) -> bytes32:
        return bytes32(tree_hash(self._buf))

    def run_mempool_with_cost(self, max_cost: int, arg: object) -> Tuple[int, Program]:
        return self._run(max_cost, MEMPOOL_MODE, arg)

    def run_with_cost(self, max_cost: int, arg: object) -> Tuple[int, Program]:
        return self._run(max_cost, 0, arg)

    def _run(self, max_cost: int, flags: int, arg: object) -> Tuple[int, Program]:
        # when multiple arguments are passed, concatenate them into a serialized
        # buffer. Some arguments may already be in serialized form (e.g.
        # SerializedProgram) so we don't want to de-serialize those just to
        # serialize them back again. This is handled by _serialize()
        serialized_args = _serialize(arg)

        cost, ret = run_chia_program(
            self._buf,
            bytes(serialized_args),
            max_cost,
            flags,
        )
        return cost, Program.to(ret)
SerializedProgram = chia_rs.Program
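The hand-written wrapper above is deleted and SerializedProgram becomes a bare alias for the Rust-backed chia_rs.Program. A hedged sketch of the intended drop-in behavior, assuming the binding keeps the from_bytes/fromhex constructors and byte round-trip of the class it replaces (import path assumed from the file being rewritten):

from chia.types.blockchain_format.serialized_program import SerializedProgram

prog = SerializedProgram.fromhex("80")  # the nil clvm program; fromhex assumed to survive the alias
blob = bytes(prog)                      # same opaque-bytes round trip as before
assert SerializedProgram.from_bytes(blob) == prog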
@@ -1,54 +1,9 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional
import chia_rs

from chia_rs import G2Element

from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.vdf import VDFInfo, VDFProof
from chia.util.ints import uint8, uint64
from chia.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class ChallengeBlockInfo(Streamable):  # The hash of this is used as the challenge_hash for the ICC VDF
    proof_of_space: ProofOfSpace
    challenge_chain_sp_vdf: Optional[VDFInfo]  # Only present if not the first sp
    challenge_chain_sp_signature: G2Element
    challenge_chain_ip_vdf: VDFInfo


@streamable
@dataclass(frozen=True)
class ChallengeChainSubSlot(Streamable):
    challenge_chain_end_of_slot_vdf: VDFInfo
    infused_challenge_chain_sub_slot_hash: Optional[bytes32]  # Only at the end of a slot
    subepoch_summary_hash: Optional[bytes32]  # Only once per sub-epoch, and one sub-epoch delayed
    new_sub_slot_iters: Optional[uint64]  # Only at the end of epoch, sub-epoch, and slot
    new_difficulty: Optional[uint64]  # Only at the end of epoch, sub-epoch, and slot


@streamable
@dataclass(frozen=True)
class InfusedChallengeChainSubSlot(Streamable):
    infused_challenge_chain_end_of_slot_vdf: VDFInfo


@streamable
@dataclass(frozen=True)
class RewardChainSubSlot(Streamable):
    end_of_slot_vdf: VDFInfo
    challenge_chain_sub_slot_hash: bytes32
    infused_challenge_chain_sub_slot_hash: Optional[bytes32]
    deficit: uint8  # 16 or less. usually zero


@streamable
@dataclass(frozen=True)
class SubSlotProofs(Streamable):
    challenge_chain_slot_proof: VDFProof
    infused_challenge_chain_slot_proof: Optional[VDFProof]
    reward_chain_slot_proof: VDFProof
ChallengeBlockInfo = chia_rs.ChallengeBlockInfo
ChallengeChainSubSlot = chia_rs.ChallengeChainSubSlot
InfusedChallengeChainSubSlot = chia_rs.InfusedChallengeChainSubSlot
RewardChainSubSlot = chia_rs.RewardChainSubSlot
SubSlotProofs = chia_rs.SubSlotProofs
@@ -1,18 +1,5 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional
import chia_rs

from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint8, uint64
from chia.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class SubEpochSummary(Streamable):
    prev_subepoch_summary_hash: bytes32
    reward_chain_hash: bytes32  # hash of reward chain at end of last segment
    num_blocks_overflow: uint8  # How many more blocks than 384*(N-1)
    new_difficulty: Optional[uint64]  # Only once per epoch (diff adjustment)
    new_sub_slot_iters: Optional[uint64]  # Only once per epoch (diff adjustment)
SubEpochSummary = chia_rs.SubEpochSummary
@@ -2,21 +2,22 @@ from __future__ import annotations

import logging
import traceback
from dataclasses import dataclass
from enum import IntEnum
from functools import lru_cache
from typing import Optional

from chia_rs import VDFInfo, VDFProof
from chiavdf import create_discriminant, verify_n_wesolowski

from chia.consensus.constants import ConsensusConstants
from chia.types.blockchain_format.classgroup import ClassgroupElement
from chia.types.blockchain_format.sized_bytes import bytes32, bytes100
from chia.util.ints import uint8, uint64
from chia.util.streamable import Streamable, streamable

log = logging.getLogger(__name__)

__all__ = ["VDFInfo", "VDFProof"]


@lru_cache(maxsize=200)
def get_discriminant(challenge: bytes32, size_bites: int) -> int:
@@ -46,22 +47,6 @@ def verify_vdf(
    )


@streamable
@dataclass(frozen=True)
class VDFInfo(Streamable):
    challenge: bytes32  # Used to generate the discriminant (VDF group)
    number_of_iterations: uint64
    output: ClassgroupElement


@streamable
@dataclass(frozen=True)
class VDFProof(Streamable):
    witness_type: uint8
    witness: bytes
    normalized_to_identity: bool


def validate_vdf(
    proof: VDFProof,
    constants: ConsensusConstants,
@@ -1,21 +1,5 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional
import chia_rs

from chia.types.blockchain_format.slots import (
    ChallengeChainSubSlot,
    InfusedChallengeChainSubSlot,
    RewardChainSubSlot,
    SubSlotProofs,
)
from chia.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class EndOfSubSlotBundle(Streamable):
    challenge_chain: ChallengeChainSubSlot
    infused_challenge_chain: Optional[InfusedChallengeChainSubSlot]
    reward_chain: RewardChainSubSlot
    proofs: SubSlotProofs
EndOfSubSlotBundle = chia_rs.EndOfSubSlotBundle
@@ -39,15 +39,15 @@ class FullBlock(Streamable):

    @property
    def height(self) -> uint32:
        return self.reward_chain_block.height
        return uint32(self.reward_chain_block.height)

    @property
    def weight(self) -> uint128:
        return self.reward_chain_block.weight
        return uint128(self.reward_chain_block.weight)

    @property
    def total_iters(self) -> uint128:
        return self.reward_chain_block.total_iters
        return uint128(self.reward_chain_block.total_iters)

    @property
    def header_hash(self) -> bytes32:
@@ -38,11 +38,11 @@ class HeaderBlock(Streamable):

    @property
    def height(self) -> uint32:
        return self.reward_chain_block.height
        return uint32(self.reward_chain_block.height)

    @property
    def weight(self) -> uint128:
        return self.reward_chain_block.weight
        return uint128(self.reward_chain_block.weight)

    @property
    def header_hash(self) -> bytes32:
@@ -50,7 +50,7 @@ class HeaderBlock(Streamable):

    @property
    def total_iters(self) -> uint128:
        return self.reward_chain_block.total_iters
        return uint128(self.reward_chain_block.total_iters)

    @property
    def log_string(self) -> str:
@@ -1,6 +1,5 @@
from __future__ import annotations

import asyncio
from dataclasses import dataclass, field
from typing import Optional, Tuple

@@ -9,6 +8,7 @@ from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.spend_bundle import SpendBundle
from chia.util.errors import Err
from chia.util.misc import ValuedEvent


@dataclass(frozen=True)
@@ -22,7 +22,7 @@ class TransactionQueueEntry:
    spend_name: bytes32
    peer: Optional[WSChiaConnection] = field(compare=False)
    test: bool = field(compare=False)
    done: asyncio.Future[Tuple[MempoolInclusionStatus, Optional[Err]]] = field(
        default_factory=asyncio.Future,
    done: ValuedEvent[Tuple[MempoolInclusionStatus, Optional[Err]]] = field(
        default_factory=ValuedEvent,
        compare=False,
    )
@@ -42,4 +42,4 @@ class UnfinishedBlock(Streamable):

    @property
    def total_iters(self) -> uint128:
        return self.reward_chain_block.total_iters
        return uint128(self.reward_chain_block.total_iters)
@@ -34,4 +34,4 @@ class UnfinishedHeaderBlock(Streamable):

    @property
    def total_iters(self) -> uint128:
        return self.reward_chain_block.total_iters
        return uint128(self.reward_chain_block.total_iters)
@@ -1,26 +1,16 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import List, Optional
from typing import List

import chia_rs

from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.reward_chain_block import RewardChainBlock
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.vdf import VDFInfo, VDFProof
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.header_block import HeaderBlock
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class SubEpochData(Streamable):
    reward_chain_hash: bytes32
    num_blocks_overflow: uint8
    new_sub_slot_iters: Optional[uint64]
    new_difficulty: Optional[uint64]

SubEpochData = chia_rs.SubEpochData

# number of challenge blocks
# Average iters for challenge blocks
@@ -33,53 +23,9 @@ class SubEpochData(Streamable):
# total number of challenge blocks == total number of reward chain blocks


@streamable
@dataclass(frozen=True)
class SubSlotData(Streamable):
    # if infused
    proof_of_space: Optional[ProofOfSpace]
    # VDF to signage point
    cc_signage_point: Optional[VDFProof]
    # VDF from signage to infusion point
    cc_infusion_point: Optional[VDFProof]
    icc_infusion_point: Optional[VDFProof]
    cc_sp_vdf_info: Optional[VDFInfo]
    signage_point_index: Optional[uint8]
    # VDF from beginning to end of slot if not infused
    # from ip to end if infused
    cc_slot_end: Optional[VDFProof]
    icc_slot_end: Optional[VDFProof]
    # info from finished slots
    cc_slot_end_info: Optional[VDFInfo]
    icc_slot_end_info: Optional[VDFInfo]
    cc_ip_vdf_info: Optional[VDFInfo]
    icc_ip_vdf_info: Optional[VDFInfo]
    total_iters: Optional[uint128]

    def is_challenge(self) -> bool:
        if self.proof_of_space is not None:
            return True
        return False

    def is_end_of_slot(self) -> bool:
        if self.cc_slot_end_info is not None:
            return True
        return False


@streamable
@dataclass(frozen=True)
class SubEpochChallengeSegment(Streamable):
    sub_epoch_n: uint32
    sub_slots: List[SubSlotData]
    rc_slot_end_info: Optional[VDFInfo]  # in first segment of each sub_epoch


@streamable
@dataclass(frozen=True)
# this is used only for serialization to database
class SubEpochSegments(Streamable):
    challenge_segments: List[SubEpochChallengeSegment]
SubEpochChallengeSegment = chia_rs.SubEpochChallengeSegment
SubEpochSegments = chia_rs.SubEpochSegments
SubSlotData = chia_rs.SubSlotData


@streamable
@@ -1,6 +1,5 @@
from __future__ import annotations

import io
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple

@@ -298,8 +297,7 @@ def header_block_from_block(
        transactions_info_optional = bytes([0])
    else:
        transactions_info_optional = bytes([1])
        buf3 = buf2[1:]
        transactions_info = TransactionsInfo.parse(io.BytesIO(buf3))
        transactions_info, advance = TransactionsInfo.parse_rust(buf2[1:])
    byte_array_tx: List[bytearray] = []
    if is_transaction_block and transactions_info:
        addition_coins = tx_addition_coins + list(transactions_info.reward_claims_incorporated)
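As the hunk above shows, parse_rust returns both the parsed object and the number of bytes it consumed, so callers can keep slicing one buffer instead of wrapping it in io.BytesIO. A hedged sketch of that consumption pattern (the helper name and buffer are illustrative, not from this commit):

from typing import Tuple

from chia.types.blockchain_format.foliage import TransactionsInfo

def split_transactions_info(buf: bytes) -> Tuple[TransactionsInfo, bytes]:
    # parse_rust consumes a prefix of buf and reports how many bytes it read
    transactions_info, advance = TransactionsInfo.parse_rust(buf)
    remainder = buf[advance:]
    return transactions_info, remainder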
@@ -440,6 +440,11 @@ full_node:
  # analyze with chia/utils/profiler.py
  enable_profiler: False

  # when enabled, each time a block is validated, the python profiler is
  # engaged. If the validation takes more than 2 seconds, the profile is saved
  # to disk, in the chia root/block-validation-profile
  profile_block_validation: False

  enable_memory_profiler: False

  # this is a debug and profiling facility that logs all SQLite commands to a
@@ -2,7 +2,7 @@ from __future__ import annotations

from pathlib import Path
from sys import platform
from typing import Optional, Tuple, Union, overload
from typing import ClassVar, Optional, Tuple, Union, overload

from keyring.backends.macOS import Keyring as MacKeyring
from keyring.backends.Windows import WinVaultKeyring as WinKeyring
@@ -63,8 +63,8 @@ class KeyringWrapper:
    """

    # Static members
    __shared_instance = None
    __keys_root_path: Path = DEFAULT_KEYS_ROOT_PATH
    __shared_instance: ClassVar[Optional[KeyringWrapper]] = None
    __keys_root_path: ClassVar[Path] = DEFAULT_KEYS_ROOT_PATH

    # Instance members
    keys_root_path: Path
@@ -13,6 +13,7 @@ from typing import (
    Any,
    AsyncContextManager,
    AsyncIterator,
    ClassVar,
    Collection,
    ContextManager,
    Dict,
@@ -374,3 +375,27 @@ async def split_async_manager(manager: AsyncContextManager[object], object: T) -
        yield split
    finally:
        await split.exit(if_needed=True)


class ValuedEventSentinel:
    pass


@dataclasses.dataclass
class ValuedEvent(Generic[T]):
    _value_sentinel: ClassVar[ValuedEventSentinel] = ValuedEventSentinel()

    _event: asyncio.Event = dataclasses.field(default_factory=asyncio.Event)
    _value: Union[ValuedEventSentinel, T] = _value_sentinel

    def set(self, value: T) -> None:
        if not isinstance(self._value, ValuedEventSentinel):
            raise Exception("Value already set")
        self._value = value
        self._event.set()

    async def wait(self) -> T:
        await self._event.wait()
        if isinstance(self._value, ValuedEventSentinel):
            raise Exception("Value not set despite event being set")
        return self._value
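ValuedEvent is a one-shot, single-value hand-off: one task sets the value exactly once and any number of tasks can await it. This is what replaces the bare asyncio.Future on TransactionQueueEntry.done earlier in this commit. A minimal usage sketch built only from the class shown above:

import asyncio

from chia.util.misc import ValuedEvent

async def main() -> None:
    done: ValuedEvent[str] = ValuedEvent()

    async def producer() -> None:
        done.set("accepted")  # a second set() raises

    asyncio.create_task(producer())
    result = await done.wait()  # blocks until set() has been called
    assert result == "accepted"

asyncio.run(main())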
@@ -5,7 +5,9 @@ import cProfile
import logging
import pathlib
import tracemalloc
from contextlib import asynccontextmanager
from datetime import datetime
from typing import AsyncIterator, Optional

from chia.util.path import path_from_root

@@ -176,3 +178,17 @@ async def mem_profile_task(root_path: pathlib.Path, service: str, log: logging.L
        counter += 1
    finally:
        tracemalloc.stop()


@asynccontextmanager
async def enable_profiler(profile: bool) -> AsyncIterator[Optional[cProfile.Profile]]:
    if not profile:
        yield None
        return

    # this is not covered by any unit tests as it's essentially test code
    # itself. It's exercised manually when investigating performance issues
    with cProfile.Profile() as pr:  # pragma: no cover
        pr.enable()
        yield pr
        pr.disable()
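enable_profiler is opt-in: with profile=False it yields None and costs nothing, so callers can wrap a hot path unconditionally and gate it on config (profile_block_validation above). A hedged usage sketch, assuming the function lives in chia.util.profiler, the file this hunk patches; the dump filename is illustrative:

import asyncio

from chia.util.profiler import enable_profiler

async def validate_once() -> None:
    async with enable_profiler(True) as pr:
        ...  # the code being measured
    if pr is not None:
        pr.dump_stats("block-validation.profile")  # inspect with pstats or snakeviz

asyncio.run(validate_once())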
@@ -7,6 +7,16 @@ from typing import Any
def recursive_replace(root_obj: Any, replace_str: str, replace_with: Any) -> Any:
    split_str = replace_str.split(".")
    if len(split_str) == 1:
        return replace(root_obj, **{split_str[0]: replace_with})
        # This check is here to support native types (implemented in Rust
        # in chia_rs) that aren't dataclasses. They instead implement a
        # replace() method in their python bindings.
        if hasattr(root_obj, "replace"):
            return root_obj.replace(**{split_str[0]: replace_with})
        else:
            return replace(root_obj, **{split_str[0]: replace_with})
    sub_obj = recursive_replace(getattr(root_obj, split_str[0]), ".".join(split_str[1:]), replace_with)
    return replace(root_obj, **{split_str[0]: sub_obj})
    # See comment above
    if hasattr(root_obj, "replace"):
        return root_obj.replace(**{split_str[0]: sub_obj})
    else:
        return replace(root_obj, **{split_str[0]: sub_obj})
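recursive_replace walks a dotted attribute path and rebuilds each level immutably, now preferring a native object's own replace() binding over dataclasses.replace. A usage sketch with toy dataclasses (the import path is an assumption based on the file this hunk patches):

from dataclasses import dataclass

# assumed import path for the helper patched above
from chia.util.recursive_replace import recursive_replace

@dataclass(frozen=True)
class Inner:
    value: int

@dataclass(frozen=True)
class Outer:
    inner: Inner

outer = Outer(Inner(1))
updated = recursive_replace(outer, "inner.value", 2)  # rebuilds Outer and Inner
assert updated.inner.value == 2 and outer.inner.value == 1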
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import BinaryIO, ClassVar, SupportsIndex, SupportsInt, Type, TypeVar, Union
from typing import BinaryIO, ClassVar, Optional, SupportsIndex, SupportsInt, Type, TypeVar, Union

from typing_extensions import Protocol

@@ -70,6 +70,13 @@ class StructStream(int):
        if not (self.MINIMUM <= self <= self.MAXIMUM):
            raise ValueError(f"Value {self} does not fit into {type(self).__name__}")

    @classmethod
    def construct_optional(cls: Type[_T_StructStream], val: Optional[int]) -> Optional[_T_StructStream]:
        if val is None:
            return None
        else:
            return cls(val)

    @classmethod
    def parse(cls: Type[_T_StructStream], f: BinaryIO) -> _T_StructStream:
        read_bytes = f.read(cls.SIZE)
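construct_optional just threads Optional through the sized-int constructors, which keeps call sites out of if/else blocks when a field may be None. A small sketch:

from typing import Optional

from chia.util.ints import uint64

maybe_height: Optional[int] = None
assert uint64.construct_optional(maybe_height) is None
assert uint64.construct_optional(42) == uint64(42)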
@@ -1,37 +0,0 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import List

from chia.types.blockchain_format.coin import Coin
from chia.types.header_block import HeaderBlock
from chia.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class HeaderBlockRecord(Streamable):
    """
    These are values that are stored in the wallet database, corresponding to information
    that the wallet cares about in each block
    """

    header: HeaderBlock
    additions: List[Coin]  # A block record without additions is not finished
    removals: List[Coin]  # A block record without removals is not finished

    @property
    def header_hash(self):
        return self.header.header_hash

    @property
    def prev_header_hash(self):
        return self.header.prev_header_hash

    @property
    def height(self):
        return self.header.height

    @property
    def transactions_filter(self):
        return self.header.transactions_filter
@@ -2,7 +2,7 @@ from __future__ import annotations

from abc import ABC, abstractmethod
from dataclasses import dataclass, fields, replace
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, TypeVar, Union, final, get_type_hints
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, TypeVar, Union, cast, final, get_type_hints

from chia_rs import G1Element
from clvm.casts import int_from_bytes, int_to_bytes
@@ -733,8 +733,8 @@ class UnknownCondition(Condition):
    args: List[Program]

    def to_program(self) -> Program:
        prog: Program = self.opcode.cons(self.args)
        return prog
        # TODO: Remove cast when we have proper hinting for this
        return cast(Program, self.opcode.cons(Program.to(self.args)))

    @classmethod
    def from_program(cls, program: Program) -> UnknownCondition:
@@ -428,6 +428,7 @@ class DIDWallet:
        )

        await self.add_parent(coin.name(), future_parent)
        await self.wallet_state_manager.add_interested_coin_ids([coin.name()])

    def create_backup(self) -> str:
        """
@@ -902,6 +903,9 @@ class DIDWallet:
    ) -> Tuple[TransactionRecord, SpendBundle, str]:
        """
        Create an attestment
        TODO:
            1. We should use/respect `tx_config` (reuse_puzhash and co)
            2. We should take a fee as it's a requirement for every transaction function to do so
        :param recovering_coin_name: Coin ID of the DID
        :param newpuz: New puzzle hash
        :param pubkey: New wallet pubkey
@@ -1116,27 +1120,20 @@ class DIDWallet:
    async def get_new_p2_inner_puzzle(self) -> Program:
        return await self.standard_wallet.get_new_puzzle()

    async def get_new_did_innerpuz(self, origin_id=None) -> Program:
    async def get_new_did_innerpuz(self, origin_id: Optional[bytes32] = None) -> Program:
        if self.did_info.origin_coin is not None:
            innerpuz = did_wallet_puzzles.create_innerpuz(
                p2_puzzle_or_hash=await self.get_new_p2_inner_puzzle(),
                recovery_list=self.did_info.backup_ids,
                num_of_backup_ids_needed=uint64(self.did_info.num_of_backup_ids_needed),
                launcher_id=self.did_info.origin_coin.name(),
                metadata=did_wallet_puzzles.metadata_to_program(json.loads(self.did_info.metadata)),
            )
            launcher_id = self.did_info.origin_coin.name()
        elif origin_id is not None:
            innerpuz = did_wallet_puzzles.create_innerpuz(
                p2_puzzle_or_hash=await self.get_new_p2_inner_puzzle(),
                recovery_list=self.did_info.backup_ids,
                num_of_backup_ids_needed=uint64(self.did_info.num_of_backup_ids_needed),
                launcher_id=origin_id,
                metadata=did_wallet_puzzles.metadata_to_program(json.loads(self.did_info.metadata)),
            )
            launcher_id = origin_id
        else:
            raise ValueError("must have origin coin")

        return innerpuz
        return did_wallet_puzzles.create_innerpuz(
            p2_puzzle_or_hash=await self.get_new_p2_inner_puzzle(),
            recovery_list=self.did_info.backup_ids,
            num_of_backup_ids_needed=self.did_info.num_of_backup_ids_needed,
            launcher_id=launcher_id,
            metadata=did_wallet_puzzles.metadata_to_program(json.loads(self.did_info.metadata)),
        )

    async def get_new_did_inner_hash(self) -> bytes32:
        innerpuz = await self.get_new_did_innerpuz()
@@ -1366,9 +1363,6 @@ class DIDWallet:
        unsigned_spend_bundle = SpendBundle(list_of_coinspends, G2Element())
        return await self.sign(unsigned_spend_bundle)

    async def get_frozen_amount(self) -> uint64:
        return await self.wallet_state_manager.get_frozen_balance(self.wallet_info.id)

    async def get_spendable_balance(self, unspent_records=None) -> uint128:
        spendable_am = await self.wallet_state_manager.get_confirmed_spendable_balance_for_wallet(
            self.wallet_info.id, unspent_records
@@ -1509,7 +1503,7 @@ class DIDWallet:
        )
        if len(spendable_coins) == 0:
            raise RuntimeError("DID is not currently spendable")
        return list(spendable_coins)[0].coin
        return sorted(list(spendable_coins), key=lambda c: c.confirmed_block_height, reverse=True)[0].coin

    async def match_hinted_coin(self, coin: Coin, hint: bytes32) -> bool:
        if self.did_info.origin_coin is None:
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Dict, Iterator, List, Optional, Tuple, Union
from typing import Dict, Iterator, List, Optional, Tuple, Union, cast

from chia_rs import G1Element

@@ -48,7 +48,7 @@ def create_innerpuz(
    Note: Receiving a standard P2 puzzle hash wouldn't calculate a valid puzzle, but
    that can be useful if calling `.get_tree_hash_precalc()` on it.
    """
    backup_ids_hash = Program(Program.to(recovery_list)).get_tree_hash()
    backup_ids_hash = Program.to(recovery_list).get_tree_hash()
    if recovery_list_hash is not None:
        backup_ids_hash = recovery_list_hash
    singleton_struct = Program.to((SINGLETON_TOP_LAYER_MOD_HASH, (launcher_id, SINGLETON_LAUNCHER_PUZZLE_HASH)))
@@ -123,7 +123,7 @@ def create_recovery_message_puzzle(recovering_coin_id: bytes32, newpuz: bytes32,
    :param pubkey: New wallet pubkey
    :return: Message puzzle
    """
    return Program.to(
    puzzle = Program.to(
        (
            1,
            [
@@ -132,6 +132,8 @@ def create_recovery_message_puzzle(recovering_coin_id: bytes32, newpuz: bytes32,
            ],
        )
    )
    # TODO: Remove cast when we have proper hinting for this
    return cast(Program, puzzle)


def create_spend_for_message(
@@ -153,7 +155,7 @@ def create_spend_for_message(

def match_did_puzzle(mod: Program, curried_args: Program) -> Optional[Iterator[Program]]:
    """
    Given a puzzle test if it's a DID, if it is, return the curried arguments
    Given a puzzle test if it's a DID, if it is, return the curried arguments
    :param puzzle: Puzzle
    :return: Curried parameters
    """
@@ -161,7 +163,8 @@ def match_did_puzzle(mod: Program, curried_args: Program) -> Optional[Iterator[P
    if mod == SINGLETON_TOP_LAYER_MOD:
        mod, curried_args = curried_args.rest().first().uncurry()
        if mod == DID_INNERPUZ_MOD:
            return curried_args.as_iter()
            # TODO: Remove cast when we have clvm type hinting for this
            return cast(Iterator[Program], curried_args.as_iter())
    except Exception:
        import traceback

@@ -178,11 +181,11 @@ def check_is_did_puzzle(puzzle: Program) -> bool:
    r = puzzle.uncurry()
    if r is None:
        return False
    inner_f, args = r
    inner_f, _ = r
    return is_singleton(inner_f)


def metadata_to_program(metadata: Dict) -> Program:
def metadata_to_program(metadata: Dict[str, str]) -> Program:
    """
    Convert the metadata dict to a Chialisp program
    :param metadata: User defined metadata
@@ -191,10 +194,11 @@ def metadata_to_program(metadata: Dict) -> Program:
    kv_list = []
    for key, value in metadata.items():
        kv_list.append((key, value))
    return Program.to(kv_list)
    # TODO: Remove cast when we have proper hinting for this
    return cast(Program, Program.to(kv_list))


def did_program_to_metadata(program: Program) -> Dict:
def did_program_to_metadata(program: Program) -> Dict[str, str]:
    """
    Convert a program to a metadata dict
    :param program: Chialisp program contains the metadata
@@ -29,14 +29,14 @@ class KeyValStore:
        """

        async with self.db_wrapper.reader_no_transaction() as conn:
            cursor = await conn.execute("SELECT * from key_val_store WHERE key=?", (key,))
            cursor = await conn.execute("SELECT value from key_val_store WHERE key=?", (key,))
            row = await cursor.fetchone()
            await cursor.close()

            if row is None:
                return None

            return object_type.from_bytes(row[1])
            return object_type.from_bytes(row[0])

    async def set_object(self, key: str, obj: Any):
        """
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, List, Optional
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from chia.types.blockchain_format.program import Program
|
||||
from chia.types.blockchain_format.sized_bytes import bytes32
|
||||
@ -9,6 +10,12 @@ from chia.util.ints import uint64
|
||||
from chia.util.streamable import Streamable, streamable
|
||||
|
||||
|
||||
class LineageProofField(Enum):
|
||||
PARENT_NAME = 1
|
||||
INNER_PUZZLE_HASH = 2
|
||||
AMOUNT = 3
|
||||
|
||||
|
||||
@streamable
|
||||
@dataclass(frozen=True)
|
||||
class LineageProof(Streamable):
|
||||
@ -16,6 +23,27 @@ class LineageProof(Streamable):
|
||||
inner_puzzle_hash: Optional[bytes32] = None
|
||||
amount: Optional[uint64] = None
|
||||
|
||||
@classmethod
|
||||
def from_program(cls, program: Program, fields: List[LineageProofField]) -> LineageProof:
|
||||
lineage_proof_info: Dict[str, Any] = {}
|
||||
field_iter = iter(fields)
|
||||
program_iter = program.as_iter()
|
||||
for program_value in program_iter:
|
||||
field = next(field_iter)
|
||||
if field == LineageProofField.PARENT_NAME:
|
||||
lineage_proof_info["parent_name"] = bytes32(program_value.as_atom())
|
||||
elif field == LineageProofField.INNER_PUZZLE_HASH:
|
||||
lineage_proof_info["inner_puzzle_hash"] = bytes32(program_value.as_atom())
|
||||
elif field == LineageProofField.AMOUNT:
|
||||
lineage_proof_info["amount"] = uint64(program_value.as_int())
|
||||
try:
|
||||
next(field_iter)
|
||||
raise ValueError("Mismatch between program data and fields information")
|
||||
except StopIteration:
|
||||
pass
|
||||
|
||||
return LineageProof(**lineage_proof_info)
|
||||
|
||||
def to_program(self) -> Program:
|
||||
final_list: List[Any] = []
|
||||
if self.parent_name is not None:
|
||||
|
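from_program is the inverse of to_program for whichever fields are present: the caller states the expected field order, and a count mismatch raises. A round-trip sketch using only the types defined above (values are placeholders):

from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint64
from chia.wallet.lineage_proof import LineageProof, LineageProofField

lp = LineageProof(parent_name=bytes32(b"\x11" * 32), amount=uint64(1))
prog = lp.to_program()  # serializes only the fields that are set, in order
round_tripped = LineageProof.from_program(
    prog, [LineageProofField.PARENT_NAME, LineageProofField.AMOUNT]
)
assert round_tripped == lp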
@@ -217,7 +217,7 @@ class GenesisByIdOrSingleton(LimitationsProgram):
    def match(uncurried_mod: Program, curried_args: Program) -> Tuple[bool, List[Program]]:  # pragma: no cover
        if uncurried_mod == GENESIS_BY_ID_OR_SINGLETON_MOD:
            genesis_id = curried_args.first()
            return True, [genesis_id.as_atom()]
            return True, [genesis_id]
        else:
            return False, []
@@ -540,7 +540,7 @@ class TradeManager:
        fee_left_to_pay: uint64 = fee
        # The access of the sorted keys here makes sure we create the XCH transaction first to make sure we pay fee
        # with the XCH side of the offer and don't create an extra fee transaction in other wallets.
        for id in sorted(coins_to_offer.keys()):
        for id in sorted(coins_to_offer.keys(), key=lambda id: id != 1):
            selected_coins = coins_to_offer[id]
            if isinstance(id, int):
                wallet = self.wallet_state_manager.wallets[id]
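The new sort key relies on False sorting before True: `id != 1` is False only for the XCH wallet id, so wallet 1 comes first and everything else keeps its relative order (sorted is stable). A one-line demonstration with toy ids:

ids = [3, 1, 2]
assert sorted(ids, key=lambda id: id != 1) == [1, 3, 2]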
@@ -47,6 +47,8 @@ def build_merkle_tree_from_binary_tree(tuples: TupleTree) -> Tuple[bytes32, Dict

def list_to_binary_tree(objects: List[Any]) -> Any:
    size = len(objects)
    if size == 0:
        raise ValueError("Cannot build a tree out of 0 objects")
    if size == 1:
        return objects[0]
    midpoint = (size + 1) >> 1
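The new guard makes the empty case an explicit error; for everything else the helper splits at `(size + 1) >> 1`, so the left subtree takes the extra element on odd sizes. A self-contained sketch of that recursion (my own reimplementation for illustration, consistent with the lines shown above):

from typing import Any, List

def list_to_binary_tree(objects: List[Any]) -> Any:
    size = len(objects)
    if size == 0:
        raise ValueError("Cannot build a tree out of 0 objects")
    if size == 1:
        return objects[0]
    midpoint = (size + 1) >> 1  # left half gets the extra element
    return (list_to_binary_tree(objects[:midpoint]), list_to_binary_tree(objects[midpoint:]))

assert list_to_binary_tree([1, 2, 3]) == ((1, 2), 3)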
@@ -41,7 +41,7 @@ class PeerRequestCache:
        if header_block.is_transaction_block:
            assert header_block.foliage_transaction_block is not None
            if self._timestamps.get(header_block.height) is None:
                self._timestamps.put(header_block.height, header_block.foliage_transaction_block.timestamp)
                self._timestamps.put(header_block.height, uint64(header_block.foliage_transaction_block.timestamp))

    def get_block_request(self, start: uint32, end: uint32) -> Optional[asyncio.Task[Any]]:
        return self._block_requests.get((start, end))
@@ -4,8 +4,9 @@ from chia_rs import G1Element

from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint64
from chia.util.ints import uint32, uint64
from chia.wallet.puzzles.load_clvm import load_clvm
from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import DEFAULT_HIDDEN_PUZZLE
from chia.wallet.util.merkle_tree import MerkleTree

# MODS
@@ -28,27 +29,48 @@ def construct_recovery_finish(timelock: uint64, recovery_conditions: Program) ->
    return RECOVERY_FINISH_MOD.curry(timelock, recovery_conditions)


def construct_p2_recovery_puzzle(secp_puzzlehash: bytes32, bls_pk: G1Element, timelock: uint64) -> Program:
    return P2_RECOVERY_MOD.curry(P2_1_OF_N_MOD_HASH, RECOVERY_FINISH_MOD_HASH, secp_puzzlehash, bls_pk, timelock)
def construct_p2_recovery_puzzle(secp_puzzle_hash: bytes32, bls_pk: G1Element, timelock: uint64) -> Program:
    return P2_RECOVERY_MOD.curry(P2_1_OF_N_MOD_HASH, RECOVERY_FINISH_MOD_HASH, secp_puzzle_hash, bls_pk, timelock)


def construct_vault_puzzle(secp_puzzlehash: bytes32, recovery_puzzlehash: bytes32) -> Program:
    return P2_1_OF_N_MOD.curry(MerkleTree([secp_puzzlehash, recovery_puzzlehash]).calculate_root())
def construct_vault_puzzle(secp_puzzle_hash: bytes32, recovery_puzzle_hash: bytes32) -> Program:
    return P2_1_OF_N_MOD.curry(MerkleTree([secp_puzzle_hash, recovery_puzzle_hash]).calculate_root())


def get_vault_hidden_puzzle_with_index(index: uint32, hidden_puzzle: Program = DEFAULT_HIDDEN_PUZZLE) -> Program:
    hidden_puzzle_with_index: Program = Program.to([6, (index, hidden_puzzle)])
    return hidden_puzzle_with_index


def get_vault_inner_puzzle(
    secp_pk: bytes, genesis_challenge: bytes32, entropy: bytes, bls_pk: G1Element, timelock: uint64
) -> Program:
    secp_puzzle_hash = construct_p2_delegated_secp(secp_pk, genesis_challenge, entropy).get_tree_hash()
    recovery_puzzle_hash = construct_p2_recovery_puzzle(secp_puzzle_hash, bls_pk, timelock).get_tree_hash()
    return construct_vault_puzzle(secp_puzzle_hash, recovery_puzzle_hash)


def get_vault_inner_puzzle_hash(
    secp_pk: bytes, genesis_challenge: bytes32, entropy: bytes, bls_pk: G1Element, timelock: uint64
) -> bytes32:
    vault_puzzle = get_vault_inner_puzzle(secp_pk, genesis_challenge, entropy, bls_pk, timelock)
    vault_puzzle_hash: bytes32 = vault_puzzle.get_tree_hash()
    return vault_puzzle_hash


# MERKLE
def construct_vault_merkle_tree(secp_puzzlehash: bytes32, recovery_puzzlehash: bytes32) -> MerkleTree:
    return MerkleTree([secp_puzzlehash, recovery_puzzlehash])
def construct_vault_merkle_tree(secp_puzzle_hash: bytes32, recovery_puzzle_hash: bytes32) -> MerkleTree:
    return MerkleTree([secp_puzzle_hash, recovery_puzzle_hash])


def get_vault_proof(merkle_tree: MerkleTree, puzzlehash: bytes32) -> Program:
    proof = merkle_tree.generate_proof(puzzlehash)
def get_vault_proof(merkle_tree: MerkleTree, puzzle_hash: bytes32) -> Program:
    proof = merkle_tree.generate_proof(puzzle_hash)
    vault_proof: Program = Program.to((proof[0], proof[1][0]))
    return vault_proof


# SECP SIGNATURE
def construct_secp_message(
    delegated_puzzlehash: bytes32, coin_id: bytes32, genesis_challenge: bytes32, entropy: bytes
    delegated_puzzle_hash: bytes32, coin_id: bytes32, genesis_challenge: bytes32, entropy: bytes
) -> bytes:
    return delegated_puzzlehash + coin_id + genesis_challenge + entropy
    return delegated_puzzle_hash + coin_id + genesis_challenge + entropy
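The vault inner puzzle is composed bottom-up: a secp spend path and a BLS-plus-timelock recovery path are hashed into a two-leaf merkle tree, and the 1-of-N puzzle is curried with that root. A hedged sketch of the call chain using the drivers above; every argument value is a placeholder, not real key material:

from chia_rs import G1Element

from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint64
from chia.wallet.vault.vault_drivers import get_vault_inner_puzzle, get_vault_inner_puzzle_hash

secp_pk = bytes(33)                        # placeholder secp public key
genesis_challenge = bytes32(b"\x00" * 32)  # placeholder network genesis challenge
entropy = b"\x01" * 32                     # placeholder hidden-puzzle entropy
bls_pk = G1Element()                       # placeholder BLS recovery key
timelock = uint64(100)

puzzle = get_vault_inner_puzzle(secp_pk, genesis_challenge, entropy, bls_pk, timelock)
assert puzzle.get_tree_hash() == get_vault_inner_puzzle_hash(
    secp_pk, genesis_challenge, entropy, bls_pk, timelock
)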
@@ -16,7 +16,7 @@ from chia.util.ints import uint16, uint64
from chia.util.streamable import Streamable, streamable
from chia.wallet.cat_wallet.cat_utils import CAT_MOD, construct_cat_puzzle
from chia.wallet.conditions import AssertCoinAnnouncement
from chia.wallet.lineage_proof import LineageProof
from chia.wallet.lineage_proof import LineageProof, LineageProofField
from chia.wallet.payment import Payment
from chia.wallet.puzzles.load_clvm import load_clvm_maybe_recompile
from chia.wallet.puzzles.singleton_top_layer_v1_1 import SINGLETON_LAUNCHER_HASH, SINGLETON_MOD_HASH
@@ -314,10 +314,14 @@ class CRCAT:
        uncurried_puzzle: UncurriedPuzzle = uncurry_puzzle(spend.puzzle_reveal.to_program())
        first_uncurried_cr_layer: UncurriedPuzzle = uncurry_puzzle(uncurried_puzzle.args.at("rrf"))
        second_uncurried_cr_layer: UncurriedPuzzle = uncurry_puzzle(first_uncurried_cr_layer.mod)
        lineage_proof = LineageProof.from_program(
            spend.solution.to_program().at("rf"),
            [LineageProofField.PARENT_NAME, LineageProofField.INNER_PUZZLE_HASH, LineageProofField.AMOUNT],
        )
        return CRCAT(
            spend.coin,
            bytes32(uncurried_puzzle.args.at("rf").as_atom()),
            spend.solution.to_program().at("rf"),
            lineage_proof,
            [bytes32(ap.as_atom()) for ap in second_uncurried_cr_layer.args.at("rf").as_iter()],
            second_uncurried_cr_layer.args.at("rrf"),
            first_uncurried_cr_layer.args.at("rf").get_tree_hash(),
@@ -359,6 +363,7 @@ class CRCAT:
                raise ValueError(
                    "Previous spend was not a CR-CAT, nor did it properly remark the CR params"
                )  # pragma: no cover
            authorized_providers = [bytes32(p.as_atom()) for p in authorized_providers_as_prog.as_iter()]
            lineage_inner_puzhash: bytes32 = potential_cr_layer.get_tree_hash()
        else:
            # Otherwise the info we need will be in the puzzle reveal
@@ -369,14 +374,14 @@ class CRCAT:
            if conditions is None:
                conditions = inner_puzzle.run(inner_solution)
            inner_puzzle_hash: bytes32 = inner_puzzle.get_tree_hash()
            authorized_providers = [bytes32(p.as_atom()) for p in authorized_providers_as_prog.as_iter()]
            lineage_inner_puzhash = construct_cr_layer(
                authorized_providers_as_prog,
                authorized_providers,
                proofs_checker,
                inner_puzzle_hash,  # type: ignore
            ).get_tree_hash_precalc(inner_puzzle_hash)

        # Convert all of the old stuff into python
        authorized_providers: List[bytes32] = [bytes32(p.as_atom()) for p in authorized_providers_as_prog.as_iter()]
        new_lineage_proof: LineageProof = LineageProof(
            parent_spend.coin.parent_coin_info,
            lineage_inner_puzhash,
@@ -237,16 +237,10 @@ def create_eml_covenant_morpher(


def construct_exigent_metadata_layer(
    metadata: Optional[bytes32],
    transfer_program: Program,
    inner_puzzle: Program,
    metadata: Optional[Program], transfer_program: Program, inner_puzzle: Program
) -> Program:
    return EXTIGENT_METADATA_LAYER.curry(
        EXTIGENT_METADATA_LAYER_HASH,
        metadata,
        transfer_program,
        transfer_program.get_tree_hash(),
        inner_puzzle,
        EXTIGENT_METADATA_LAYER_HASH, metadata, transfer_program, transfer_program.get_tree_hash(), inner_puzzle
    )


@@ -99,8 +99,8 @@ class WalletBlockchain(BlockchainInterface):
            and block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters is not None
        ):
            assert block.finished_sub_slots[0].challenge_chain.new_difficulty is not None  # They both change together
            sub_slot_iters: uint64 = block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters
            difficulty: uint64 = block.finished_sub_slots[0].challenge_chain.new_difficulty
            sub_slot_iters = uint64(block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters)
            difficulty = uint64(block.finished_sub_slots[0].challenge_chain.new_difficulty)
        else:
            sub_slot_iters = self._sub_slot_iters
            difficulty = self._difficulty
@@ -164,7 +164,7 @@ class WalletBlockchain(BlockchainInterface):
        if timestamp is not None:
            self._latest_timestamp = timestamp
        elif block.foliage_transaction_block is not None:
            self._latest_timestamp = block.foliage_transaction_block.timestamp
            self._latest_timestamp = uint64(block.foliage_transaction_block.timestamp)
        log.info(f"Peak set to: {self._peak.height} timestamp: {self._latest_timestamp}")

    async def get_peak_block(self) -> Optional[HeaderBlock]:
@@ -1058,7 +1058,7 @@ class WalletNode:
            self.log.debug(f"get_timestamp_for_height_from_peer use cached block for height {request_height}")

            if block is not None and block.foliage_transaction_block is not None:
                return block.foliage_transaction_block.timestamp
                return uint64(block.foliage_transaction_block.timestamp)

            request_height -= 1

@@ -25,7 +25,7 @@ from typing import (
)

import aiosqlite
from chia_rs import G1Element, G2Element, PrivateKey
from chia_rs import AugSchemeMPL, G1Element, G2Element, PrivateKey

from chia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from chia.consensus.coinbase import farmer_parent_id, pool_parent_id
@@ -47,7 +47,7 @@ from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.types.coin_spend import CoinSpend, compute_additions
from chia.types.coin_spend import CoinSpend, compute_additions, make_spend
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.spend_bundle import SpendBundle
from chia.util.bech32m import encode_puzzle_hash
@@ -110,7 +110,12 @@ from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
    puzzle_hash_for_synthetic_public_key,
)
from chia.wallet.sign_coin_spends import sign_coin_spends
from chia.wallet.singleton import create_singleton_puzzle, get_inner_puzzle_from_singleton, get_singleton_id_from_puzzle
from chia.wallet.singleton import (
    SINGLETON_LAUNCHER_PUZZLE,
    create_singleton_puzzle,
    get_inner_puzzle_from_singleton,
    get_singleton_id_from_puzzle,
)
from chia.wallet.trade_manager import TradeManager
from chia.wallet.trading.trade_status import TradeStatus
from chia.wallet.transaction_record import TransactionRecord
@@ -128,6 +133,7 @@ from chia.wallet.util.wallet_sync_utils import (
    last_change_height_cs,
)
from chia.wallet.util.wallet_types import CoinType, WalletIdentifier, WalletType
from chia.wallet.vault.vault_drivers import get_vault_inner_puzzle_hash
from chia.wallet.vc_wallet.cr_cat_drivers import CRCAT, ProofsChecker, construct_pending_approval_state
from chia.wallet.vc_wallet.cr_cat_wallet import CRCATWallet
from chia.wallet.vc_wallet.vc_drivers import VerifiedCredential
@@ -2547,3 +2553,70 @@ class WalletStateManager:
            self.constants.MAX_BLOCK_COST_CLVM,
            [puzzle_hash_for_synthetic_public_key],
        )

    async def create_vault_wallet(
        self,
        secp_pk: bytes,
        hidden_puzzle_hash: bytes32,
        bls_pk: G1Element,
        timelock: uint64,
        genesis_challenge: bytes32,
        tx_config: TXConfig,
        fee: uint64 = uint64(0),
    ) -> TransactionRecord:
        """
        Returns a tx record for creating a new vault
        """
        wallet = self.main_wallet
        vault_inner_puzzle_hash = get_vault_inner_puzzle_hash(
            secp_pk, genesis_challenge, hidden_puzzle_hash, bls_pk, timelock
        )
        # Get xch coin
        amount = uint64(1)
        coins = await wallet.select_coins(uint64(amount + fee), tx_config.coin_selection_config)

        # Create singleton launcher
        origin = next(iter(coins))
        launcher_coin = Coin(origin.name(), SINGLETON_LAUNCHER_HASH, amount)

        genesis_launcher_solution = Program.to([vault_inner_puzzle_hash, amount, [secp_pk, hidden_puzzle_hash]])
        announcement_message = genesis_launcher_solution.get_tree_hash()

        [tx_record] = await wallet.generate_signed_transaction(
            amount,
            SINGLETON_LAUNCHER_HASH,
            tx_config,
            fee,
            coins,
            None,
            origin_id=origin.name(),
            extra_conditions=(
                AssertCoinAnnouncement(asserted_id=launcher_coin.name(), asserted_msg=announcement_message),
            ),
        )

        launcher_cs = make_spend(launcher_coin, SINGLETON_LAUNCHER_PUZZLE, genesis_launcher_solution)
        launcher_sb = SpendBundle([launcher_cs], AugSchemeMPL.aggregate([]))
        assert tx_record.spend_bundle is not None
        full_spend = SpendBundle.aggregate([tx_record.spend_bundle, launcher_sb])

        vault_record = TransactionRecord(
            confirmed_at_height=uint32(0),
            created_at_time=uint64(int(time.time())),
            amount=uint64(amount),
            to_puzzle_hash=vault_inner_puzzle_hash,
            fee_amount=fee,
            confirmed=False,
            sent=uint32(0),
            spend_bundle=full_spend,
            additions=full_spend.additions(),
            removals=full_spend.removals(),
            wallet_id=wallet.id(),
            sent_to=[],
            trade_id=None,
            type=uint32(TransactionType.INCOMING_TX.value),
            name=full_spend.name(),
            memos=[],
            valid_times=ConditionValidTimes(),
        )
        return vault_record
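create_vault_wallet follows the standard singleton-launch recipe: select an origin coin, pay 1 mojo to the launcher puzzle hash while asserting the launcher coin's announcement, then aggregate the launcher spend into one bundle. A hedged invocation sketch; `wsm` and `tx_config` are assumed to come from the caller and all key material is placeholder-only:

from chia_rs import G1Element

from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint64

async def launch_vault(wsm, tx_config) -> None:
    # wsm: a constructed WalletStateManager; tx_config: a TXConfig
    vault_tx = await wsm.create_vault_wallet(
        secp_pk=bytes(33),                         # placeholder secp public key
        hidden_puzzle_hash=bytes32(b"\x00" * 32),  # placeholder
        bls_pk=G1Element(),                        # placeholder BLS recovery key
        timelock=uint64(100),
        genesis_challenge=wsm.constants.GENESIS_CHALLENGE,
        tx_config=tx_config,
        fee=uint64(0),
    )
    assert vault_tx.spend_bundle is not None  # ready to submit to the mempool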
@@ -210,7 +210,9 @@ elif [ "$(uname)" = "Linux" ]; then
    echo "Installing on Arch Linux."
    case $(uname -m) in
        x86_64|aarch64)
            sudo pacman ${PACMAN_AUTOMATED} -S --needed git openssl
            if ! pacman -Qs "^git$" > /dev/null || ! pacman -Qs "^openssl$" > /dev/null ; then
                sudo pacman ${PACMAN_AUTOMATED} -S --needed git openssl
            fi
            ;;
        *)
            echo "Incompatible CPU architecture. Must be x86_64 or aarch64."
@@ -11,7 +11,6 @@ chia.plotting.manager
chia.plotting.util
chia.rpc.rpc_client
chia.rpc.util
chia.rpc.wallet_rpc_client
chia.simulator.full_node_simulator
chia.simulator.keyring
chia.simulator.wallet_tools
@@ -19,10 +18,8 @@ chia.ssl.create_ssl
chia.timelord.timelord_api
chia.timelord.timelord_launcher
chia.types.blockchain_format.program
chia.wallet.block_record
chia.wallet.chialisp
chia.wallet.did_wallet.did_wallet
chia.wallet.did_wallet.did_wallet_puzzles
chia.wallet.key_val_store
chia.wallet.lineage_proof
chia.wallet.nft_wallet.nft_puzzles
@@ -105,25 +102,16 @@ tests.util.test_full_block_utils
tests.util.test_misc
tests.util.test_network
tests.util.time_out_assert
tests.wallet.cat_wallet.test_cat_wallet
tests.wallet.cat_wallet.test_offer_lifecycle
tests.wallet.cat_wallet.test_trades
tests.wallet.did_wallet.test_did
tests.wallet.nft_wallet.test_nft_wallet
tests.wallet.rpc.test_wallet_rpc
tests.wallet.simple_sync.test_simple_sync_protocol
tests.wallet.sync.test_wallet_sync
tests.wallet.test_bech32m
tests.wallet.test_chialisp
tests.wallet.test_puzzle_store
tests.wallet.test_singleton
tests.wallet.test_singleton_lifecycle
tests.wallet.test_singleton_lifecycle_fast
tests.wallet.test_taproot
tests.wallet.test_wallet_blockchain
tests.wallet.test_wallet_interested_store
tests.wallet.test_wallet_key_val_store
tests.wallet.test_wallet_user_store
tools.analyze-chain
tools.run_block
tools.test_full_sync
28
setup.py
@ -7,14 +7,14 @@ from setuptools import find_packages, setup

dependencies = [
    "aiofiles==23.2.1",  # Async IO for files
    "anyio==4.1.0",
    "boto3==1.34.0",  # AWS S3 for DL s3 plugin
    "anyio==4.2.0",
    "boto3==1.34.11",  # AWS S3 for DL s3 plugin
    "chiavdf==1.1.1",  # timelord and vdf verification
    "chiabip158==1.3",  # bip158-style wallet filters
    "chiapos==2.0.3",  # proof of space
    "clvm==0.9.8",
    "clvm_tools==0.4.7",  # Currying, Program.to, other conveniences
    "chia_rs==0.3.3",
    "chia_rs==0.4.0",
    "clvm-tools-rs==0.1.40",  # Rust implementation of clvm_tools' compiler
    "aiohttp==3.9.1",  # HTTP server for full node rpc
    "aiosqlite==0.19.0",  # asyncio wrapper for sqlite, to store blocks
@ -32,7 +32,7 @@ dependencies = [
    "dnspython==2.4.2",  # Query DNS seeds
    "watchdog==2.2.0",  # Filesystem event watching - watches keyring.yaml
    "dnslib==0.9.23",  # dns lib
    "typing-extensions==4.8.0",  # typing backports like Protocol and TypedDict
    "typing-extensions==4.9.0",  # typing backports like Protocol and TypedDict
    "zstd==1.5.5.1",
    "packaging==23.2",
    "psutil==5.9.4",
@ -50,25 +50,25 @@ dev_dependencies = [
    "diff-cover==8.0.1",
    "pre-commit==3.5.0; python_version < '3.9'",
    "pre-commit==3.6.0; python_version >= '3.9'",
    "py3createtorrent==1.1.0",
    "pylint==3.0.2",
    "pytest==7.4.3",
    "py3createtorrent==1.2.0",
    "pylint==3.0.3",
    "pytest==7.4.4",
    "pytest-cov==4.1.0",
    "pytest-mock==3.12.0",
    "pytest-xdist==3.5.0",
    "pyupgrade==3.15.0",
    "twine==4.0.2",
    "isort==5.12.0",
    "flake8==6.1.0",
    "mypy==1.7.1",
    "black==23.11.0",
    "isort==5.13.2",
    "flake8==7.0.0",
    "mypy==1.8.0",
    "black==23.12.1",
    "lxml==4.9.3",
    "aiohttp_cors==0.7.0",  # For blackd
    "pyinstaller==5.13.0",
    "types-aiofiles==23.2.0.0",
    "pyinstaller==6.3.0",
    "types-aiofiles==23.2.0.20240106",
    "types-cryptography==3.3.23.2",
    "types-pyyaml==6.0.12.12",
    "types-setuptools==69.0.0.0",
    "types-setuptools==69.0.0.20240115",
]

legacy_keyring_dependencies = [
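A quick way to confirm an environment actually picked up these bumped pins is Python's standard importlib.metadata; here is a minimal sketch (the selection of packages is illustrative, copied from the new values in this hunk):

    # Minimal sketch: check installed versions against a few of the pins above.
    from importlib.metadata import PackageNotFoundError, version

    pins = {
        "anyio": "4.2.0",
        "boto3": "1.34.11",
        "chia_rs": "0.4.0",
        "pytest": "7.4.4",
    }

    for name, pinned in pins.items():
        try:
            installed = version(name)
        except PackageNotFoundError:
            print(f"{name}: not installed")
            continue
        status = "ok" if installed == pinned else f"mismatch (installed {installed})"
        print(f"{name}=={pinned}: {status}")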
@ -576,10 +576,9 @@ class TestBlockHeaderValidation:
                block.finished_sub_slots[-1],
                "infused_challenge_chain",
                InfusedChallengeChainSubSlot(
                    replace(
                        block.finished_sub_slots[
                            -1
                        ].infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
                    block.finished_sub_slots[
                        -1
                    ].infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.replace(
                        number_of_iterations=uint64(10000000),
                    )
                ),
@ -594,10 +593,9 @@ class TestBlockHeaderValidation:
                block.finished_sub_slots[-1],
                "infused_challenge_chain",
                InfusedChallengeChainSubSlot(
                    replace(
                        block.finished_sub_slots[
                            -1
                        ].infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
                    block.finished_sub_slots[
                        -1
                    ].infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.replace(
                        output=ClassgroupElement.get_default_element(),
                    )
                ),
@ -613,11 +611,10 @@ class TestBlockHeaderValidation:
                block.finished_sub_slots[-1],
                "infused_challenge_chain",
                InfusedChallengeChainSubSlot(
                    replace(
                        block.finished_sub_slots[
                            -1
                        ].infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
                        challenge=bytes32([0] * 32),
                    block.finished_sub_slots[
                        -1
                    ].infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.replace(
                        challenge=bytes32([0] * 32)
                    )
                ),
            )
@ -660,8 +657,7 @@ class TestBlockHeaderValidation:
            new_finished_ss = recursive_replace(
                block.finished_sub_slots[-1],
                "challenge_chain",
                replace(
                    block.finished_sub_slots[-1].challenge_chain,
                block.finished_sub_slots[-1].challenge_chain.replace(
                    infused_challenge_chain_sub_slot_hash=bytes([1] * 32),
                ),
            )
@ -671,8 +667,7 @@ class TestBlockHeaderValidation:
            new_finished_ss = recursive_replace(
                block.finished_sub_slots[-1],
                "challenge_chain",
                replace(
                    block.finished_sub_slots[-1].challenge_chain,
                block.finished_sub_slots[-1].challenge_chain.replace(
                    infused_challenge_chain_sub_slot_hash=block.finished_sub_slots[
                        -1
                    ].infused_challenge_chain.get_hash(),
@ -698,7 +693,7 @@ class TestBlockHeaderValidation:
            new_finished_ss_bad_rc = recursive_replace(
                block.finished_sub_slots[-1],
                "reward_chain",
                replace(block.finished_sub_slots[-1].reward_chain, infused_challenge_chain_sub_slot_hash=None),
                block.finished_sub_slots[-1].reward_chain.replace(infused_challenge_chain_sub_slot_hash=None),
            )
            block_bad = recursive_replace(
                block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_bad_rc]
@ -746,7 +741,7 @@ class TestBlockHeaderValidation:
            new_finished_ss = recursive_replace(
                blocks[-1].finished_sub_slots[-1],
                "challenge_chain",
                replace(blocks[-1].finished_sub_slots[-1].challenge_chain, subepoch_summary_hash=std_hash(b"0")),
                blocks[-1].finished_sub_slots[-1].challenge_chain.replace(subepoch_summary_hash=std_hash(b"0")),
            )
            block_bad = recursive_replace(
                blocks[-1], "finished_sub_slots", blocks[-1].finished_sub_slots[:-1] + [new_finished_ss]
@ -794,7 +789,7 @@ class TestBlockHeaderValidation:
            new_finished_ss = recursive_replace(
                blocks[-1].finished_sub_slots[-1],
                "reward_chain",
                replace(blocks[-1].finished_sub_slots[-1].reward_chain, challenge_chain_sub_slot_hash=bytes([3] * 32)),
                blocks[-1].finished_sub_slots[-1].reward_chain.replace(challenge_chain_sub_slot_hash=bytes([3] * 32)),
            )
            block_1_bad = recursive_replace(
                blocks[-1], "finished_sub_slots", blocks[-1].finished_sub_slots[:-1] + [new_finished_ss]
@ -1040,8 +1035,8 @@ class TestBlockHeaderValidation:
            new_finished_ss = recursive_replace(
                new_finished_ss,
                "reward_chain",
                replace(
                    new_finished_ss.reward_chain, challenge_chain_sub_slot_hash=new_finished_ss.challenge_chain.get_hash()
                new_finished_ss.reward_chain.replace(
                    challenge_chain_sub_slot_hash=new_finished_ss.challenge_chain.get_hash()
                ),
            )
            block_bad = recursive_replace(block, "finished_sub_slots", [new_finished_ss] + block.finished_sub_slots[1:])
@ -1075,8 +1070,7 @@ class TestBlockHeaderValidation:
            new_finished_ss = recursive_replace(
                new_finished_ss,
                "reward_chain",
                replace(
                    new_finished_ss.reward_chain,
                new_finished_ss.reward_chain.replace(
                    challenge_chain_sub_slot_hash=new_finished_ss.challenge_chain.get_hash(),
                ),
            )
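These hunks all make the same mechanical change: dataclasses.replace(obj, field=value) becomes the obj.replace(field=value) method, which the Rust-backed streamable types are expected to provide after the chia_rs==0.4.0 bump earlier in this diff (plain dataclasses.replace cannot operate on non-dataclass objects). A minimal sketch of the equivalence, using a hypothetical frozen dataclass as a stand-in:

    # Stand-in sketch: VDFInfoLike is hypothetical; chia_rs types expose an
    # equivalent replace method without being Python dataclasses.
    from dataclasses import dataclass, replace

    @dataclass(frozen=True)
    class VDFInfoLike:
        challenge: bytes
        number_of_iterations: int

        def replace(self, **changes: object) -> "VDFInfoLike":
            # Method form of dataclasses.replace, as used in the tests above.
            return replace(self, **changes)

    vdf = VDFInfoLike(challenge=b"\x00" * 32, number_of_iterations=1)
    old_style = replace(vdf, number_of_iterations=10_000_000)  # dataclasses.replace
    new_style = vdf.replace(number_of_iterations=10_000_000)   # method form
    assert old_style == new_style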
@ -5,12 +5,12 @@ from typing import Dict, List

import pytest

from benchmarks.utils import rand_hash
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.consensus.find_fork_point import find_fork_point_in_chain, lookup_fork_chain
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint32
from tests.util.benchmarks import rand_hash


class DummyChain:
@ -129,8 +129,8 @@ for path in test_paths:
    # TODO: design a configurable system for this
    process_count = {
        "macos": {False: 0, True: 4}.get(conf["parallel"], conf["parallel"]),
        "ubuntu": {False: 0, True: 4}.get(conf["parallel"], conf["parallel"]),
        "windows": {False: 0, True: 3}.get(conf["parallel"], conf["parallel"]),
        "ubuntu": {False: 0, True: 6}.get(conf["parallel"], conf["parallel"]),
        "windows": {False: 0, True: 6}.get(conf["parallel"], conf["parallel"]),
    }
    pytest_parallel_args = {os: f" -n {count}" for os, count in process_count.items()}
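The lookup idiom here maps the config value through a small dict: False disables xdist workers, True selects the per-OS default (now bumped to 6 on ubuntu and windows), and an explicit integer passes through unchanged via the .get() default. A minimal sketch of that resolution:

    # Minimal sketch of the {False: 0, True: N}.get(parallel, parallel) idiom.
    def resolve_process_count(parallel):
        return {False: 0, True: 6}.get(parallel, parallel)

    assert resolve_process_count(False) == 0  # parallelism disabled
    assert resolve_process_count(True) == 6   # per-OS default from this diff
    assert resolve_process_count(9) == 9      # explicit worker count passes through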
@ -323,6 +323,6 @@ def test_did_transfer(capsys: object, get_test_cli_clients: Tuple[TestRpcClients
    ]
    run_cli_command_and_assert(capsys, root_dir, command_args, assert_list)
    expected_calls: logType = {
        "did_transfer_did": [(w_id, t_address, "0.5", True, DEFAULT_TX_CONFIG.override(reuse_puzhash=True))],
        "did_transfer_did": [(w_id, t_address, 500000000000, True, DEFAULT_TX_CONFIG.override(reuse_puzhash=True))],
    }
    test_rpc_clients.wallet_rpc_client.check_log(expected_calls)
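The expected call now records the amount in mojos rather than as an XCH string. At 10**12 mojos per XCH (the same ratio as the "1 XCH, 1000000000000 mojos" output later in this diff), 0.5 XCH is exactly the new value:

    # One-line check of the unit change above.
    MOJOS_PER_XCH = 10**12

    def xch_to_mojos(xch: float) -> int:
        return int(xch * MOJOS_PER_XCH)

    assert xch_to_mojos(0.5) == 500000000000  # matches the new expected value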
@ -3,6 +3,7 @@ from __future__ import annotations

from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast

import pkg_resources
from chia_rs import Coin, G2Element

from chia.server.outbound_message import NodeType
@ -35,7 +36,7 @@ from tests.cmds.wallet.test_consts import (
    get_bytes32,
)

test_offer_file_path: Path = Path("tests") / "cmds" / "wallet" / "test_offer.toffer"
test_offer_file_path: Path = Path(pkg_resources.resource_filename(__name__, "test_offer.toffer"))
test_offer_file_name: str = str(test_offer_file_path)
test_offer_file_bech32: str = open(test_offer_file_name).read()
test_offer_id: str = "0xdfb7e8643376820ec995b0bcdb3fc1f764c16b814df5e074631263fcf1e00839"
@ -696,6 +697,7 @@ def test_make_offer(capsys: object, get_test_cli_clients: Tuple[TestRpcClients,
        "Including Fees: 1 XCH, 1000000000000 mojos",
        "Created offer with ID 0202020202020202020202020202020202020202020202020202020202020202",
    ]
    run_cli_command_and_assert(capsys, root_dir, command_args[:-4], ["without --override"])
    run_cli_command_and_assert(capsys, root_dir, command_args, assert_list)
    expected_calls: logType = {
        "cat_asset_id_to_name": [(request_cat_id,)],
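The path change swaps a working-directory-relative Path for pkg_resources.resource_filename, which resolves the data file relative to the module that ships it. A minimal sketch of the difference (the package name below is illustrative; the diff itself passes __name__ from inside the test module):

    # Sketch: CWD-relative lookup vs package-anchored lookup.
    import os
    import pkg_resources

    # Old style: resolved against the current working directory, so it breaks
    # when pytest is invoked from anywhere but the repo root.
    relative = os.path.join("tests", "cmds", "wallet", "test_offer.toffer")

    # New style: anchored to the package that owns the file (name illustrative).
    anchored = pkg_resources.resource_filename("tests.cmds.wallet", "test_offer.toffer")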
@ -2292,3 +2292,39 @@ async def test_wallet_log_in_changes_active_fingerprint(

    active_fingerprint = cast(int, (await wallet_rpc_api.get_logged_in_fingerprint(request={}))["fingerprint"])
    assert active_fingerprint == secondary_fingerprint


@pytest.mark.limit_consensus_modes(reason="does not depend on consensus rules")
@pytest.mark.anyio
async def test_mirrors(
    self_hostname: str, one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
) -> None:
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        self_hostname, one_wallet_and_one_simulator_services
    )
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        data_rpc_api = DataLayerRpcApi(data_layer)
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id = bytes32(hexstr_to_bytes(res["id"]))
        await farm_block_check_singleton(data_layer, full_node_api, ph, store_id, wallet=wallet_rpc_api.service)

        urls = ["http://127.0.0.1/8000", "http://127.0.0.1/8001"]
        res = await data_rpc_api.add_mirror({"id": store_id.hex(), "urls": urls, "amount": 1, "fee": 1})

        await farm_block_check_singleton(data_layer, full_node_api, ph, store_id, wallet=wallet_rpc_api.service)
        mirrors = await data_rpc_api.get_mirrors({"id": store_id.hex()})
        mirror_list = mirrors["mirrors"]
        assert len(mirror_list) == 1
        mirror = mirror_list[0]
        assert mirror["urls"] == ["http://127.0.0.1/8000", "http://127.0.0.1/8001"]
        coin_id = mirror["coin_id"]

        res = await data_rpc_api.delete_mirror({"coin_id": coin_id, "fee": 1})
        await farm_block_check_singleton(data_layer, full_node_api, ph, store_id, wallet=wallet_rpc_api.service)
        mirrors = await data_rpc_api.get_mirrors({"id": store_id.hex()})
        mirror_list = mirrors["mirrors"]
        assert len(mirror_list) == 0

        with pytest.raises(RuntimeError, match="URL list can't be empty"):
            res = await data_rpc_api.add_mirror({"id": store_id.hex(), "urls": [], "amount": 1, "fee": 1})
@ -308,7 +308,7 @@ async def test_get_ancestors_optimized(data_store: DataStore, tree_id: bytes32)
        if i > 25 and i <= 200 and random.randint(0, 4):
            is_insert = True
        if i > 200:
            hint_keys_values = await data_store.get_keys_values_dict(tree_id)
            hint_keys_values = await data_store.get_keys_values_compressed(tree_id)
        if not deleted_all:
            while node_count > 0:
                node_count -= 1
@ -383,7 +383,7 @@ async def test_batch_update(data_store: DataStore, tree_id: bytes32, use_optimiz

    batch: List[Dict[str, Any]] = []
    keys_values: Dict[bytes, bytes] = {}
    hint_keys_values: Optional[Dict[bytes, bytes]] = {} if use_optimized else None
    hint_keys_values: Optional[Dict[bytes32, bytes32]] = {} if use_optimized else None
    for operation in range(num_batches * num_ops_per_batch):
        [op_type] = random.choices(
            ["insert", "upsert-insert", "upsert-update", "delete"],
@ -490,7 +490,7 @@ async def test_upsert_ignores_existing_arguments(
) -> None:
    key = b"key"
    value = b"value1"
    hint_keys_values: Optional[Dict[bytes, bytes]] = {} if use_optimized else None
    hint_keys_values: Optional[Dict[bytes32, bytes32]] = {} if use_optimized else None

    await data_store.autoinsert(
        key=key,
@ -643,7 +643,7 @@ async def test_inserting_duplicate_key_fails(
        side=Side.RIGHT,
    )

    hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
    hint_keys_values = await data_store.get_keys_values_compressed(tree_id=tree_id)
    # TODO: more specific exception
    with pytest.raises(Exception):
        await data_store.insert(
@ -691,7 +691,7 @@ async def test_inserting_invalid_length_ancestor_hash_raises_original_exception(
async def test_autoinsert_balances_from_scratch(data_store: DataStore, tree_id: bytes32) -> None:
    random = Random()
    random.seed(100, version=2)
    hint_keys_values: Dict[bytes, bytes] = {}
    hint_keys_values: Dict[bytes32, bytes32] = {}
    hashes = []

    for i in range(2000):
@ -710,7 +710,7 @@ async def test_autoinsert_balances_from_scratch(data_store: DataStore, tree_id:
async def test_autoinsert_balances_gaps(data_store: DataStore, tree_id: bytes32) -> None:
    random = Random()
    random.seed(101, version=2)
    hint_keys_values: Dict[bytes, bytes] = {}
    hint_keys_values: Dict[bytes32, bytes32] = {}
    hashes = []

    for i in range(2000):
@ -749,7 +749,7 @@ async def test_delete_from_left_both_terminal(data_store: DataStore, tree_id: by

    hint_keys_values = None
    if use_hint:
        hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
        hint_keys_values = await data_store.get_keys_values_compressed(tree_id=tree_id)

    expected = Program.to(
        (
@ -789,7 +789,7 @@ async def test_delete_from_left_other_not_terminal(data_store: DataStore, tree_i

    hint_keys_values = None
    if use_hint:
        hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
        hint_keys_values = await data_store.get_keys_values_compressed(tree_id=tree_id)

    expected = Program.to(
        (
@ -827,7 +827,7 @@ async def test_delete_from_right_both_terminal(data_store: DataStore, tree_id: b

    hint_keys_values = None
    if use_hint:
        hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
        hint_keys_values = await data_store.get_keys_values_compressed(tree_id=tree_id)

    expected = Program.to(
        (
@ -867,7 +867,7 @@ async def test_delete_from_right_other_not_terminal(data_store: DataStore, tree_

    hint_keys_values = None
    if use_hint:
        hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
        hint_keys_values = await data_store.get_keys_values_compressed(tree_id=tree_id)

    expected = Program.to(
        (
@ -1208,6 +1208,33 @@ async def test_kv_diff_2(data_store: DataStore, tree_id: bytes32) -> None:
    assert diff_3 == set()


@pytest.mark.anyio
async def test_kv_diff_3(data_store: DataStore, tree_id: bytes32) -> None:
    insert_result = await data_store.autoinsert(
        key=b"000",
        value=b"000",
        tree_id=tree_id,
        status=Status.COMMITTED,
    )
    await data_store.delete(tree_id=tree_id, key=b"000", status=Status.COMMITTED)
    insert_result_2 = await data_store.autoinsert(
        key=b"000",
        value=b"001",
        tree_id=tree_id,
        status=Status.COMMITTED,
    )
    diff_1 = await data_store.get_kv_diff(tree_id, insert_result.node_hash, insert_result_2.node_hash)
    assert diff_1 == {DiffData(OperationType.DELETE, b"000", b"000"), DiffData(OperationType.INSERT, b"000", b"001")}
    insert_result_3 = await data_store.upsert(
        key=b"000",
        new_value=b"002",
        tree_id=tree_id,
        status=Status.COMMITTED,
    )
    diff_2 = await data_store.get_kv_diff(tree_id, insert_result_2.node_hash, insert_result_3.node_hash)
    assert diff_2 == {DiffData(OperationType.DELETE, b"000", b"001"), DiffData(OperationType.INSERT, b"000", b"002")}


@pytest.mark.anyio
async def test_rollback_to_generation(data_store: DataStore, tree_id: bytes32) -> None:
    await add_0123_example(data_store, tree_id)
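The new test_kv_diff_3 pins down the diff semantics for updates: changing a key's value between two roots is reported as a DELETE of the old pair plus an INSERT of the new pair. A simplified, self-contained sketch of that behavior (the DiffData/OperationType definitions here are stand-ins for the chia.data_layer types):

    # Stand-in sketch of the update-as-delete-plus-insert diff semantics.
    from enum import Enum
    from typing import Dict, NamedTuple, Set

    class OperationType(Enum):
        INSERT = 0
        DELETE = 1

    class DiffData(NamedTuple):
        type: OperationType
        key: bytes
        value: bytes

    def kv_diff(old: Dict[bytes, bytes], new: Dict[bytes, bytes]) -> Set[DiffData]:
        # Pairs in old that no longer match are deletions; their replacements
        # in new are insertions, so an update yields exactly one of each.
        deletes = {DiffData(OperationType.DELETE, k, v) for k, v in old.items() if new.get(k) != v}
        inserts = {DiffData(OperationType.INSERT, k, v) for k, v in new.items() if old.get(k) != v}
        return deletes | inserts

    assert kv_diff({b"000": b"001"}, {b"000": b"002"}) == {
        DiffData(OperationType.DELETE, b"000", b"001"),
        DiffData(OperationType.INSERT, b"000", b"002"),
    }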
@ -111,7 +111,7 @@ async def test_basic_coin_store(db_version: int, softfork_height: uint32, bt: Bl
        assert block.foliage_transaction_block is not None
        await coin_store.new_block(
            block.height,
            block.foliage_transaction_block.timestamp,
            uint64(block.foliage_transaction_block.timestamp),
            block.get_included_reward_coins(),
            tx_additions,
            tx_removals,
@ -121,7 +121,7 @@ async def test_basic_coin_store(db_version: int, softfork_height: uint32, bt: Bl
        with pytest.raises(Exception):
            await coin_store.new_block(
                block.height,
                block.foliage_transaction_block.timestamp,
                uint64(block.foliage_transaction_block.timestamp),
                block.get_included_reward_coins(),
                tx_additions,
                tx_removals,
@ -179,7 +179,7 @@ async def test_set_spent(db_version: int, bt: BlockTools) -> None:
        assert block.foliage_transaction_block is not None
        await coin_store.new_block(
            block.height,
            block.foliage_transaction_block.timestamp,
            uint64(block.foliage_transaction_block.timestamp),
            block.get_included_reward_coins(),
            additions,
            removals,
@ -227,7 +227,7 @@ async def test_num_unspent(bt: BlockTools, db_version: int) -> None:
        additions: List[Coin] = []
        await coin_store.new_block(
            block.height,
            block.foliage_transaction_block.timestamp,
            uint64(block.foliage_transaction_block.timestamp),
            block.get_included_reward_coins(),
            additions,
            removals,
@ -259,7 +259,7 @@ async def test_rollback(db_version: int, bt: BlockTools) -> None:
        assert block.foliage_transaction_block is not None
        await coin_store.new_block(
            block.height,
            block.foliage_transaction_block.timestamp,
            uint64(block.foliage_transaction_block.timestamp),
            block.get_included_reward_coins(),
            additions,
            removals,
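Each of these hunks makes the same fix: the timestamp is wrapped in uint64 before being handed to new_block, re-asserting the expected sized-int type for a value that may surface as a plain int. A minimal sketch of what the wrapper does, assuming chia.util.ints.uint64 behaves as an int subclass with range checking (as in this repo):

    # Minimal sketch of the repeated uint64(...) wrap in the tests above.
    from chia.util.ints import uint64

    timestamp = 1700000000       # plain int, as a field might surface it
    wrapped = uint64(timestamp)  # range-checked sized int, still an int

    assert isinstance(wrapped, int)
    assert wrapped == timestamp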
Some files were not shown because too many files have changed in this diff.