diff --git a/.github/workflows/cargo-audit.yml b/.github/workflows/cargo-audit.yml index 9bd9795f17..04046c11d4 100644 --- a/.github/workflows/cargo-audit.yml +++ b/.github/workflows/cargo-audit.yml @@ -46,4 +46,6 @@ jobs: cargo audit --ignore RUSTSEC-2023-0091 \ --ignore RUSTSEC-2024-0438 \ --ignore RUSTSEC-2025-0009 \ - --ignore RUSTSEC-2025-0055 + --ignore RUSTSEC-2025-0055 \ + --ignore RUSTSEC-2025-0073 \ + --ignore RUSTSEC-2025-0118 diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml similarity index 58% rename from .github/workflows/check-bittensor-e2e-tests.yml.yml rename to .github/workflows/check-bittensor-e2e-tests.yml index c133efab22..bad36f7d9d 100644 --- a/.github/workflows/check-bittensor-e2e-tests.yml.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml @@ -28,7 +28,7 @@ jobs: check-label: runs-on: ubuntu-latest outputs: - skip-bittensor-e2e-tests: ${{ steps.get-labels.outputs.skip-bittensor-e2e-tests }} + skip-bittensor-e2e-tests: ${{ steps.get-labels.outputs.skip-bittensor-e2e-tests || steps.set-default.outputs.skip-bittensor-e2e-tests }} steps: - name: Install dependencies run: | @@ -38,11 +38,12 @@ jobs: - name: Check out repository uses: actions/checkout@v4 with: - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.event.pull_request.head.ref }} + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} + ref: ${{ github.event.pull_request.head.ref || github.ref_name }} - name: Get labels from PR id: get-labels + if: github.event_name == 'pull_request' run: | LABELS=$(gh pr -R ${{ github.repository }} view ${{ github.event.pull_request.number }} --json labels --jq '.labels[].name') echo "Current labels: $LABELS" @@ -54,6 +55,12 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Set default skip value for workflow_dispatch + id: set-default + if: github.event_name == 'workflow_dispatch' + run: | + echo "skip-bittensor-e2e-tests=false" >> $GITHUB_OUTPUT + find-btcli-e2e-tests: needs: check-label if: needs.check-label.outputs.skip-bittensor-e2e-tests == 'false' @@ -74,11 +81,44 @@ jobs: working-directory: ${{ github.workspace }}/btcli run: git checkout staging + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + enable-cache: "false" + + - name: Create Python virtual environment + working-directory: ${{ github.workspace }}/btcli + run: uv venv --seed + + - name: Install dependencies + working-directory: ${{ github.workspace }}/btcli + run: | + source .venv/bin/activate + uv run --active pip install --upgrade pip + uv run --active pip install '.[dev]' + uv run --active pip install pytest + - name: Find e2e test files id: get-btcli-tests + working-directory: ${{ github.workspace }}/btcli run: | - test_files=$(find ${{ github.workspace }}/btcli/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. != ""))') - echo "test-files=$test_files" >> $GITHUB_OUTPUT + set -euo pipefail + test_matrix=$( + uv run pytest -q --collect-only tests/e2e_tests \ + | sed -n '/^tests\//p' \ + | jq -R -s -c ' + split("\n") + | map(select(. 
!= "")) + | map({nodeid: ., label: (sub("^tests/e2e_tests/"; ""))}) + ' + ) + echo "Found tests: $test_matrix" + echo "test-files=$test_matrix" >> "$GITHUB_OUTPUT" shell: bash find-sdk-e2e-tests: @@ -101,34 +141,148 @@ jobs: working-directory: ${{ github.workspace }}/bittensor run: git checkout staging + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + enable-cache: "false" + + - name: Create Python virtual environment + working-directory: ${{ github.workspace }}/bittensor + run: uv venv --seed + + - name: Install dependencies + working-directory: ${{ github.workspace }}/bittensor + run: | + source .venv/bin/activate + uv run --active pip install --upgrade pip + uv run --active pip install '.[dev]' + uv run --active pip install pytest + - name: Find e2e test files id: get-sdk-tests + working-directory: ${{ github.workspace }}/bittensor run: | - test_files=$(find ${{ github.workspace }}/bittensor/tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. != ""))') - echo "test-files=$test_files" >> $GITHUB_OUTPUT + set -euo pipefail + test_matrix=$( + uv run pytest -q --collect-only tests/e2e_tests \ + | sed -n '/^e2e_tests\//p' \ + | sed 's|^|tests/|' \ + | jq -R -s -c ' + split("\n") + | map(select(. != "")) + | map({nodeid: ., label: (sub("^tests/e2e_tests/"; ""))}) + ' + ) + echo "Found tests: $test_matrix" + echo "test-files=$test_matrix" >> "$GITHUB_OUTPUT" shell: bash - build-image-with-current-branch: + # build artifacts for fast-runtime and non-fast-runtime + artifacts: + name: Node • ${{ matrix.runtime }} • ${{ matrix.platform.arch }} needs: check-label if: needs.check-label.outputs.skip-bittensor-e2e-tests == 'false' - runs-on: ubuntu-latest + strategy: + matrix: + platform: + - runner: [self-hosted, type-ccx33] + triple: x86_64-unknown-linux-gnu + arch: amd64 + runtime: ["fast-runtime", "non-fast-runtime"] + + runs-on: ${{ matrix.platform.runner }} + steps: - name: Checkout code uses: actions/checkout@v4 with: - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.event.pull_request.head.ref }} + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} + ref: ${{ github.event.pull_request.head.ref || github.ref_name }} + + - name: Install Rust + dependencies + run: | + chmod +x ./scripts/install_build_env.sh + ./scripts/install_build_env.sh - - name: Patch non-fast-runtime node + - name: Add Rust target triple + run: | + source "$HOME/.cargo/env" + rustup target add ${{ matrix.platform.triple }} + + - name: Patch limits for local run run: | chmod +x ./scripts/localnet_patch.sh ./scripts/localnet_patch.sh - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + - name: Build binaries + run: | + export PATH="$HOME/.cargo/bin:$PATH" + export CARGO_BUILD_TARGET="${{ matrix.platform.triple }}" + + if [ "${{ matrix.runtime }}" = "fast-runtime" ]; then + ./scripts/localnet.sh --build-only + else + ./scripts/localnet.sh False --build-only + fi + + # use `ci_target` name bc .dockerignore excludes `target` + - name: Prepare artifacts for upload + run: | + RUNTIME="${{ matrix.runtime }}" + TRIPLE="${{ matrix.platform.triple }}" + + # Verify binaries exist before copying + BINARY_PATH="target/${RUNTIME}/${TRIPLE}/release/node-subtensor" + WASM_PATH="target/${RUNTIME}/${TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm" - - name: Set up Docker Buildx - uses: 
docker/setup-buildx-action@v3 + if [[ ! -f "$BINARY_PATH" ]]; then + echo "❌ Error: Binary not found at $BINARY_PATH" + exit 1 + fi + + if [[ ! -f "$WASM_PATH" ]]; then + echo "❌ Error: WASM file not found at $WASM_PATH" + exit 1 + fi + + mkdir -p build/ci_target/${RUNTIME}/${TRIPLE}/release/ + cp -v "$BINARY_PATH" \ + build/ci_target/${RUNTIME}/${TRIPLE}/release/ + + mkdir -p build/ci_target/${RUNTIME}/${TRIPLE}/release/wbuild/node-subtensor-runtime/ + cp -v "$WASM_PATH" \ + build/ci_target/${RUNTIME}/${TRIPLE}/release/wbuild/node-subtensor-runtime/ + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: binaries-${{ matrix.platform.triple }}-${{ matrix.runtime }} + path: build/ + if-no-files-found: error + + # Collect all artifacts and build a Docker image + build-image-with-current-branch: + needs: [check-label, artifacts] + if: needs.check-label.outputs.skip-bittensor-e2e-tests == 'false' + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} + ref: ${{ github.event.pull_request.head.ref || github.ref_name }} + + - name: Download all binary artifacts + uses: actions/download-artifact@v5 + with: + pattern: binaries-* + path: build/ + merge-multiple: true - name: Move Docker data-root to /mnt/data run: | @@ -141,7 +295,7 @@ jobs: docker info | grep "Docker Root Dir" - name: Build Docker Image - run: docker build -f Dockerfile-localnet -t localnet . + run: docker build -f Dockerfile-localnet --build-arg BUILT_IN_CI="Boom shakalaka" -t localnet . - name: Save Docker Image as Tar run: docker save -o /mnt/data/subtensor-localnet.tar localnet @@ -162,31 +316,18 @@ jobs: runs-on: ubuntu-latest strategy: fail-fast: false - max-parallel: 16 + max-parallel: 32 matrix: - rust-branch: - - stable - rust-target: - - x86_64-unknown-linux-gnu - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-btcli-e2e-tests.outputs.test-files) }} - - env: - RELEASE_NAME: development - RUSTV: ${{ matrix.rust-branch }} - RUST_BACKTRACE: full - RUST_BIN_DIR: target/${{ matrix.rust-target }} - TARGET: ${{ matrix.rust-target }} + include: ${{ fromJson(needs.find-btcli-e2e-tests.outputs.test-files) }} timeout-minutes: 60 - name: "cli: ${{ matrix.test-file }}" + name: "cli: ${{ matrix.label }}" steps: - name: Check-out repository uses: actions/checkout@v4 with: - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.event.pull_request.head.ref }} + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} + ref: ${{ github.event.pull_request.head.ref || github.ref_name }} - name: Install uv uses: astral-sh/setup-uv@v5 @@ -230,7 +371,7 @@ jobs: set +e for i in 1 2; do echo "🔁 Attempt $i: Running tests" - uv run pytest ${{ matrix.test-file }} -s + uv run pytest "${{ matrix.nodeid }}" -s status=$? 
if [ $status -eq 0 ]; then echo "✅ Tests passed on attempt $i" @@ -256,31 +397,18 @@ jobs: runs-on: ubuntu-latest strategy: fail-fast: false - max-parallel: 16 + max-parallel: 64 matrix: - rust-branch: - - stable - rust-target: - - x86_64-unknown-linux-gnu - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-sdk-e2e-tests.outputs.test-files) }} - - env: - RELEASE_NAME: development - RUSTV: ${{ matrix.rust-branch }} - RUST_BACKTRACE: full - RUST_BIN_DIR: target/${{ matrix.rust-target }} - TARGET: ${{ matrix.rust-target }} + include: ${{ fromJson(needs.find-sdk-e2e-tests.outputs.test-files) }} timeout-minutes: 60 - name: "sdk: ${{ matrix.test-file }}" + name: "sdk: ${{ matrix.label }}" steps: - name: Check-out repository uses: actions/checkout@v4 with: - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.event.pull_request.head.ref }} + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} + ref: ${{ github.event.pull_request.head.ref || github.ref_name }} - name: Install uv uses: astral-sh/setup-uv@v5 @@ -324,7 +452,7 @@ jobs: set +e for i in 1 2; do echo "🔁 Attempt $i: Running tests" - uv run pytest ${{ matrix.test-file }} -s + uv run pytest "${{ matrix.nodeid }}" -s status=$? if [ $status -eq 0 ]; then echo "✅ Tests passed on attempt $i" diff --git a/.github/workflows/check-rust.yml b/.github/workflows/check-rust.yml index 6cdc56f26a..4b975959d0 100644 --- a/.github/workflows/check-rust.yml +++ b/.github/workflows/check-rust.yml @@ -38,7 +38,7 @@ jobs: - name: Install Rust Nightly uses: actions-rs/toolchain@v1 with: - toolchain: nightly + toolchain: stable components: rustfmt - name: Utilize Shared Rust Cache @@ -48,7 +48,22 @@ jobs: cache-on-failure: true - name: cargo fmt - run: cargo +nightly fmt --check --all + run: | + set -euo pipefail + # Run cargo fmt and capture both stdout and stderr + output=$(cargo fmt --check --all 2>&1) || { + echo "❌ cargo fmt failed with non-zero exit code" + echo "$output" + exit 1 + } + # Check for panic/ICE messages even if exit code was 0 + if echo "$output" | grep -qiE "(the compiler unexpectedly panicked|panicked at|Internal Compiler Error|ICE|error: the compiler unexpectedly panicked)"; then + echo "❌ rustfmt panicked (ICE detected) - this should fail the build" + echo "$output" + exit 1 + fi + echo "$output" + echo "✅ cargo fmt completed successfully" cargo-clippy-default-features: name: cargo clippy diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index c79be55103..2218e886ea 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -9,6 +9,10 @@ on: description: "The branch or tag to use as the Docker image tag (optional)." required: false default: "" + pr-number: + description: "PR number to build (e.g., 2283). If specified, creates tag pr-{number} and does not update latest." 
+ required: false + default: "" push: branches: - devnet-ready @@ -25,6 +29,7 @@ permissions: packages: write actions: read security-events: write + pull-requests: read jobs: setup: @@ -34,22 +39,52 @@ jobs: ref: ${{ steps.vars.outputs.ref }} latest_tag: ${{ steps.vars.outputs.latest_tag }} steps: + - name: Get PR branch (if pr-number is specified) + id: pr-info + if: ${{ github.event.inputs.pr-number != '' }} + uses: actions/github-script@v7 + with: + script: | + const prNumber = '${{ github.event.inputs.pr-number }}'; + const { data: pr } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: parseInt(prNumber) + }); + core.setOutput('head_ref', pr.head.ref); + core.setOutput('head_sha', pr.head.sha); + - name: Determine Docker tag and ref id: vars run: | if [[ "${{ github.event_name }}" == "pull_request" ]]; then echo "ref=${{ github.head_ref }}" >> $GITHUB_OUTPUT echo "tag=${{ github.base_ref }}" >> $GITHUB_OUTPUT + elif [[ -n "${{ github.event.inputs.pr-number }}" ]]; then + # PR build mode: use pr-{number} as tag, don't update latest + pr_number="${{ github.event.inputs.pr-number }}" + tag="pr-${pr_number}" + echo "tag=$tag" >> $GITHUB_OUTPUT + + # Use branch from PR info if available, otherwise use branch-or-tag input + if [[ -n "${{ steps.pr-info.outputs.head_ref }}" ]]; then + echo "ref=${{ steps.pr-info.outputs.head_ref }}" >> $GITHUB_OUTPUT + elif [[ -n "${{ github.event.inputs.branch-or-tag }}" ]]; then + echo "ref=${{ github.event.inputs.branch-or-tag }}" >> $GITHUB_OUTPUT + else + echo "ref=main" >> $GITHUB_OUTPUT + fi + echo "latest_tag=false" >> $GITHUB_OUTPUT else tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" - echo "ref=${{ github.ref_name }}" >> $GITHUB_OUTPUT + echo "ref=${{ github.event.inputs.branch-or-tag || github.ref_name }}" >> $GITHUB_OUTPUT echo "tag=$tag" >> $GITHUB_OUTPUT - fi - - if [[ "$tag" != "devnet-ready" ]]; then - echo "latest_tag=true" >> $GITHUB_OUTPUT - else - echo "latest_tag=false" >> $GITHUB_OUTPUT + + if [[ "$tag" != "devnet-ready" ]]; then + echo "latest_tag=true" >> $GITHUB_OUTPUT + else + echo "latest_tag=false" >> $GITHUB_OUTPUT + fi fi # build artifacts for fast-runtime and non-fast-runtime diff --git a/.gitignore b/.gitignore index f6a9e635e2..3f20eb58e2 100644 --- a/.gitignore +++ b/.gitignore @@ -46,4 +46,7 @@ specs/*.json bt.snap # localnet spec -scripts/specs/local.json \ No newline at end of file +scripts/specs/local.json + +# Node modules +node_modules diff --git a/Cargo.lock b/Cargo.lock index 4b32cbe31a..eeffdabd0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18038,6 +18038,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", + "substrate-fixed", "subtensor-runtime-common", "subtensor-swap-interface", ] diff --git a/chain-extensions/Cargo.toml b/chain-extensions/Cargo.toml index ae69b94d4c..9727439e7a 100644 --- a/chain-extensions/Cargo.toml +++ b/chain-extensions/Cargo.toml @@ -34,6 +34,7 @@ pallet-subtensor-proxy.workspace = true pallet-drand.workspace = true subtensor-swap-interface.workspace = true num_enum.workspace = true +substrate-fixed.workspace = true [lints] workspace = true @@ -64,4 +65,5 @@ std = [ "pallet-drand/std", "subtensor-swap-interface/std", "num_enum/std", + "substrate-fixed/std", ] diff --git a/chain-extensions/src/lib.rs b/chain-extensions/src/lib.rs index e53fac765f..eaaee70d27 100644 --- a/chain-extensions/src/lib.rs +++ b/chain-extensions/src/lib.rs @@ -18,7 +18,9 @@ use pallet_subtensor_proxy as pallet_proxy; use 
pallet_subtensor_proxy::WeightInfo; use sp_runtime::{DispatchError, Weight, traits::StaticLookup}; use sp_std::marker::PhantomData; +use substrate_fixed::types::U96F32; use subtensor_runtime_common::{AlphaCurrency, NetUid, ProxyType, TaoCurrency}; +use subtensor_swap_interface::SwapHandler; #[derive(DebugNoBound)] pub struct SubtensorChainExtension(PhantomData); @@ -33,7 +35,8 @@ impl ChainExtension for SubtensorChainExtension where T: pallet_subtensor::Config + pallet_contracts::Config - + pallet_proxy::Config, + + pallet_proxy::Config + + pallet_subtensor_swap::Config, T::AccountId: Clone, <::Lookup as StaticLookup>::Source: From<::AccountId>, { @@ -54,7 +57,8 @@ impl SubtensorChainExtension where T: pallet_subtensor::Config + pallet_contracts::Config - + pallet_proxy::Config, + + pallet_proxy::Config + + pallet_subtensor_swap::Config, T::AccountId: Clone, { fn dispatch(env: &mut Env) -> Result @@ -506,6 +510,26 @@ where } } } + FunctionId::GetAlphaPriceV1 => { + let netuid: NetUid = env + .read_as() + .map_err(|_| DispatchError::Other("Failed to decode input parameters"))?; + + let current_alpha_price = + as SwapHandler>::current_alpha_price( + netuid.into(), + ); + + let price = current_alpha_price.saturating_mul(U96F32::from_num(1_000_000_000)); + let price: u64 = price.saturating_to_num(); + + let encoded_result = price.encode(); + + env.write_output(&encoded_result) + .map_err(|_| DispatchError::Other("Failed to write output"))?; + + Ok(RetVal::Converging(Output::Success as u32)) + } } } } diff --git a/chain-extensions/src/tests.rs b/chain-extensions/src/tests.rs index 378fa084a1..bd6f46c8ab 100644 --- a/chain-extensions/src/tests.rs +++ b/chain-extensions/src/tests.rs @@ -2,7 +2,7 @@ use super::{SubtensorChainExtension, SubtensorExtensionEnv, mock}; use crate::types::{FunctionId, Output}; -use codec::Encode; +use codec::{Decode, Encode}; use frame_support::{assert_ok, weights::Weight}; use frame_system::RawOrigin; use pallet_contracts::chain_extension::RetVal; @@ -10,6 +10,7 @@ use pallet_subtensor::DefaultMinStake; use sp_core::Get; use sp_core::U256; use sp_runtime::DispatchError; +use substrate_fixed::types::U96F32; use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyTrait, NetUid, TaoCurrency}; use subtensor_swap_interface::SwapHandler; @@ -756,12 +757,12 @@ impl SubtensorExtensionEnv for MockEnv { } fn charge_weight(&mut self, weight: Weight) -> Result<(), DispatchError> { - if let Some(expected) = self.expected_weight { - if weight != expected { - return Err(DispatchError::Other( - "unexpected weight charged by mock env", - )); - } + if let Some(expected) = self.expected_weight + && weight != expected + { + return Err(DispatchError::Other( + "unexpected weight charged by mock env", + )); } self.charged_weight = Some(weight); Ok(()) @@ -964,3 +965,41 @@ fn unstake_all_success_unstakes_balance() { assert!(post_balance > pre_balance); }); } + +#[test] +fn get_alpha_price_returns_encoded_price() { + mock::new_test_ext(1).execute_with(|| { + let owner_hotkey = U256::from(8001); + let owner_coldkey = U256::from(8002); + let caller = U256::from(8003); + + let netuid = mock::add_dynamic_network(&owner_hotkey, &owner_coldkey); + + // Set up reserves to establish a price + let tao_reserve = TaoCurrency::from(150_000_000_000u64); + let alpha_reserve = AlphaCurrency::from(100_000_000_000u64); + mock::setup_reserves(netuid, tao_reserve, alpha_reserve); + + // Get expected price from swap handler + let expected_price = + as SwapHandler>::current_alpha_price( + netuid.into(), 
+ ); + let expected_price_scaled = expected_price.saturating_mul(U96F32::from_num(1_000_000_000)); + let expected_price_u64: u64 = expected_price_scaled.saturating_to_num(); + + let mut env = MockEnv::new(FunctionId::GetAlphaPriceV1, caller, netuid.encode()); + + let ret = SubtensorChainExtension::::dispatch(&mut env).unwrap(); + assert_success(ret); + assert!(env.charged_weight().is_none()); + + // Decode the output + let output_price: u64 = Decode::decode(&mut &env.output()[..]).unwrap(); + + assert_eq!( + output_price, expected_price_u64, + "Price should match expected value" + ); + }); +} diff --git a/chain-extensions/src/types.rs b/chain-extensions/src/types.rs index 7c0bfe4202..ee6298ad5b 100644 --- a/chain-extensions/src/types.rs +++ b/chain-extensions/src/types.rs @@ -20,6 +20,7 @@ pub enum FunctionId { SetColdkeyAutoStakeHotkeyV1 = 12, AddProxyV1 = 13, RemoveProxyV1 = 14, + GetAlphaPriceV1 = 15, } #[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, Debug)] diff --git a/common/src/lib.rs b/common/src/lib.rs index 28a33c2ae6..658f8b2e01 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -184,11 +184,37 @@ impl TryFrom for ProxyType { 14 => Ok(Self::SudoUncheckedSetCode), 15 => Ok(Self::SwapHotkey), 16 => Ok(Self::SubnetLeaseBeneficiary), + 17 => Ok(Self::RootClaim), _ => Err(()), } } } +impl From for u8 { + fn from(proxy_type: ProxyType) -> Self { + match proxy_type { + ProxyType::Any => 0, + ProxyType::Owner => 1, + ProxyType::NonCritical => 2, + ProxyType::NonTransfer => 3, + ProxyType::Senate => 4, + ProxyType::NonFungible => 5, + ProxyType::Triumvirate => 6, + ProxyType::Governance => 7, + ProxyType::Staking => 8, + ProxyType::Registration => 9, + ProxyType::Transfer => 10, + ProxyType::SmallTransfer => 11, + ProxyType::RootWeights => 12, + ProxyType::ChildKeys => 13, + ProxyType::SudoUncheckedSetCode => 14, + ProxyType::SwapHotkey => 15, + ProxyType::SubnetLeaseBeneficiary => 16, + ProxyType::RootClaim => 17, + } + } +} + impl Default for ProxyType { // allow all Calls; required to be most permissive fn default() -> Self { diff --git a/contract-tests/bittensor/lib.rs b/contract-tests/bittensor/lib.rs index 03367339ca..48e8d18aea 100755 --- a/contract-tests/bittensor/lib.rs +++ b/contract-tests/bittensor/lib.rs @@ -22,6 +22,7 @@ pub enum FunctionId { SetColdkeyAutoStakeHotkeyV1 = 12, AddProxyV1 = 13, RemoveProxyV1 = 14, + GetAlphaPriceV1 = 15, } #[ink::chain_extension(extension = 0x1000)] @@ -127,6 +128,9 @@ pub trait RuntimeReadWrite { #[ink(function = 14)] fn remove_proxy(delegate: ::AccountId); + + #[ink(function = 15)] + fn get_alpha_price(netuid: NetUid) -> u64; } #[ink::scale_derive(Encode, Decode, TypeInfo)] @@ -412,5 +416,13 @@ mod bittensor { .remove_proxy(delegate.into()) .map_err(|_e| ReadWriteErrorCode::WriteFailed) } + + #[ink(message)] + pub fn get_alpha_price(&self, netuid: u16) -> Result { + self.env() + .extension() + .get_alpha_price(netuid.into()) + .map_err(|_e| ReadWriteErrorCode::ReadFailed) + } } } diff --git a/contract-tests/run-ci.sh b/contract-tests/run-ci.sh index a5a8c2f52e..0ea0e72297 100755 --- a/contract-tests/run-ci.sh +++ b/contract-tests/run-ci.sh @@ -2,6 +2,16 @@ echo "start run-ci.sh" +cd contract-tests + +cd bittensor + +rustup component add rust-src +cargo install cargo-contract +cargo contract build --release + +cd ../.. 
+ scripts/localnet.sh &>/dev/null & i=1 @@ -15,7 +25,7 @@ while [ $i -le 2000 ]; do done # port not available exit with error -if [ "$i" -eq 1000 ]; then +if [ "$i" -eq 2000 ]; then exit 1 fi @@ -28,14 +38,6 @@ fi cd contract-tests -cd bittensor - -rustup component add rust-src -cargo install cargo-contract -cargo contract build --release - -cd .. - # required for papi in get-metadata.sh, but we cannot run yarn before papi as it adds the descriptors to the package.json which won't resolve npm i -g polkadot-api diff --git a/contract-tests/src/contracts/proxy.ts b/contract-tests/src/contracts/proxy.ts index 48ffd0a50f..1a3eab3594 100644 --- a/contract-tests/src/contracts/proxy.ts +++ b/contract-tests/src/contracts/proxy.ts @@ -144,5 +144,41 @@ export const IProxyABI = [ "outputs": [], "stateMutability": "nonpayable", "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "account", + "type": "bytes32" + } + ], + "name": "getProxies", + "outputs": [ + { + "components": [ + { + "internalType": "bytes32", + "name": "delegate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "proxy_type", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "delay", + "type": "uint256" + } + ], + "internalType": "struct IProxy.ProxyInfo[]", + "name": "", + "type": "tuple[]" + } + ], + "stateMutability": "view", + "type": "function" } ]; diff --git a/contract-tests/test/pure-proxy.precompile.test.ts b/contract-tests/test/pure-proxy.precompile.test.ts index 1a34b02cf7..f893b6d77a 100644 --- a/contract-tests/test/pure-proxy.precompile.test.ts +++ b/contract-tests/test/pure-proxy.precompile.test.ts @@ -46,8 +46,10 @@ async function getProxies(api: TypedApi, address: string) { describe("Test pure proxy precompile", () => { const evmWallet = generateRandomEthersWallet(); + // only used for edge case and normal proxy const evmWallet2 = generateRandomEthersWallet(); const evmWallet3 = generateRandomEthersWallet(); + const evmWallet4 = generateRandomEthersWallet(); const receiver = getRandomSubstrateKeypair(); let api: TypedApi @@ -61,6 +63,7 @@ describe("Test pure proxy precompile", () => { await forceSetBalanceToEthAddress(api, evmWallet.address) await forceSetBalanceToEthAddress(api, evmWallet2.address) await forceSetBalanceToEthAddress(api, evmWallet3.address) + await forceSetBalanceToEthAddress(api, evmWallet4.address) }) it("Call createPureProxy, then use proxy to call transfer", async () => { @@ -130,29 +133,40 @@ describe("Test pure proxy precompile", () => { const proxies = await api.query.Proxy.Proxies.getValue(convertH160ToSS58(evmWallet2.address)) const contract = new ethers.Contract(IPROXY_ADDRESS, IProxyABI, evmWallet2) + const proxiesFromContract = await contract.getProxies(convertH160ToPublicKey(evmWallet2.address)) + assert.equal(proxiesFromContract.length, proxies[0].length, "proxies length should be equal") + const type = 0; const delay = 0; const tx = await contract.addProxy(convertH160ToPublicKey(evmWallet3.address), type, delay) await tx.wait() + const proxiesAfterAdd = await api.query.Proxy.Proxies.getValue(convertH160ToSS58(evmWallet2.address)) + const proxiesList = proxiesAfterAdd[0].map(proxy => proxy.delegate) - const proxiesAfterAdd = await await api.query.Proxy.Proxies.getValue(convertH160ToSS58(evmWallet2.address)) + const proxiesFromContractAfterAdd = await contract.getProxies(convertH160ToPublicKey(evmWallet2.address)) - const length = proxiesAfterAdd[0].length - assert.equal(length, proxies[0].length + 1, "proxy should be 
set") - const proxy = proxiesAfterAdd[0][proxiesAfterAdd[0].length - 1] + assert.equal(proxiesFromContractAfterAdd.length, proxiesList.length, "proxy length should be equal") - assert.equal(proxy.delegate, convertH160ToSS58(evmWallet3.address), "proxy should be set") + for (let index = 0; index < proxiesFromContractAfterAdd.length; index++) { + const proxyInfo = proxiesFromContractAfterAdd[index] + let proxySs58 = convertPublicKeyToSs58(proxyInfo[0]) + assert.ok(proxiesList.includes(proxySs58), "proxy should be set") + if (index === proxiesFromContractAfterAdd.length - 1) { + assert.equal(Number(proxyInfo[1]), type, "proxy_type should match") + assert.equal(Number(proxyInfo[2]), delay, "delay should match") + } + } + assert.equal(proxiesList.length, proxies[0].length + 1, "proxy should be set") + const proxy = proxiesList[proxiesList.length - 1] + assert.equal(proxy, convertH160ToSS58(evmWallet3.address), "proxy should be set") const balance = (await api.query.System.Account.getValue(convertPublicKeyToSs58(receiver.publicKey))).data.free - const amount = 1000000000; const contract2 = new ethers.Contract(IPROXY_ADDRESS, IProxyABI, evmWallet3) - - const callCode = await getTransferCallCode(api, receiver, amount) const tx2 = await contract2.proxyCall(convertH160ToPublicKey(evmWallet2.address), [type], callCode) await tx2.wait() @@ -160,4 +174,37 @@ describe("Test pure proxy precompile", () => { const balanceAfter = (await api.query.System.Account.getValue(convertPublicKeyToSs58(receiver.publicKey))).data.free assert.equal(balanceAfter, balance + BigInt(amount), "balance should be increased") }) + + it("Call addProxy many times, then check getProxies is correct", async () => { + const proxies = await api.query.Proxy.Proxies.getValue(convertH160ToSS58(evmWallet4.address)) + const contract = new ethers.Contract(IPROXY_ADDRESS, IProxyABI, evmWallet4) + assert.equal(proxies[0].length, 0, "proxies length should be 0") + + const proxiesFromContract = await contract.getProxies(convertH160ToPublicKey(evmWallet4.address)) + assert.equal(proxiesFromContract.length, proxies[0].length, "proxies length should be equal") + + const type = 1; + const delay = 2; + + for (let i = 0; i < 5; i++) { + const evmWallet = generateRandomEthersWallet() + const tx = await contract.addProxy(convertH160ToPublicKey(evmWallet.address), type, delay) + await tx.wait() + } + + const proxiesAfterAdd = await api.query.Proxy.Proxies.getValue(convertH160ToSS58(evmWallet4.address)) + const proxiesList = proxiesAfterAdd[0].map(proxy => proxy.delegate) + + const proxiesFromContractAfterAdd = await contract.getProxies(convertH160ToPublicKey(evmWallet4.address)) + + assert.equal(proxiesFromContractAfterAdd.length, proxiesList.length, "proxy length should be equal") + + for (let index = 0; index < proxiesFromContractAfterAdd.length; index++) { + const proxyInfo = proxiesFromContractAfterAdd[index] + let proxySs58 = convertPublicKeyToSs58(proxyInfo[0]) + assert.ok(proxiesList.includes(proxySs58), "proxy should be set") + assert.equal(Number(proxyInfo[1]), type, "proxy_type should match") + assert.equal(Number(proxyInfo[2]), delay, "delay should match") + } + }) }); diff --git a/contract-tests/test/wasm.contract.test.ts b/contract-tests/test/wasm.contract.test.ts index 680a4a56f2..26d5c87924 100644 --- a/contract-tests/test/wasm.contract.test.ts +++ b/contract-tests/test/wasm.contract.test.ts @@ -563,4 +563,25 @@ describe("Test wasm contract", () => { assert.ok(proxiesAfterRemove !== undefined) assert.ok(proxiesAfterRemove[0].length 
=== 0) }) + + it("Can get alpha price", async () => { + const message = inkClient.message("get_alpha_price") + const data = message.encode({ + netuid: netuid, + }) + + const response = await api.apis.ContractsApi.call( + convertPublicKeyToSs58(hotkey.publicKey), + contractAddress, + BigInt(0), + undefined, + undefined, + Binary.fromBytes(data.asBytes()), + ) + + assert.ok(response.result.success) + const result = message.decode(response.result.value).value.value + + assert.ok(result !== undefined) + }) }); \ No newline at end of file diff --git a/node/src/benchmarking.rs b/node/src/benchmarking.rs index 4acece56f4..5430a75d9e 100644 --- a/node/src/benchmarking.rs +++ b/node/src/benchmarking.rs @@ -5,8 +5,8 @@ use crate::client::FullClient; use node_subtensor_runtime as runtime; -use node_subtensor_runtime::pallet_subtensor; use node_subtensor_runtime::{check_nonce, transaction_payment_wrapper}; +use node_subtensor_runtime::{pallet_subtensor, sudo_wrapper}; use runtime::{BalancesCall, SystemCall}; use sc_cli::Result; use sc_client_api::BlockBackend; @@ -139,6 +139,7 @@ pub fn create_benchmark_extrinsic( transaction_payment_wrapper::ChargeTransactionPaymentWrapper::new( pallet_transaction_payment::ChargeTransactionPayment::::from(0), ), + sudo_wrapper::SudoTransactionExtension::::new(), pallet_subtensor::transaction_extension::SubtensorTransactionExtension::< runtime::Runtime, >::new(), @@ -160,6 +161,7 @@ pub fn create_benchmark_extrinsic( (), (), (), + (), None, ), ); diff --git a/node/src/command.rs b/node/src/command.rs index 67cb200e43..3350c1443e 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -272,7 +272,7 @@ fn start_babe_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { { log::info!("Failed to aquire DB lock, trying again in 1s..."); std::thread::sleep(std::time::Duration::from_secs(1)); - return start_babe_service(arg_matches); + start_babe_service(arg_matches) // Unknown error, return it. } else { log::error!("Failed to start Babe service: {e:?}"); diff --git a/node/src/consensus/aura_consensus.rs b/node/src/consensus/aura_consensus.rs index 57012733f6..57b5559fd3 100644 --- a/node/src/consensus/aura_consensus.rs +++ b/node/src/consensus/aura_consensus.rs @@ -120,7 +120,7 @@ impl ConsensusMechanism for AuraConsensus { Self {} } - fn build_biq(&mut self) -> Result + fn build_biq(&mut self) -> Result, sc_service::Error> where NumberFor: BlockNumberOps, { diff --git a/node/src/consensus/babe_consensus.rs b/node/src/consensus/babe_consensus.rs index 8c9a974d20..42d3022512 100644 --- a/node/src/consensus/babe_consensus.rs +++ b/node/src/consensus/babe_consensus.rs @@ -128,7 +128,7 @@ impl ConsensusMechanism for BabeConsensus { } } - fn build_biq(&mut self) -> Result + fn build_biq(&mut self) -> Result, sc_service::Error> where NumberFor: BlockNumberOps, { diff --git a/node/src/consensus/consensus_mechanism.rs b/node/src/consensus/consensus_mechanism.rs index 359b89f6ef..a500f5efe0 100644 --- a/node/src/consensus/consensus_mechanism.rs +++ b/node/src/consensus/consensus_mechanism.rs @@ -77,7 +77,7 @@ pub trait ConsensusMechanism { fn new() -> Self; /// Builds a `BIQ` that uses the ConsensusMechanism. - fn build_biq(&mut self) -> Result; + fn build_biq(&mut self) -> Result, sc_service::Error>; /// Returns the slot duration. 
fn slot_duration(&self, client: &FullClient) -> Result; diff --git a/node/src/mev_shield/author.rs b/node/src/mev_shield/author.rs index 7d9238a809..99000d4ac6 100644 --- a/node/src/mev_shield/author.rs +++ b/node/src/mev_shield/author.rs @@ -391,6 +391,8 @@ where >::new(pallet_transaction_payment::ChargeTransactionPayment::< runtime::Runtime, >::from(0u64)), + node_subtensor_runtime::sudo_wrapper::SudoTransactionExtension::::new( + ), pallet_subtensor::transaction_extension::SubtensorTransactionExtension::< runtime::Runtime, >::new(), @@ -427,6 +429,7 @@ where (), // CheckNonce::Implicit = () (), // CheckWeight::Implicit = () (), // ChargeTransactionPaymentWrapper::Implicit = () + (), // SudoTransactionExtension::Implicit = () (), // SubtensorTransactionExtension::Implicit = () (), // DrandPriority::Implicit = () None, // CheckMetadataHash::Implicit = Option<[u8; 32]> diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 08589e530b..7b8124144d 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -627,5 +627,23 @@ mod benchmarks { _(RawOrigin::Root, 1u16.into()/*netuid*/, 256u16/*max_n*/)/*sudo_trim_to_max_allowed_uids()*/; } + #[benchmark] + fn sudo_set_min_non_immune_uids() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); + // create a network for netuid = 1 + pallet_subtensor::Pallet::::init_new_network( + 1u16.into(), /* netuid */ + 1u16, /* sudo_tempo */ + ); + + #[extrinsic_call] + _( + RawOrigin::Root, + 1u16.into(), /* netuid */ + 12u16, /* min */ + ); /* sudo_set_min_non_immune_uids() */ + } + //impl_benchmark_test_suite!(AdminUtils, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index b95d3ac82c..c3999312b3 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1618,7 +1618,7 @@ pub mod pallet { /// Weight is handled by the `#[pallet::weight]` attribute. 
#[pallet::call_index(62)] #[pallet::weight(( - Weight::from_parts(10_020_000, 3507) + Weight::from_parts(5_744_000, 3507) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Operational, @@ -2221,6 +2221,25 @@ pub mod pallet { log::debug!("set_tao_flow_smoothing_factor( {smoothing_factor:?} ) "); Ok(()) } + + /// Sets the minimum number of non-immortal & non-immune UIDs that must remain in a subnet + #[pallet::call_index(84)] + #[pallet::weight(( + Weight::from_parts(7_114_000, 0) + .saturating_add(::DbWeight::get().writes(1)) + .saturating_add(::DbWeight::get().reads(0_u64)), + DispatchClass::Operational, + Pays::Yes + ))] + pub fn sudo_set_min_non_immune_uids( + origin: OriginFor, + netuid: NetUid, + min: u16, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_min_non_immune_uids(netuid, min); + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 1aaefc8f8d..024871e60f 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2456,7 +2456,7 @@ fn test_trim_to_max_allowed_uids() { register_ok_neuron(netuid, U256::from(n), U256::from(n + i), 0); } - // Run some block to ensure stake weights are set and that we are past the immunity period + // Run some blocks to ensure stake weights are set and that we are past the immunity period // for all neurons run_to_block((ImmunityPeriod::::get(netuid) + 1).into()); @@ -2479,6 +2479,8 @@ fn test_trim_to_max_allowed_uids() { let u64_values: Vec = values.iter().map(|&v| v as u64).collect(); Emission::::set(netuid, alpha_values); + // NOTE: `Rank`, `Trust`, and `PruningScores` are *not* trimmed anymore, + // but we can still populate them without asserting on them. Rank::::insert(netuid, values.clone()); Trust::::insert(netuid, values.clone()); Consensus::::insert(netuid, values.clone()); @@ -2566,7 +2568,7 @@ fn test_trim_to_max_allowed_uids() { assert_eq!(MaxAllowedUids::::get(netuid), new_max_n); // Ensure the emission has been trimmed correctly, keeping the highest emitters - // and immune and compressed to the left + // (after respecting immunity/owner exclusions) and compressed to the left assert_eq!( Emission::::get(netuid), vec![ @@ -2580,16 +2582,16 @@ fn test_trim_to_max_allowed_uids() { 74.into() ] ); - // Ensure rest of storage has been trimmed correctly + + // Ensure rest of (active) storage has been trimmed correctly let expected_values = vec![56, 91, 34, 77, 65, 88, 51, 74]; let expected_bools = vec![true, true, true, true, true, true, true, true]; let expected_u64_values = vec![56, 91, 34, 77, 65, 88, 51, 74]; - assert_eq!(Rank::::get(netuid), expected_values); - assert_eq!(Trust::::get(netuid), expected_values); + + // NOTE: Rank/Trust/PruningScores are no longer trimmed; do not assert on them. 
assert_eq!(Active::::get(netuid), expected_bools); assert_eq!(Consensus::::get(netuid), expected_values); assert_eq!(Dividends::::get(netuid), expected_values); - assert_eq!(PruningScores::::get(netuid), expected_values); assert_eq!(ValidatorTrust::::get(netuid), expected_values); assert_eq!(ValidatorPermit::::get(netuid), expected_bools); assert_eq!(StakeWeight::::get(netuid), expected_values); @@ -2669,7 +2671,7 @@ fn test_trim_to_max_allowed_uids() { assert_eq!(uid, Some(i)); } - // EVM association have been remapped correctly (uids: 7 -> 2, 14 -> 7) + // EVM associations have been remapped correctly (uids: 6 -> 2, 14 -> 7) assert_eq!( AssociatedEvmAddress::::get(netuid, 2), Some((sp_core::H160::from_slice(b"12345678901234567891"), now)) @@ -2865,3 +2867,23 @@ fn test_sudo_set_min_allowed_uids() { ); }); } + +#[test] +fn test_sudo_set_min_non_immune_uids() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + add_network(netuid, 10); + + let to_be_set: u16 = 12; + let init_value: u16 = SubtensorModule::get_min_non_immune_uids(netuid); + + assert_ok!(AdminUtils::sudo_set_min_non_immune_uids( + <::RuntimeOrigin>::root(), + netuid, + to_be_set + )); + + assert!(init_value != to_be_set); + assert_eq!(SubtensorModule::get_min_non_immune_uids(netuid), to_be_set); + }); +} diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 21f7866459..6081edad19 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -25,11 +25,13 @@ impl Pallet { Self::run_coinbase(block_emission); // --- 5. Update moving prices AFTER using them for emissions. Self::update_moving_prices(); - // --- 6. Set pending children on the epoch; but only after the coinbase has been run. + // --- 6. Update root prop AFTER using it for emissions. + Self::update_root_prop(); + // --- 7. Set pending children on the epoch; but only after the coinbase has been run. Self::try_set_pending_children(block_number); - // --- 7. Run auto-claim root divs. + // --- 8. Run auto-claim root divs. Self::run_auto_claim_root_divs(last_block_hash); - // --- 8. Populate root coldkey maps. + // --- 9. Populate root coldkey maps. Self::populate_root_coldkey_staking_maps(); // Return ok. @@ -227,9 +229,9 @@ impl Pallet { if next_value >= U110F18::saturating_from_num(Self::get_max_difficulty(netuid)) { Self::get_max_difficulty(netuid) } else if next_value <= U110F18::saturating_from_num(Self::get_min_difficulty(netuid)) { - return Self::get_min_difficulty(netuid); + Self::get_min_difficulty(netuid) } else { - return next_value.saturating_to_num::(); + next_value.saturating_to_num::() } } @@ -261,9 +263,9 @@ impl Pallet { if next_value >= U110F18::saturating_from_num(Self::get_max_burn(netuid)) { Self::get_max_burn(netuid) } else if next_value <= U110F18::saturating_from_num(Self::get_min_burn(netuid)) { - return Self::get_min_burn(netuid); + Self::get_min_burn(netuid) } else { - return next_value.saturating_to_num::().into(); + next_value.saturating_to_num::().into() } } @@ -277,6 +279,29 @@ impl Pallet { } } + pub fn update_root_prop() { + let subnets_to_emit_to: Vec = + Self::get_subnets_to_emit_to(&Self::get_all_subnet_netuids()); + // Only update root_prop for subnets that we emit to. 
+ for netuid_i in subnets_to_emit_to.iter() { + let root_prop = Self::root_proportion(*netuid_i); + + RootProp::::insert(netuid_i, root_prop); + } + } + + pub fn root_proportion(netuid: NetUid) -> U96F32 { + let alpha_issuance = U96F32::from_num(Self::get_alpha_issuance(netuid)); + let root_tao: U96F32 = U96F32::from_num(SubnetTAO::::get(NetUid::ROOT)); + let tao_weight: U96F32 = root_tao.saturating_mul(Self::get_tao_weight()); + + let root_proportion: U96F32 = tao_weight + .checked_div(tao_weight.saturating_add(alpha_issuance)) + .unwrap_or(U96F32::from_num(0.0)); + + root_proportion + } + pub fn reveal_crv3_commits() { let netuids: Vec = Self::get_all_subnet_netuids(); for netuid in netuids.into_iter().filter(|netuid| *netuid != NetUid::ROOT) { diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index d508a0162b..328ce3805c 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -298,6 +298,8 @@ impl Pallet { SubnetTaoInEmission::::remove(netuid); SubnetVolume::::remove(netuid); SubnetMovingPrice::::remove(netuid); + SubnetTaoFlow::::remove(netuid); + SubnetEmaTaoFlow::::remove(netuid); SubnetTaoProvided::::remove(netuid); // --- 13. Token / mechanism / registration toggles. diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index a3862f72f3..2091946598 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -184,13 +184,6 @@ impl Pallet { // --- 3. Inject ALPHA for participants. let cut_percent: U96F32 = Self::get_float_subnet_owner_cut(); - // Get total TAO on root. - let root_tao: U96F32 = asfloat!(SubnetTAO::::get(NetUid::ROOT)); - log::debug!("root_tao: {root_tao:?}"); - // Get tao_weight - let tao_weight: U96F32 = root_tao.saturating_mul(Self::get_tao_weight()); - log::debug!("tao_weight: {tao_weight:?}"); - for netuid_i in subnets_to_emit_to.iter() { // Get alpha_out for this block. let mut alpha_out_i: U96F32 = *alpha_out.get(netuid_i).unwrap_or(&asfloat!(0)); @@ -211,14 +204,8 @@ impl Pallet { *total = total.saturating_add(tou64!(owner_cut_i).into()); }); - // Get ALPHA issuance. - let alpha_issuance: U96F32 = asfloat!(Self::get_alpha_issuance(*netuid_i)); - log::debug!("alpha_issuance: {alpha_issuance:?}"); - // Get root proportional dividends. - let root_proportion: U96F32 = tao_weight - .checked_div(tao_weight.saturating_add(alpha_issuance)) - .unwrap_or(asfloat!(0.0)); + let root_proportion = Self::root_proportion(*netuid_i); log::debug!("root_proportion: {root_proportion:?}"); // Get root alpha from root prop. @@ -504,10 +491,11 @@ impl Pallet { // Insert subnet owner hotkey in the beginning of the list if valid and not // already present - if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) { - if Uids::::get(netuid, &owner_hk).is_some() && !owner_hotkeys.contains(&owner_hk) { - owner_hotkeys.insert(0, owner_hk); - } + if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) + && Uids::::get(netuid, &owner_hk).is_some() + && !owner_hotkeys.contains(&owner_hk) + { + owner_hotkeys.insert(0, owner_hk); } owner_hotkeys @@ -521,22 +509,22 @@ impl Pallet { root_alpha_dividends: BTreeMap, ) { // Distribute the owner cut. - if let Ok(owner_coldkey) = SubnetOwner::::try_get(netuid) { - if let Ok(owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { - // Increase stake for owner hotkey and coldkey. 
- log::debug!( - "owner_hotkey: {owner_hotkey:?} owner_coldkey: {owner_coldkey:?}, owner_cut: {owner_cut:?}" - ); - let real_owner_cut = Self::increase_stake_for_hotkey_and_coldkey_on_subnet( - &owner_hotkey, - &owner_coldkey, - netuid, - owner_cut, - ); - // If the subnet is leased, notify the lease logic that owner cut has been distributed. - if let Some(lease_id) = SubnetUidToLeaseId::::get(netuid) { - Self::distribute_leased_network_dividends(lease_id, real_owner_cut); - } + if let Ok(owner_coldkey) = SubnetOwner::::try_get(netuid) + && let Ok(owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) + { + // Increase stake for owner hotkey and coldkey. + log::debug!( + "owner_hotkey: {owner_hotkey:?} owner_coldkey: {owner_coldkey:?}, owner_cut: {owner_cut:?}" + ); + let real_owner_cut = Self::increase_stake_for_hotkey_and_coldkey_on_subnet( + &owner_hotkey, + &owner_coldkey, + netuid, + owner_cut, + ); + // If the subnet is leased, notify the lease logic that owner cut has been distributed. + if let Some(lease_id) = SubnetUidToLeaseId::::get(netuid) { + Self::distribute_leased_network_dividends(lease_id, real_owner_cut); } } diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 2cbbfae77d..6a53c9767b 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -335,10 +335,10 @@ pub fn inplace_row_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>]) { // Sum across each row (dim=0) of a matrix. pub fn row_sum(x: &[Vec]) -> Vec { - if let Some(first_row) = x.first() { - if first_row.is_empty() { - return vec![]; - } + if let Some(first_row) = x.first() + && first_row.is_empty() + { + return vec![]; } x.iter().map(|row| row.iter().sum()).collect() } @@ -424,10 +424,10 @@ pub fn inplace_col_max_upscale_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>], // Pass 1: compute per-column max for sparse_row in sparse_matrix.iter() { for (j, value) in sparse_row.iter() { - if let Some(m) = col_max.get_mut(*j as usize) { - if *m < *value { - *m = *value; - } + if let Some(m) = col_max.get_mut(*j as usize) + && *m < *value + { + *m = *value; } } } @@ -1147,10 +1147,10 @@ pub fn weighted_median_col_sparse( while let (Some(&s), Some(sparse_row)) = (stake_it.next(), score_it.next()) { if s > zero { for &(c, val) in sparse_row.iter() { - if let Some(col_vec) = use_score.get_mut(c as usize) { - if let Some(cell) = col_vec.get_mut(k) { - *cell = val; - } + if let Some(col_vec) = use_score.get_mut(c as usize) + && let Some(cell) = col_vec.get_mut(k) + { + *cell = val; } } k = k.saturating_add(1); @@ -1289,10 +1289,10 @@ pub fn interpolate_sparse( let v1 = row1.get(j).unwrap_or(&zero); let v2 = row2.get(j).unwrap_or(&zero); let interp = v1.saturating_add(ratio.saturating_mul(v2.saturating_sub(*v1))); - if zero < interp { - if let Some(res) = result.get_mut(i) { - res.push((j as u16, interp)); - } + if zero < interp + && let Some(res) = result.get_mut(i) + { + res.push((j as u16, interp)); } } } @@ -1338,10 +1338,10 @@ pub fn mat_vec_mul_sparse( for (j, value) in matrix_row.iter() { if let Some(vector_value) = vector.get(*j as usize) { let new_value = value.saturating_mul(*vector_value); - if new_value != I32F32::saturating_from_num(0.0) { - if let Some(result_row) = result.get_mut(i) { - result_row.push((*j, new_value)); - } + if new_value != I32F32::saturating_from_num(0.0) + && let Some(result_row) = result.get_mut(i) + { + result_row.push((*j, new_value)); } } } @@ -1592,8 +1592,3 @@ pub fn mat_ema_alpha( pub fn safe_ln(value: 
I32F32) -> I32F32 { ln(value).unwrap_or(I32F32::saturating_from_num(0.0)) } - -/// Safe exp function, returns 0 if value is 0. -pub fn safe_exp(value: I32F32) -> I32F32 { - exp(value).unwrap_or(I32F32::saturating_from_num(0.0)) -} diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 5e4dd1f43e..f56b8a89a4 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -18,10 +18,7 @@ pub struct EpochTerms { pub stake_weight: u16, pub active: bool, pub emission: AlphaCurrency, - pub rank: u16, - pub trust: u16, pub consensus: u16, - pub pruning_score: u16, pub validator_trust: u16, pub new_validator_permit: bool, pub bond: Vec<(u16, u16)>, @@ -128,22 +125,16 @@ impl Pallet { let active = extract_from_sorted_terms!(terms_sorted, active); let emission = extract_from_sorted_terms!(terms_sorted, emission); - let rank = extract_from_sorted_terms!(terms_sorted, rank); - let trust = extract_from_sorted_terms!(terms_sorted, trust); let consensus = extract_from_sorted_terms!(terms_sorted, consensus); let dividend = extract_from_sorted_terms!(terms_sorted, dividend); - let pruning_score = extract_from_sorted_terms!(terms_sorted, pruning_score); let validator_trust = extract_from_sorted_terms!(terms_sorted, validator_trust); let new_validator_permit = extract_from_sorted_terms!(terms_sorted, new_validator_permit); let stake_weight = extract_from_sorted_terms!(terms_sorted, stake_weight); Active::::insert(netuid, active.clone()); Emission::::insert(netuid, emission); - Rank::::insert(netuid, rank); - Trust::::insert(netuid, trust); Consensus::::insert(netuid, consensus); Dividends::::insert(netuid, dividend); - PruningScores::::insert(netuid, pruning_score); ValidatorTrust::::insert(netuid, validator_trust); ValidatorPermit::::insert(netuid, new_validator_permit); StakeWeight::::insert(netuid, stake_weight); @@ -325,9 +316,6 @@ impl Pallet { // == Consensus, Validator Trust == // ================================ - // Compute preranks: r_j = SUM(i) w_ij * s_i - let preranks: Vec = matmul(&weights, &active_stake); - // Consensus majority ratio, e.g. 51%. let kappa: I32F32 = Self::get_float_kappa(netuid); // Calculate consensus as stake-weighted median of weights. @@ -345,9 +333,6 @@ impl Pallet { // Compute ranks: r_j = SUM(i) w_ij * s_i let mut ranks: Vec = matmul(&clipped_weights, &active_stake); - // Compute server trust: ratio of rank after vs. rank before. - let trust: Vec = vecdiv(&ranks, &preranks); - inplace_normalize(&mut ranks); let incentive: Vec = ranks.clone(); log::trace!("I: {:?}", &incentive); @@ -494,10 +479,6 @@ impl Pallet { log::trace!("nCE: {:?}", &normalized_combined_emission); log::trace!("CE: {:?}", &combined_emission); - // Set pruning scores using combined emission scores. 
- let pruning_scores: Vec = normalized_combined_emission.clone(); - log::trace!("P: {:?}", &pruning_scores); - // =================== // == Value storage == // =================== @@ -506,14 +487,6 @@ impl Pallet { .iter() .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); - let cloned_ranks: Vec = ranks - .iter() - .map(|xi| fixed_proportion_to_u16(*xi)) - .collect::>(); - let cloned_trust: Vec = trust - .iter() - .map(|xi| fixed_proportion_to_u16(*xi)) - .collect::>(); let cloned_consensus: Vec = consensus .iter() .map(|xi| fixed_proportion_to_u16(*xi)) @@ -526,7 +499,6 @@ impl Pallet { .iter() .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); - let cloned_pruning_scores: Vec = vec_max_upscale_to_u16(&pruning_scores); let cloned_validator_trust: Vec = validator_trust .iter() .map(|xi| fixed_proportion_to_u16(*xi)) @@ -534,12 +506,9 @@ impl Pallet { StakeWeight::::insert(netuid, cloned_stake_weight.clone()); Active::::insert(netuid, active.clone()); Emission::::insert(netuid, cloned_emission); - Rank::::insert(netuid, cloned_ranks); - Trust::::insert(netuid, cloned_trust); Consensus::::insert(netuid, cloned_consensus); Incentive::::insert(NetUidStorageIndex::from(netuid), cloned_incentive); Dividends::::insert(netuid, cloned_dividends); - PruningScores::::insert(netuid, cloned_pruning_scores); ValidatorTrust::::insert(netuid, cloned_validator_trust); ValidatorPermit::::insert(netuid, new_validator_permits.clone()); @@ -761,10 +730,10 @@ impl Pallet { // ---------- v3 ------------------------------------------------------ for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid_index) { for (who, cb, ..) in q.iter() { - if !Self::is_commit_expired(netuid, *cb) { - if let Some(cell) = uid_of(who).and_then(|i| commit_blocks.get_mut(i)) { - *cell = (*cell).min(*cb); - } + if !Self::is_commit_expired(netuid, *cb) + && let Some(cell) = uid_of(who).and_then(|i| commit_blocks.get_mut(i)) + { + *cell = (*cell).min(*cb); } } } @@ -790,10 +759,6 @@ impl Pallet { // == Consensus, Validator Trust == // ================================ - // Compute preranks: r_j = SUM(i) w_ij * s_i - let preranks: Vec = matmul_sparse(&weights, &active_stake, n); - log::trace!("Ranks (before): {:?}", &preranks); - // Consensus majority ratio, e.g. 51%. let kappa: I32F32 = Self::get_float_kappa(netuid); // Calculate consensus as stake-weighted median of weights. @@ -814,11 +779,6 @@ impl Pallet { // Compute ranks: r_j = SUM(i) w_ij * s_i. let mut ranks: Vec = matmul_sparse(&clipped_weights, &active_stake, n); - log::trace!("Ranks (after): {:?}", &ranks); - - // Compute server trust: ratio of rank after vs. rank before. - let trust: Vec = vecdiv(&ranks, &preranks); // range: I32F32(0, 1) - log::trace!("Trust: {:?}", &trust); inplace_normalize(&mut ranks); // range: I32F32(0, 1) let incentive: Vec = ranks.clone(); @@ -1004,10 +964,6 @@ impl Pallet { ); log::trace!("Combined Emission: {:?}", &combined_emission); - // Set pruning scores using combined emission scores. 
- let pruning_scores: Vec = normalized_combined_emission.clone(); - log::trace!("Pruning Scores: {:?}", &pruning_scores); - // =========================== // == Populate epoch output == // =========================== @@ -1016,14 +972,6 @@ impl Pallet { .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); let cloned_emission = combined_emission.clone(); - let cloned_ranks: Vec = ranks - .iter() - .map(|xi| fixed_proportion_to_u16(*xi)) - .collect::>(); - let cloned_trust: Vec = trust - .iter() - .map(|xi| fixed_proportion_to_u16(*xi)) - .collect::>(); let cloned_consensus: Vec = consensus .iter() .map(|xi| fixed_proportion_to_u16(*xi)) @@ -1036,7 +984,6 @@ impl Pallet { .iter() .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); - let cloned_pruning_scores: Vec = vec_max_upscale_to_u16(&pruning_scores); let cloned_validator_trust: Vec = validator_trust .iter() .map(|xi| fixed_proportion_to_u16(*xi)) @@ -1056,13 +1003,7 @@ impl Pallet { .unwrap_or_default(); terms.active = active.get(terms.uid).copied().unwrap_or_default(); terms.emission = cloned_emission.get(terms.uid).copied().unwrap_or_default(); - terms.rank = cloned_ranks.get(terms.uid).copied().unwrap_or_default(); - terms.trust = cloned_trust.get(terms.uid).copied().unwrap_or_default(); terms.consensus = cloned_consensus.get(terms.uid).copied().unwrap_or_default(); - terms.pruning_score = cloned_pruning_scores - .get(terms.uid) - .copied() - .unwrap_or_default(); terms.validator_trust = cloned_validator_trust .get(terms.uid) .copied() @@ -1536,7 +1477,7 @@ impl Pallet { // sigmoid = 1. / (1. + e^(-steepness * (combined_diff - 0.5))) let sigmoid = one.saturating_div( - one.saturating_add(safe_exp( + one.saturating_add(exp_safe( alpha_sigmoid_steepness .saturating_div(I32F32::from_num(-100)) .saturating_mul(combined_diff.saturating_sub(I32F32::from_num(0.5))), diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 6d61baadf3..ef2d44e68b 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -95,7 +95,7 @@ pub mod pallet { use sp_std::collections::vec_deque::VecDeque; use sp_std::vec; use sp_std::vec::Vec; - use substrate_fixed::types::{I64F64, I96F32, U64F64}; + use substrate_fixed::types::{I64F64, I96F32, U64F64, U96F32}; use subtensor_macros::freeze_struct; use subtensor_runtime_common::{ AlphaCurrency, Currency, MechId, NetUid, NetUidStorageIndex, TaoCurrency, @@ -994,6 +994,12 @@ pub mod pallet { I96F32::saturating_from_num(0.0) } + /// Default subnet root proportion. + #[pallet::type_value] + pub fn DefaultRootProp() -> U96F32 { + U96F32::saturating_from_num(0.0) + } + /// Default subnet root claimable #[pallet::type_value] pub fn DefaultRootClaimable() -> BTreeMap { @@ -1057,7 +1063,12 @@ pub mod pallet { 128 } - /// Global minimum activity cutoff value + /// Default value for MinNonImmuneUids. + #[pallet::type_value] + pub fn DefaultMinNonImmuneUids() -> u16 { + 10u16 + } + #[pallet::storage] pub type MinActivityCutoff = StorageValue<_, u16, ValueQuery, DefaultMinActivityCutoff>; @@ -1279,6 +1290,11 @@ pub mod pallet { pub type SubnetMovingPrice = StorageMap<_, Identity, NetUid, I96F32, ValueQuery, DefaultMovingPrice>; + /// --- MAP ( netuid ) --> root_prop | The subnet root proportion. + #[pallet::storage] + pub type RootProp = + StorageMap<_, Identity, NetUid, U96F32, ValueQuery, DefaultRootProp>; + /// --- MAP ( netuid ) --> total_volume | The total amount of TAO bought and sold since the start of the network. 
#[pallet::storage] pub type SubnetVolume = @@ -2294,6 +2310,11 @@ pub mod pallet { pub type NetworkRegistrationStartBlock = StorageValue<_, u64, ValueQuery, DefaultNetworkRegistrationStartBlock>; + /// --- MAP ( netuid ) --> minimum required number of non-immortal & non-immune UIDs + #[pallet::storage] + pub type MinNonImmuneUids = + StorageMap<_, Identity, NetUid, u16, ValueQuery, DefaultMinNonImmuneUids>; + /// ============================ /// ==== Subnet Mechanisms ===== /// ============================ diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index ef36b17921..8c0b2210ec 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -49,7 +49,7 @@ mod dispatches { /// /// * 'weights' (Vec): /// - The u16 integer encoded weights. Interpreted as rational - /// values in the range [0,1]. They must sum to in32::MAX. + /// values in the range [0,1]. They must sum to in32::MAX. /// /// * 'version_key' ( u64 ): /// - The network version key to check if the validator is up to date. @@ -128,7 +128,7 @@ mod dispatches { /// /// * 'weights' (Vec): /// - The u16 integer encoded weights. Interpreted as rational - /// values in the range [0,1]. They must sum to in32::MAX. + /// values in the range [0,1]. They must sum to in32::MAX. /// /// * 'version_key' ( u64 ): /// - The network version key to check if the validator is up to date. @@ -712,7 +712,7 @@ mod dispatches { /// #[pallet::call_index(2)] #[pallet::weight((Weight::from_parts(340_800_000, 0) - .saturating_add(T::DbWeight::get().reads(27_u64)) + .saturating_add(T::DbWeight::get().reads(25_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( origin: OriginFor, @@ -1016,8 +1016,8 @@ mod dispatches { /// #[pallet::call_index(6)] #[pallet::weight((Weight::from_parts(197_900_000, 0) - .saturating_add(T::DbWeight::get().reads(27_u64)) - .saturating_add(T::DbWeight::get().writes(23)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(24_u64)) + .saturating_add(T::DbWeight::get().writes(20_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register( origin: OriginFor, netuid: NetUid, @@ -1033,8 +1033,8 @@ mod dispatches { /// Register the hotkey to root network #[pallet::call_index(62)] #[pallet::weight((Weight::from_parts(135_900_000, 0) - .saturating_add(T::DbWeight::get().reads(22_u64)) - .saturating_add(T::DbWeight::get().writes(19_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(19_u64)) + .saturating_add(T::DbWeight::get().writes(16_u64)), DispatchClass::Normal, Pays::Yes))] pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_root_register(origin, hotkey) } @@ -1042,8 +1042,8 @@ mod dispatches { /// User register a new subnetwork via burning token #[pallet::call_index(7)] #[pallet::weight((Weight::from_parts(354_200_000, 0) - .saturating_add(T::DbWeight::get().reads(50_u64)) - .saturating_add(T::DbWeight::get().writes(43)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(47_u64)) + .saturating_add(T::DbWeight::get().writes(40_u64)), DispatchClass::Normal, Pays::Yes))] pub fn burned_register( origin: OriginFor, netuid: NetUid, @@ -1230,8 +1230,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) - .saturating_add(T::DbWeight::get().reads(39_u64)) - 
.saturating_add(T::DbWeight::get().writes(56_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(36_u64)) + .saturating_add(T::DbWeight::get().writes(53_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1243,6 +1243,7 @@ mod dispatches { #[pallet::weight((Weight::from_parts(91_000_000, 0) .saturating_add(T::DbWeight::get().reads(27)) .saturating_add(T::DbWeight::get().writes(22)), DispatchClass::Normal, Pays::No))] + #[cfg(feature = "pow-faucet")] pub fn faucet( origin: OriginFor, block_number: u64, @@ -1517,8 +1518,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - .saturating_add(T::DbWeight::get().reads(38_u64)) - .saturating_add(T::DbWeight::get().writes(55_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(35_u64)) + .saturating_add(T::DbWeight::get().writes(52_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -1587,7 +1588,7 @@ mod dispatches { /// - Thrown if key has hit transaction rate limit #[pallet::call_index(84)] #[pallet::weight((Weight::from_parts(358_500_000, 0) - .saturating_add(T::DbWeight::get().reads(44_u64)) + .saturating_add(T::DbWeight::get().reads(41_u64)) .saturating_add(T::DbWeight::get().writes(26_u64)), DispatchClass::Normal, Pays::Yes))] pub fn unstake_all_alpha(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_unstake_all_alpha(origin, hotkey) @@ -1701,7 +1702,7 @@ mod dispatches { #[pallet::call_index(87)] #[pallet::weight(( Weight::from_parts(351_300_000, 0) - .saturating_add(T::DbWeight::get().reads(40_u64)) + .saturating_add(T::DbWeight::get().reads(37_u64)) .saturating_add(T::DbWeight::get().writes(24_u64)), DispatchClass::Normal, Pays::Yes @@ -1766,7 +1767,7 @@ mod dispatches { /// #[pallet::call_index(88)] #[pallet::weight((Weight::from_parts(402_900_000, 0) - .saturating_add(T::DbWeight::get().reads(27_u64)) + .saturating_add(T::DbWeight::get().reads(25_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake_limit( origin: OriginFor, @@ -1830,7 +1831,7 @@ mod dispatches { /// #[pallet::call_index(89)] #[pallet::weight((Weight::from_parts(377_400_000, 0) - .saturating_add(T::DbWeight::get().reads(31_u64)) + .saturating_add(T::DbWeight::get().reads(29_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)), DispatchClass::Normal, Pays::Yes))] pub fn remove_stake_limit( origin: OriginFor, @@ -1874,7 +1875,7 @@ mod dispatches { #[pallet::call_index(90)] #[pallet::weight(( Weight::from_parts(411_500_000, 0) - .saturating_add(T::DbWeight::get().reads(40_u64)) + .saturating_add(T::DbWeight::get().reads(37_u64)) .saturating_add(T::DbWeight::get().writes(24_u64)), DispatchClass::Normal, Pays::Yes @@ -2052,7 +2053,7 @@ mod dispatches { /// Without limit_price it remove all the stake similar to `remove_stake` extrinsic #[pallet::call_index(103)] #[pallet::weight((Weight::from_parts(395_300_000, 10142) - .saturating_add(T::DbWeight::get().reads(31_u64)) + .saturating_add(T::DbWeight::get().reads(29_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)), DispatchClass::Normal, Pays::Yes))] pub fn remove_stake_full_limit( origin: T::RuntimeOrigin, diff --git a/pallets/subtensor/src/macros/errors.rs 
b/pallets/subtensor/src/macros/errors.rs index 5a15330075..6c3d7a35df 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -266,5 +266,7 @@ mod errors { InvalidRootClaimThreshold, /// Exceeded subnet limit number or zero. InvalidSubnetNumber, + /// Unintended precision loss when unstaking alpha + PrecisionLoss, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index d015205d4d..a06e035d86 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -449,6 +449,8 @@ mod events { hotkey: T::AccountId, }, + /// The minimum allowed non-Immune UIDs has been set. + MinNonImmuneUidsSet(NetUid, u16), /// Root emissions have been claimed for a coldkey on all subnets and hotkeys. /// Parameters: /// (coldkey) diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index b62263e370..31be3e5e4f 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -151,6 +151,8 @@ mod hooks { .saturating_add(migrations::migrate_kappa_map_to_default::migrate_kappa_map_to_default::()) // Remove obsolete map entries .saturating_add(migrations::migrate_remove_tao_dividends::migrate_remove_tao_dividends::()) + // Remove Trust, Rank, and Pruning Score + .saturating_add(migrations::migrate_clear_rank_trust_pruning_maps::migrate_clear_rank_trust_pruning_maps::()) // Re-init tao flows .saturating_add(migrations::migrate_init_tao_flow::migrate_init_tao_flow::()) // Migrate pending emissions diff --git a/pallets/subtensor/src/migrations/migrate_clear_rank_trust_pruning_maps.rs b/pallets/subtensor/src/migrations/migrate_clear_rank_trust_pruning_maps.rs new file mode 100644 index 0000000000..2c49aa7dfd --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_clear_rank_trust_pruning_maps.rs @@ -0,0 +1,85 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +/// Remove all keys from Rank, Trust, and PruningScores. 
+pub fn migrate_clear_rank_trust_pruning_maps() -> Weight { + let mig_name: Vec = b"clear_rank_trust_pruning_maps".to_vec(); + let mig_name_str = String::from_utf8_lossy(&mig_name); + + // 1 read for the HasMigrationRun flag + let mut total_weight = T::DbWeight::get().reads(1); + + // Run-once guard + if HasMigrationRun::::get(&mig_name) { + log::info!("Migration '{mig_name_str}' already executed - skipping"); + return total_weight; + } + + log::info!("Running migration '{mig_name_str}'"); + + let mut total_reads: u64 = 0; + let mut total_writes: u64 = 0; + let limit: u32 = u32::MAX; + + // ------------------------------ + // 1) Rank: clear in one go + // ------------------------------ + let rank_res = Rank::::clear(limit, None); + let rank_reads = rank_res.loops as u64; + let rank_writes = rank_res.backend as u64; + total_reads = total_reads.saturating_add(rank_reads); + total_writes = total_writes.saturating_add(rank_writes); + + log::info!( + "Rank wipe: backend={}, loops={}, cursor_is_none={}", + rank_res.backend, + rank_res.loops, + rank_res.maybe_cursor.is_none(), + ); + + // ------------------------------ + // 2) Trust: clear in one go + // ------------------------------ + let trust_res = Trust::::clear(limit, None); + let trust_reads = trust_res.loops as u64; + let trust_writes = trust_res.backend as u64; + total_reads = total_reads.saturating_add(trust_reads); + total_writes = total_writes.saturating_add(trust_writes); + + log::info!( + "Trust wipe: backend={}, loops={}, cursor_is_none={}", + trust_res.backend, + trust_res.loops, + trust_res.maybe_cursor.is_none(), + ); + + // ------------------------------ + // 3) PruningScores: clear in one go + // ------------------------------ + let ps_res = PruningScores::::clear(limit, None); + let ps_reads = ps_res.loops as u64; + let ps_writes = ps_res.backend as u64; + total_reads = total_reads.saturating_add(ps_reads); + total_writes = total_writes.saturating_add(ps_writes); + + log::info!( + "PruningScores wipe: backend={}, loops={}, cursor_is_none={}", + ps_res.backend, + ps_res.loops, + ps_res.maybe_cursor.is_none(), + ); + + // Accumulate reads/writes from Rank/Trust/PruningScores into the total weight + total_weight = + total_weight.saturating_add(T::DbWeight::get().reads_writes(total_reads, total_writes)); + + // Mark migration as done + HasMigrationRun::::insert(&mig_name, true); + total_weight = total_weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!("Migration '{mig_name_str}' completed"); + + total_weight +} diff --git a/pallets/subtensor/src/migrations/migrate_rao.rs b/pallets/subtensor/src/migrations/migrate_rao.rs index e614827e90..25e220b6d4 100644 --- a/pallets/subtensor/src/migrations/migrate_rao.rs +++ b/pallets/subtensor/src/migrations/migrate_rao.rs @@ -113,7 +113,7 @@ pub fn migrate_rao() -> Weight { // Only register the owner coldkey if it's not already a hotkey on the subnet. if !Uids::::contains_key(*netuid, &owner_coldkey) { // Register the owner_coldkey as neuron to the network. - let _neuron_uid: u16 = Pallet::::register_neuron(*netuid, &owner_coldkey); + //let _neuron_uid: u16 = Pallet::::register_neuron(*netuid, &owner_coldkey); } // Register the neuron immediately. 
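For reference, a minimal standalone sketch of how migrate_clear_rank_trust_pruning_maps above turns its three clear() calls into a single weight, assuming a plain struct in place of the MultiRemovalResults value returned by clear(): loops is charged as reads, backend as writes, plus one read for the HasMigrationRun guard and one write to set it afterwards.

// Illustrative only; ClearResult stands in for the MultiRemovalResults
// returned by StorageMap::clear in the migration above.
struct ClearResult {
    loops: u64,   // keys visited while clearing
    backend: u64, // keys actually removed from the backend
}

fn migration_reads_writes(results: &[ClearResult]) -> (u64, u64) {
    // One read for the HasMigrationRun flag, one write to set it at the end.
    let (mut reads, mut writes) = (1u64, 1u64);
    for r in results {
        reads = reads.saturating_add(r.loops);
        writes = writes.saturating_add(r.backend);
    }
    (reads, writes)
}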
if !IdentitiesV2::::contains_key(owner_coldkey.clone()) { diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 6e654cd2ee..4c9d5f01d1 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -5,6 +5,7 @@ use sp_io::KillStorageResult; use sp_io::hashing::twox_128; use sp_io::storage::clear_prefix; pub mod migrate_auto_stake_destination; +pub mod migrate_clear_rank_trust_pruning_maps; pub mod migrate_coldkey_swap_scheduled; pub mod migrate_commit_reveal_settings; pub mod migrate_commit_reveal_v2; diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index 7334c8126a..5229971ed0 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -55,8 +55,10 @@ impl Pallet { &hotkey, &coldkey, netuid, amount, ); + ensure!(actual_alpha_decrease <= amount, Error::::PrecisionLoss); + // Recycle means we should decrease the alpha issuance tracker. - Self::recycle_subnet_alpha(netuid, amount); + Self::recycle_subnet_alpha(netuid, actual_alpha_decrease); Self::deposit_event(Event::AlphaRecycled( coldkey, @@ -120,7 +122,9 @@ impl Pallet { &hotkey, &coldkey, netuid, amount, ); - Self::burn_subnet_alpha(netuid, amount); + ensure!(actual_alpha_decrease <= amount, Error::::PrecisionLoss); + + Self::burn_subnet_alpha(netuid, actual_alpha_decrease); // Deposit event Self::deposit_event(Event::AlphaBurned( diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index 7fdd335556..423cb97493 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -484,31 +484,37 @@ impl Pallet { let mut stakers: Vec<(T::AccountId, T::AccountId, u128)> = Vec::new(); let mut total_alpha_value_u128: u128 = 0; - for ((hot, cold, this_netuid), share_u64f64) in Alpha::::iter() { - if this_netuid != netuid { - continue; - } - - keys_to_remove.push((hot.clone(), cold.clone())); - if !hotkeys_seen.contains(&hot) { - hotkeys_seen.push(hot.clone()); - } + let hotkeys_in_subnet: Vec = TotalHotkeyAlpha::::iter() + .filter(|(_, this_netuid, _)| *this_netuid == netuid) + .map(|(hot, _, _)| hot.clone()) + .collect::>(); + + for hot in hotkeys_in_subnet.iter() { + for ((cold, this_netuid), share_u64f64) in Alpha::::iter_prefix((hot,)) { + if this_netuid != netuid { + continue; + } + keys_to_remove.push((hot.clone(), cold.clone())); + if !hotkeys_seen.contains(hot) { + hotkeys_seen.push(hot.clone()); + } - // Primary: actual α value via share pool. - let pool = Self::get_alpha_share_pool(hot.clone(), netuid); - let actual_val_u64 = pool.try_get_value(&cold).unwrap_or(0); + // Primary: actual α value via share pool. + let pool = Self::get_alpha_share_pool(hot.clone(), netuid); + let actual_val_u64 = pool.try_get_value(&cold).unwrap_or(0); - // Fallback: if pool uninitialized, treat raw Alpha share as value. - let val_u64 = if actual_val_u64 == 0 { - share_u64f64.saturating_to_num::() - } else { - actual_val_u64 - }; + // Fallback: if pool uninitialized, treat raw Alpha share as value. 
+ let val_u64 = if actual_val_u64 == 0 { + share_u64f64.saturating_to_num::() + } else { + actual_val_u64 + }; - if val_u64 > 0 { - let val_u128 = val_u64 as u128; - total_alpha_value_u128 = total_alpha_value_u128.saturating_add(val_u128); - stakers.push((hot, cold, val_u128)); + if val_u64 > 0 { + let val_u128 = val_u64 as u128; + total_alpha_value_u128 = total_alpha_value_u128.saturating_add(val_u128); + stakers.push((hot.clone(), cold, val_u128)); + } } } diff --git a/pallets/subtensor/src/staking/stake_utils.rs b/pallets/subtensor/src/staking/stake_utils.rs index d0f78551c1..f61a8a6ce2 100644 --- a/pallets/subtensor/src/staking/stake_utils.rs +++ b/pallets/subtensor/src/staking/stake_utils.rs @@ -564,11 +564,10 @@ impl Pallet { // We expect a negative value here let mut actual_alpha = 0; - if let Ok(value) = alpha_share_pool.try_get_value(coldkey) { - if value >= amount { - actual_alpha = - alpha_share_pool.update_value_for_one(coldkey, (amount as i64).neg()); - } + if let Ok(value) = alpha_share_pool.try_get_value(coldkey) + && value >= amount + { + actual_alpha = alpha_share_pool.update_value_for_one(coldkey, (amount as i64).neg()); } // Get the negation of the removed alpha, and clamp at 0. @@ -1181,10 +1180,10 @@ impl Pallet { // Ensure that if partial execution is not allowed, the amount will not cause // slippage over desired - if let Some(allow_partial) = maybe_allow_partial { - if !allow_partial { - ensure!(alpha_amount <= max_amount, Error::::SlippageTooHigh); - } + if let Some(allow_partial) = maybe_allow_partial + && !allow_partial + { + ensure!(alpha_amount <= max_amount, Error::::SlippageTooHigh); } } diff --git a/pallets/subtensor/src/subnets/mechanism.rs b/pallets/subtensor/src/subnets/mechanism.rs index 6598c308f2..481974ef05 100644 --- a/pallets/subtensor/src/subnets/mechanism.rs +++ b/pallets/subtensor/src/subnets/mechanism.rs @@ -311,20 +311,11 @@ impl Pallet { terms.emission, sub_weight, ); - acc_terms.rank = - Self::weighted_acc_u16(acc_terms.rank, terms.rank, sub_weight); - acc_terms.trust = - Self::weighted_acc_u16(acc_terms.trust, terms.trust, sub_weight); acc_terms.consensus = Self::weighted_acc_u16( acc_terms.consensus, terms.consensus, sub_weight, ); - acc_terms.pruning_score = Self::weighted_acc_u16( - acc_terms.pruning_score, - terms.pruning_score, - sub_weight, - ); acc_terms.validator_trust = Self::weighted_acc_u16( acc_terms.validator_trust, terms.validator_trust, @@ -351,14 +342,7 @@ impl Pallet { terms.emission, sub_weight, ), - rank: Self::weighted_acc_u16(0, terms.rank, sub_weight), - trust: Self::weighted_acc_u16(0, terms.trust, sub_weight), consensus: Self::weighted_acc_u16(0, terms.consensus, sub_weight), - pruning_score: Self::weighted_acc_u16( - 0, - terms.pruning_score, - sub_weight, - ), validator_trust: Self::weighted_acc_u16( 0, terms.validator_trust, diff --git a/pallets/subtensor/src/subnets/registration.rs b/pallets/subtensor/src/subnets/registration.rs index b71aa68a0a..a7771857bb 100644 --- a/pallets/subtensor/src/subnets/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -9,32 +9,29 @@ use system::pallet_prelude::BlockNumberFor; const LOG_TARGET: &str = "runtime::subtensor::registration"; impl Pallet { - pub fn register_neuron(netuid: NetUid, hotkey: &T::AccountId) -> u16 { - // Init param - let neuron_uid: u16; + pub fn register_neuron(netuid: NetUid, hotkey: &T::AccountId) -> Result { let block_number: u64 = Self::get_current_block_as_u64(); let current_subnetwork_n: u16 = Self::get_subnetwork_n(netuid); if 
current_subnetwork_n < Self::get_max_allowed_uids(netuid) { // No replacement required, the uid appends the subnetwork. - // We increment the subnetwork count here but not below. - neuron_uid = current_subnetwork_n; + let neuron_uid = current_subnetwork_n; // Expand subnetwork with new account. Self::append_neuron(netuid, hotkey, block_number); log::debug!("add new neuron account"); - } else { - // Replacement required. - // We take the neuron with the lowest pruning score here. - neuron_uid = Self::get_neuron_to_prune(netuid); - // Replace the neuron account with the new info. - Self::replace_neuron(netuid, neuron_uid, hotkey, block_number); - log::debug!("prune neuron"); + Ok(neuron_uid) + } else { + match Self::get_neuron_to_prune(netuid) { + Some(uid_to_replace) => { + Self::replace_neuron(netuid, uid_to_replace, hotkey, block_number); + log::debug!("prune neuron"); + Ok(uid_to_replace) + } + None => Err(Error::::NoNeuronIdAvailable.into()), + } } - - // Return the UID of the neuron. - neuron_uid } /// ---- The implementation for the extrinsic do_burned_registration: registering by burning TAO. @@ -93,14 +90,14 @@ impl Pallet { Error::::TooManyRegistrationsThisBlock ); - // --- 4. Ensure we are not exceeding the max allowed registrations per interval. + // --- 5. Ensure we are not exceeding the max allowed registrations per interval. ensure!( Self::get_registrations_this_interval(netuid) < Self::get_target_registrations_per_interval(netuid).saturating_mul(3), Error::::TooManyRegistrationsThisInterval ); - // --- 4. Ensure that the key is not already registered. + // --- 6. Ensure that the key is not already registered. ensure!( !Uids::::contains_key(netuid, &hotkey), Error::::HotKeyAlreadyRegisteredInSubNet @@ -128,7 +125,17 @@ impl Pallet { Error::::NoNeuronIdAvailable ); - // --- 10. Ensure the remove operation from the coldkey is a success. + // --- 10. If replacement is needed, ensure a safe prune candidate exists. + let current_n = Self::get_subnetwork_n(netuid); + let max_n = Self::get_max_allowed_uids(netuid); + if current_n >= max_n { + ensure!( + Self::get_neuron_to_prune(netuid).is_some(), + Error::::NoNeuronIdAvailable + ); + } + + // --- 11. Ensure the remove operation from the coldkey is a success. let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, registration_cost.into())?; @@ -145,19 +152,19 @@ impl Pallet { }); // Actually perform the registration. - let neuron_uid: u16 = Self::register_neuron(netuid, &hotkey); + let neuron_uid: u16 = Self::register_neuron(netuid, &hotkey)?; - // --- 14. Record the registration and increment block and interval counters. + // --- 12. Record the registration and increment block and interval counters. BurnRegistrationsThisInterval::::mutate(netuid, |val| val.saturating_inc()); RegistrationsThisInterval::::mutate(netuid, |val| val.saturating_inc()); RegistrationsThisBlock::::mutate(netuid, |val| val.saturating_inc()); Self::increase_rao_recycled(netuid, Self::get_burn(netuid).into()); - // --- 15. Deposit successful event. + // --- 13. Deposit successful event. log::debug!("NeuronRegistered( netuid:{netuid:?} uid:{neuron_uid:?} hotkey:{hotkey:?} ) "); Self::deposit_event(Event::NeuronRegistered(netuid, neuron_uid, hotkey)); - // --- 16. Ok and done. + // --- 14. Ok and done. Ok(()) } @@ -281,45 +288,49 @@ impl Pallet { Error::::InvalidDifficulty ); // Check that the work meets difficulty. - // --- 7. Check Work is the product of the nonce, the block number, and hotkey. Add this as used work. + // --- 9. 
Check Work is the product of the nonce, the block number, and hotkey. Add this as used work. let seal: H256 = Self::create_seal_hash(block_number, nonce, &hotkey); ensure!(seal == work_hash, Error::::InvalidSeal); UsedWork::::insert(work.clone(), current_block_number); - // DEPRECATED --- 8. Ensure that the key passes the registration requirement - // ensure!( - // Self::passes_network_connection_requirement(netuid, &hotkey), - // Error::::DidNotPassConnectedNetworkRequirement - // ); - - // --- 9. If the network account does not exist we will create it here. + // --- 10. If the network account does not exist we will create it here. Self::create_account_if_non_existent(&coldkey, &hotkey); - // --- 10. Ensure that the pairing is correct. + // --- 11. Ensure that the pairing is correct. ensure!( Self::coldkey_owns_hotkey(&coldkey, &hotkey), Error::::NonAssociatedColdKey ); - // Possibly there is no neuron slots at all. + // --- 12. Possibly there is no neuron slots at all. ensure!( Self::get_max_allowed_uids(netuid) != 0, Error::::NoNeuronIdAvailable ); + // --- 13. If replacement is needed, ensure a safe prune candidate exists. + let current_n = Self::get_subnetwork_n(netuid); + let max_n = Self::get_max_allowed_uids(netuid); + if current_n >= max_n { + ensure!( + Self::get_neuron_to_prune(netuid).is_some(), + Error::::NoNeuronIdAvailable + ); + } + // Actually perform the registration. - let neuron_uid: u16 = Self::register_neuron(netuid, &hotkey); + let neuron_uid: u16 = Self::register_neuron(netuid, &hotkey)?; - // --- 12. Record the registration and increment block and interval counters. + // --- 14. Record the registration and increment block and interval counters. POWRegistrationsThisInterval::::mutate(netuid, |val| val.saturating_inc()); RegistrationsThisInterval::::mutate(netuid, |val| val.saturating_inc()); RegistrationsThisBlock::::mutate(netuid, |val| val.saturating_inc()); - // --- 13. Deposit successful event. + // --- 15. Deposit successful event. log::debug!("NeuronRegistered( netuid:{netuid:?} uid:{neuron_uid:?} hotkey:{hotkey:?} ) "); Self::deposit_event(Event::NeuronRegistered(netuid, neuron_uid, hotkey)); - // --- 14. Ok and done. + // --- 16. Ok and done. Ok(()) } @@ -425,93 +436,96 @@ impl Pallet { // Insert subnet owner hotkey in the beginning of the list if valid and not // already present - if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) { - if let Some(owner_uid) = Uids::::get(netuid, &owner_hk) { - if !immune_tuples.contains(&(owner_uid, owner_hk.clone())) { - immune_tuples.insert(0, (owner_uid, owner_hk.clone())); - if immune_tuples.len() > limit { - immune_tuples.truncate(limit); - } - } + if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) + && let Some(owner_uid) = Uids::::get(netuid, &owner_hk) + && !immune_tuples.contains(&(owner_uid, owner_hk.clone())) + { + immune_tuples.insert(0, (owner_uid, owner_hk.clone())); + if immune_tuples.len() > limit { + immune_tuples.truncate(limit); } } immune_tuples } - /// Determine which peer to prune from the network by finding the element with the lowest pruning score out of - /// immunity period. If there is a tie for lowest pruning score, the neuron registered earliest is pruned. - /// If all neurons are in immunity period, the neuron with the lowest pruning score is pruned. If there is a tie for - /// the lowest pruning score, the immune neuron registered earliest is pruned. - /// Ties for earliest registration are broken by the neuron with the lowest uid. 
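Both registration paths above now verify that a prune candidate exists when the subnet is full (step 10 in the burned path, step 13 in the proof-of-work path); in do_burned_registration this check runs before the coldkey balance is removed, so registration on a full subnet with no prunable UID fails without taking the burn. A minimal stand-in for that guard, with names mirroring the hunks (illustrative, not the pallet code):

fn ensure_prune_candidate(
    current_n: u16,
    max_n: u16,
    prune_candidate: Option<u16>,
) -> Result<(), &'static str> {
    // Only a full subnet requires a replacement slot.
    if current_n >= max_n && prune_candidate.is_none() {
        return Err("NoNeuronIdAvailable");
    }
    Ok(())
}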
- pub fn get_neuron_to_prune(netuid: NetUid) -> u16 { - let mut min_score: u16 = u16::MAX; - let mut min_score_in_immunity: u16 = u16::MAX; - let mut earliest_registration: u64 = u64::MAX; - let mut earliest_registration_in_immunity: u64 = u64::MAX; - let mut uid_to_prune: u16 = 0; - let mut uid_to_prune_in_immunity: u16 = 0; - - // This boolean is used instead of checking if min_score == u16::MAX, to avoid the case - // where all non-immune neurons have pruning score u16::MAX - // This may be unlikely in practice. - let mut found_non_immune = false; - - let neurons_n = Self::get_subnetwork_n(netuid); - if neurons_n == 0 { - return 0; // If there are no neurons in this network. + /// Determine which neuron to prune. + pub fn get_neuron_to_prune(netuid: NetUid) -> Option { + let n = Self::get_subnetwork_n(netuid); + if n == 0 { + return None; } - // Get the list of immortal (top-k by registration time of owner owned) keys - let subnet_owner_coldkey = SubnetOwner::::get(netuid); - let immortal_hotkeys = Self::get_immune_owner_hotkeys(netuid, &subnet_owner_coldkey); - for neuron_uid in 0..neurons_n { - // Do not deregister the owner's owned hotkeys - if let Ok(hotkey) = Self::get_hotkey_for_net_and_uid(netuid, neuron_uid) { - if immortal_hotkeys.contains(&hotkey) { - continue; - } + let owner_ck = SubnetOwner::::get(netuid); + let immortal_hotkeys = Self::get_immune_owner_hotkeys(netuid, &owner_ck); + let emissions: Vec = Emission::::get(netuid); + + // Single pass: + // - count current non‑immortal & non‑immune UIDs, + // - track best non‑immune and best immune candidates separately. + let mut free_count: u16 = 0; + + // (emission, reg_block, uid) + let mut best_non_immune: Option<(AlphaCurrency, u64, u16)> = None; + let mut best_immune: Option<(AlphaCurrency, u64, u16)> = None; + + for uid in 0..n { + let hk = match Self::get_hotkey_for_net_and_uid(netuid, uid) { + Ok(h) => h, + Err(_) => continue, + }; + + // Skip owner‑immortal hotkeys entirely. + if immortal_hotkeys.contains(&hk) { + continue; } - let pruning_score: u16 = Self::get_pruning_score_for_uid(netuid, neuron_uid); - let block_at_registration: u64 = - Self::get_neuron_block_at_registration(netuid, neuron_uid); - let is_immune = Self::get_neuron_is_immune(netuid, neuron_uid); + let is_immune = Self::get_neuron_is_immune(netuid, uid); + let emission = emissions + .get(uid as usize) + .cloned() + .unwrap_or(AlphaCurrency::ZERO); + let reg_block = Self::get_neuron_block_at_registration(netuid, uid); + + // Helper to decide if (e, b, u) beats the current best. 
+ let consider = |best: &mut Option<(AlphaCurrency, u64, u16)>| match best { + None => *best = Some((emission, reg_block, uid)), + Some((be, bb, bu)) => { + let better = if emission != *be { + emission < *be + } else if reg_block != *bb { + reg_block < *bb + } else { + uid < *bu + }; + if better { + *best = Some((emission, reg_block, uid)); + } + } + }; if is_immune { - // if the immune neuron has a lower pruning score than the minimum for immune neurons, - // or, if the pruning scores are equal and the immune neuron was registered earlier than the current minimum for immune neurons, - // then update the minimum pruning score and the uid to prune for immune neurons - if pruning_score < min_score_in_immunity - || (pruning_score == min_score_in_immunity - && block_at_registration < earliest_registration_in_immunity) - { - min_score_in_immunity = pruning_score; - earliest_registration_in_immunity = block_at_registration; - uid_to_prune_in_immunity = neuron_uid; - } + consider(&mut best_immune); } else { - found_non_immune = true; - // if the non-immune neuron has a lower pruning score than the minimum for non-immune neurons, - // or, if the pruning scores are equal and the non-immune neuron was registered earlier than the current minimum for non-immune neurons, - // then update the minimum pruning score and the uid to prune for non-immune neurons - if pruning_score < min_score - || (pruning_score == min_score && block_at_registration < earliest_registration) - { - min_score = pruning_score; - earliest_registration = block_at_registration; - uid_to_prune = neuron_uid; - } + free_count = free_count.saturating_add(1); + consider(&mut best_non_immune); } } - if found_non_immune { - Self::set_pruning_score_for_uid(netuid, uid_to_prune, u16::MAX); - uid_to_prune - } else { - Self::set_pruning_score_for_uid(netuid, uid_to_prune_in_immunity, u16::MAX); - uid_to_prune_in_immunity + // No candidates left after filtering out owner‑immortal hotkeys. + if best_non_immune.is_none() && best_immune.is_none() { + return None; + } + + // Safety floor for non‑immortal & non‑immune UIDs. + let min_free: u16 = Self::get_min_non_immune_uids(netuid); + let can_prune_non_immune = free_count > min_free; + + // Prefer non‑immune if allowed; otherwise fall back to immune. + if can_prune_non_immune && let Some((_, _, uid)) = best_non_immune { + return Some(uid); } + best_immune.map(|(_, _, uid)| uid) } /// Determine whether the given hash satisfies the given difficulty. diff --git a/pallets/subtensor/src/subnets/serving.rs b/pallets/subtensor/src/subnets/serving.rs index cdaf39e51b..d11eb479d3 100644 --- a/pallets/subtensor/src/subnets/serving.rs +++ b/pallets/subtensor/src/subnets/serving.rs @@ -84,10 +84,10 @@ impl Pallet { )?; // Check+insert certificate - if let Some(certificate) = certificate { - if let Ok(certificate) = NeuronCertificateOf::try_from(certificate) { - NeuronCertificates::::insert(netuid, hotkey_id.clone(), certificate) - } + if let Some(certificate) = certificate + && let Ok(certificate) = NeuronCertificateOf::try_from(certificate) + { + NeuronCertificates::::insert(netuid, hotkey_id.clone(), certificate) } // We insert the axon meta. 
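The replacement get_neuron_to_prune above ranks candidates by the tuple (emission, registration block, uid), lowest first, instead of by pruning score, and only drains non-immune UIDs while more than MinNonImmuneUids of them remain. A self-contained sketch of that selection rule, using u64/u16 stand-ins for AlphaCurrency and block numbers and assuming owner-immortal hotkeys have already been filtered out, as the hunk does:

struct Candidate {
    uid: u16,
    emission: u64,
    reg_block: u64,
    immune: bool,
}

fn pick_prune_uid(candidates: &[Candidate], min_free: u16) -> Option<u16> {
    // Non-immune (and non-immortal) UIDs still present in the subnet.
    let free_count = candidates.iter().filter(|c| !c.immune).count() as u16;

    // Lowest (emission, reg_block, uid) wins; Rust tuple ordering reproduces
    // the explicit emission, then registration block, then uid tie-break.
    let best = |immune: bool| {
        candidates
            .iter()
            .filter(move |c| c.immune == immune)
            .min_by_key(|c| (c.emission, c.reg_block, c.uid))
    };

    // Prefer a non-immune candidate only while the safety floor is respected.
    if free_count > min_free {
        if let Some(c) = best(false) {
            return Some(c.uid);
        }
    }
    best(true).map(|c| c.uid)
}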
diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index fda21bc33f..0a09017e64 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -18,12 +18,11 @@ impl Pallet { } } - /// Resets the trust, emission, consensus, incentive, dividends, bonds, and weights of + /// Resets the emission, consensus, incentives, dividends, bonds, and weights of /// the neuron to default pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { let neuron_index: usize = neuron_uid.into(); Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); - Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); for mecid in 0..MechanismCountCurrent::::get(netuid).into() { let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); @@ -64,14 +63,14 @@ impl Pallet { let old_hotkey: T::AccountId = Keys::::get(netuid, uid_to_replace); // Do not replace owner hotkey from `SubnetOwnerHotkey` - if let Ok(sn_owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { - if sn_owner_hotkey == old_hotkey.clone() { - log::warn!( - "replace_neuron: Skipped replacement because neuron is the subnet owner hotkey. \ + if let Ok(sn_owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) + && sn_owner_hotkey == old_hotkey.clone() + { + log::warn!( + "replace_neuron: Skipped replacement because neuron is the subnet owner hotkey. \ netuid: {netuid:?}, uid_to_replace: {uid_to_replace:?}, new_hotkey: {new_hotkey:?}, owner_hotkey: {sn_owner_hotkey:?}" - ); - return; - } + ); + return; } // 2. Remove previous set memberships. @@ -108,9 +107,7 @@ impl Pallet { // 2. Get and increase the uid count. SubnetworkN::::insert(netuid, next_uid.saturating_add(1)); - // 3. Expand Yuma Consensus with new position. - Rank::::mutate(netuid, |v| v.push(0)); - Trust::::mutate(netuid, |v| v.push(0)); + // 3. Expand per-neuron vectors with new position. 
Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); @@ -120,7 +117,6 @@ impl Pallet { Self::set_last_update_for_uid(netuid_index, next_uid, block_number); } Dividends::::mutate(netuid, |v| v.push(0)); - PruningScores::::mutate(netuid, |v| v.push(0)); ValidatorTrust::::mutate(netuid, |v| v.push(0)); ValidatorPermit::::mutate(netuid, |v| v.push(false)); @@ -231,12 +227,9 @@ impl Pallet { emissions.into_iter().unzip(); // Get all current arrays from storage - let ranks = Rank::::get(netuid); - let trust = Trust::::get(netuid); let active = Active::::get(netuid); let consensus = Consensus::::get(netuid); let dividends = Dividends::::get(netuid); - let pruning_scores = PruningScores::::get(netuid); let vtrust = ValidatorTrust::::get(netuid); let vpermit = ValidatorPermit::::get(netuid); let stake_weight = StakeWeight::::get(netuid); @@ -244,24 +237,18 @@ impl Pallet { // Create trimmed arrays by extracting values for kept uids only // Pre-allocate vectors with exact capacity for efficiency let len = trimmed_uids.len(); - let mut trimmed_ranks = Vec::with_capacity(len); - let mut trimmed_trust = Vec::with_capacity(len); let mut trimmed_active = Vec::with_capacity(len); let mut trimmed_consensus = Vec::with_capacity(len); let mut trimmed_dividends = Vec::with_capacity(len); - let mut trimmed_pruning_scores = Vec::with_capacity(len); let mut trimmed_vtrust = Vec::with_capacity(len); let mut trimmed_vpermit = Vec::with_capacity(len); let mut trimmed_stake_weight = Vec::with_capacity(len); // Single iteration to extract values for all kept uids for &uid in &trimmed_uids { - trimmed_ranks.push(ranks.get(uid).cloned().unwrap_or_default()); - trimmed_trust.push(trust.get(uid).cloned().unwrap_or_default()); trimmed_active.push(active.get(uid).cloned().unwrap_or_default()); trimmed_consensus.push(consensus.get(uid).cloned().unwrap_or_default()); trimmed_dividends.push(dividends.get(uid).cloned().unwrap_or_default()); - trimmed_pruning_scores.push(pruning_scores.get(uid).cloned().unwrap_or_default()); trimmed_vtrust.push(vtrust.get(uid).cloned().unwrap_or_default()); trimmed_vpermit.push(vpermit.get(uid).cloned().unwrap_or_default()); trimmed_stake_weight.push(stake_weight.get(uid).cloned().unwrap_or_default()); @@ -269,12 +256,9 @@ impl Pallet { // Update storage with trimmed arrays Emission::::insert(netuid, trimmed_emissions); - Rank::::insert(netuid, trimmed_ranks); - Trust::::insert(netuid, trimmed_trust); Active::::insert(netuid, trimmed_active); Consensus::::insert(netuid, trimmed_consensus); Dividends::::insert(netuid, trimmed_dividends); - PruningScores::::insert(netuid, trimmed_pruning_scores); ValidatorTrust::::insert(netuid, trimmed_vtrust); ValidatorPermit::::insert(netuid, trimmed_vpermit); StakeWeight::::insert(netuid, trimmed_stake_weight); diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index f1a2df56e2..56acbef9c7 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -1109,16 +1109,16 @@ impl Pallet { current_block: u64, ) -> bool { let maybe_netuid_and_subid = Self::get_netuid_and_subid(netuid_index); - if let Ok((netuid, _)) = maybe_netuid_and_subid { - if Self::is_uid_exist_on_network(netuid, neuron_uid) { - // --- 1. Ensure that the diff between current and last_set weights is greater than limit. 
- let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid); - if last_set_weights == 0 { - return true; - } // (Storage default) Never set weights. - return current_block.saturating_sub(last_set_weights) - >= Self::get_weights_set_rate_limit(netuid); - } + if let Ok((netuid, _)) = maybe_netuid_and_subid + && Self::is_uid_exist_on_network(netuid, neuron_uid) + { + // --- 1. Ensure that the diff between current and last_set weights is greater than limit. + let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid); + if last_set_weights == 0 { + return true; + } // (Storage default) Never set weights. + return current_block.saturating_sub(last_set_weights) + >= Self::get_weights_set_rate_limit(netuid); } // --- 3. Non registered peers cant pass. Neither can non-existing mecid diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index 38f1f85df8..4fdf87fb7b 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -369,22 +369,20 @@ impl Pallet { // 3.3 Swap Prometheus. // Prometheus( netuid, hotkey ) -> prometheus -- the prometheus data that a hotkey has in the network. - if is_network_member { - if let Ok(old_prometheus_info) = Prometheus::::try_get(netuid, old_hotkey) { - Prometheus::::remove(netuid, old_hotkey); - Prometheus::::insert(netuid, new_hotkey, old_prometheus_info); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } + if is_network_member + && let Ok(old_prometheus_info) = Prometheus::::try_get(netuid, old_hotkey) + { + Prometheus::::remove(netuid, old_hotkey); + Prometheus::::insert(netuid, new_hotkey, old_prometheus_info); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); } // 3.4. Swap axons. // Axons( netuid, hotkey ) -> axon -- the axon that the hotkey has. - if is_network_member { - if let Ok(old_axon_info) = Axons::::try_get(netuid, old_hotkey) { - Axons::::remove(netuid, old_hotkey); - Axons::::insert(netuid, new_hotkey, old_axon_info); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } + if is_network_member && let Ok(old_axon_info) = Axons::::try_get(netuid, old_hotkey) { + Axons::::remove(netuid, old_hotkey); + Axons::::insert(netuid, new_hotkey, old_axon_info); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); } // 3.5 Swap WeightCommits @@ -404,29 +402,27 @@ impl Pallet { // 3.6. Swap the subnet loaded emission. // LoadedEmission( netuid ) --> Vec<(hotkey, u64)> -- the loaded emission for the subnet. - if is_network_member { - if let Some(mut old_loaded_emission) = LoadedEmission::::get(netuid) { - for emission in old_loaded_emission.iter_mut() { - if emission.0 == *old_hotkey { - emission.0 = new_hotkey.clone(); - } + if is_network_member && let Some(mut old_loaded_emission) = LoadedEmission::::get(netuid) + { + for emission in old_loaded_emission.iter_mut() { + if emission.0 == *old_hotkey { + emission.0 = new_hotkey.clone(); } - LoadedEmission::::remove(netuid); - LoadedEmission::::insert(netuid, old_loaded_emission); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); } + LoadedEmission::::remove(netuid); + LoadedEmission::::insert(netuid, old_loaded_emission); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); } // 3.7. Swap neuron TLS certificates. // NeuronCertificates( netuid, hotkey ) -> Vec -- the neuron certificate for the hotkey. 
- if is_network_member { - if let Ok(old_neuron_certificates) = + if is_network_member + && let Ok(old_neuron_certificates) = NeuronCertificates::::try_get(netuid, old_hotkey) - { - NeuronCertificates::::remove(netuid, old_hotkey); - NeuronCertificates::::insert(netuid, new_hotkey, old_neuron_certificates); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } + { + NeuronCertificates::::remove(netuid, old_hotkey); + NeuronCertificates::::insert(netuid, new_hotkey, old_neuron_certificates); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); } // 4. Swap ChildKeys. // 5. Swap ParentKeys. diff --git a/pallets/subtensor/src/tests/claim_root.rs b/pallets/subtensor/src/tests/claim_root.rs index b910fa1e83..fd5c1deaf0 100644 --- a/pallets/subtensor/src/tests/claim_root.rs +++ b/pallets/subtensor/src/tests/claim_root.rs @@ -1816,3 +1816,12 @@ fn test_claim_root_keep_subnets_swap_claim_type() { ); }); } + +#[test] +fn test_claim_root_default_mode_keep() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1003); + + assert_eq!(RootClaimType::::get(coldkey), RootClaimTypeEnum::Swap); + }); +} diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 1498e0b423..f6c92c8079 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -3920,6 +3920,67 @@ fn test_pending_emission_start_call_not_done() { }); } +#[test] +fn test_root_prop_filled_on_block_step() { + new_test_ext(1).execute_with(|| { + let hotkey = U256::from(10); + let coldkey = U256::from(11); + let netuid1 = add_dynamic_network(&hotkey, &coldkey); + let netuid2 = add_dynamic_network(&hotkey, &coldkey); + + SubnetTAO::::insert(NetUid::ROOT, TaoCurrency::from(1_000_000_000_000u64)); + SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 + + let tao_reserve = TaoCurrency::from(50_000_000_000); + let alpha_in = AlphaCurrency::from(100_000_000_000); + SubnetTAO::::insert(netuid1, tao_reserve); + SubnetAlphaIn::::insert(netuid1, alpha_in); + SubnetTAO::::insert(netuid2, tao_reserve); + SubnetAlphaIn::::insert(netuid2, alpha_in); + + assert!(!RootProp::::contains_key(netuid1)); + assert!(!RootProp::::contains_key(netuid2)); + + run_to_block(2); + + assert!(RootProp::::get(netuid1) > U96F32::from_num(0)); + assert!(RootProp::::get(netuid2) > U96F32::from_num(0)); + }); +} + +#[test] +fn test_root_proportion() { + new_test_ext(1).execute_with(|| { + let hotkey = U256::from(10); + let coldkey = U256::from(11); + let netuid = add_dynamic_network(&hotkey, &coldkey); + + let root_tao_reserve = 1_000_000_000_000u64; + SubnetTAO::::insert(NetUid::ROOT, TaoCurrency::from(root_tao_reserve)); + + let tao_weight = 3_320_413_933_267_719_290u64; + SubtensorModule::set_tao_weight(tao_weight); + + let alpha_in = 100_000_000_000u64; + SubnetAlphaIn::::insert(netuid, AlphaCurrency::from(alpha_in)); + + let actual_root_proportion = SubtensorModule::root_proportion(netuid); + let expected_root_prop = { + let tao_weight = SubtensorModule::get_tao_weight(); + let root_tao = U96F32::from_num(root_tao_reserve); + let alpha_in = { + let alpha: u64 = SubtensorModule::get_alpha_issuance(netuid).into(); + + U96F32::from_num(alpha) + }; + + tao_weight * root_tao / (tao_weight * root_tao + alpha_in) + }; + + assert_eq!(actual_root_proportion, expected_root_prop); + }); +} + #[test] fn test_get_subnet_terms_alpha_emissions_cap() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/epoch.rs 
b/pallets/subtensor/src/tests/epoch.rs index cdf44df645..32f754f78d 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -718,39 +718,34 @@ fn test_512_graph() { SubtensorModule::get_total_stake_for_hotkey(&(U256::from(uid))), max_stake_per_validator.into() ); - assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); - assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); assert_eq!( SubtensorModule::get_incentive_for_uid(netuid.into(), uid), 0 ); - assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 1023); // Note D = floor(1 / 64 * 65_535) = 1023 + assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 1023); // floor(1 / 64 * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), 7812500.into() - ); // Note E = 0.5 / 200 * 1_000_000_000 = 7_812_500 + ); // 0.5 / 200 * 1_000_000_000 assert_eq!(bonds[uid as usize][validator], 0.0); assert_eq!(bonds[uid as usize][server], I32F32::from_num(65_535)); - // Note B_ij = floor(1 / 64 * 65_535) / 65_535 = 1023 / 65_535, then max-upscaled to 65_535 } for uid in servers { assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&(U256::from(uid))), TaoCurrency::ZERO ); - assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 146); // Note R = floor(1 / (512 - 64) * 65_535) = 146 - assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 65535); - assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 146); // Note C = floor(1 / (512 - 64) * 65_535) = 146 + assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 146); assert_eq!( SubtensorModule::get_incentive_for_uid(netuid.into(), uid), 146 - ); // Note I = floor(1 / (512 - 64) * 65_535) = 146 + ); // floor(1 / (512 - 64) * 65_535) assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), 1116071.into() - ); // Note E = floor(0.5 / (512 - 64) * 1_000_000_000) = 1_116_071 + ); // floor(0.5 / (512 - 64) * 1_000_000_000) assert_eq!(bonds[uid as usize][validator], 0.0); assert_eq!(bonds[uid as usize][server], 0.0); } @@ -3807,16 +3802,16 @@ fn test_epoch_does_not_mask_outside_window_but_masks_inside() { SubtensorModule::epoch(netuid, 1_000.into()); assert!( - SubtensorModule::get_rank_for_uid(netuid, 1) > 0, + SubtensorModule::get_incentive_for_uid(netuid.into(), 1) > 0, "UID-1 (old) unmasked" ); assert_eq!( - SubtensorModule::get_rank_for_uid(netuid, 2), + SubtensorModule::get_incentive_for_uid(netuid.into(), 2), 0, "UID-2 (inside window) masked" ); assert_eq!( - SubtensorModule::get_rank_for_uid(netuid, 3), + SubtensorModule::get_incentive_for_uid(netuid.into(), 3), 0, "UID-3 (inside window) masked" ); diff --git a/pallets/subtensor/src/tests/epoch_logs.rs b/pallets/subtensor/src/tests/epoch_logs.rs index 0c95857346..38e3a1b734 100644 --- a/pallets/subtensor/src/tests/epoch_logs.rs +++ b/pallets/subtensor/src/tests/epoch_logs.rs @@ -197,7 +197,6 @@ fn test_simple() { assert!(has( "Combined Emission: [AlphaCurrency(500000000), AlphaCurrency(500000000)]" )); - assert!(has("Pruning Scores: [0.5, 0.5]")); assert!(!has("math error:")); }); } @@ -460,16 +459,10 @@ fn test_validators_weight_two_distinct_servers() { assert!(has("Weights (mask+norm): [[(3, 1), (4, 0)], [(3, 0), (4, 1)], [(3, 1), (4, 0)], [], []]")); // downstream signals present - assert!(has("Ranks (before): [0, 0, 0, 0.6666666665, 0.3333333333]")); assert!(has("Consensus: [0, 0, 0, 
1, 0]")); assert!(has("Validator Trust: [1, 0, 1, 0, 0]")); - assert!(has("Ranks (after): [0, 0, 0, 0.6666666665, 0]")); - assert!(has("Trust: [0, 0, 0, 1, 0]")); assert!(has("Dividends: [0.5, 0, 0.5, 0, 0]")); assert!(has("Normalized Combined Emission: [0.25, 0, 0.25, 0.5, 0]")); - assert!(has("Pruning Scores: [0.25, 0, 0.25, 0.5, 0]")); - - // math is ok assert!(!has("math error:")); }); } @@ -506,8 +499,6 @@ fn test_validator_splits_weight_across_two_servers() { assert!(has("validator_permits: [true, true, true, false, false]")); assert!(has("Weights (mask+norm): [[(3, 1), (4, 0)], [(3, 0), (4, 1)], [(3, 0.5), (4, 0.5)], [], []]")); - assert!(has("Ranks (before): [0, 0, 0, 0.4999999998, 0.4999999998]")); - assert!(has("Ranks (after): [0, 0, 0, 0.333333333, 0.333333333]")); assert!(has("ΔB (norm): [[(3, 0.5), (4, 0)], [(3, 0), (4, 0.5)], [(3, 0.5), (4, 0.5)], [], []]")); assert!(has("Dividends: [0.25, 0.25, 0.5, 0, 0]")); assert!(has("Normalized Combined Emission: [0.125, 0.125, 0.25, 0.25, 0.25]")); @@ -564,8 +555,6 @@ fn epoch_mechanism_reads_weights_per_mechanism() { assert!(logs_m1.contains("Active Stake: [0.3333333333, 0.3333333333, 0.3333333333, 0, 0]")); assert!(logs_m0.contains("Weights (mask+norm): [[(3, 1)], [(4, 1)], [(3, 1)], [], []]")); assert!(logs_m1.contains("Weights (mask+norm): [[(4, 1)], [(3, 1)], [(4, 1)], [], []]")); - assert!(logs_m0.contains("Ranks (before): [0, 0, 0, 0.6666666665, 0.3333333333]")); - assert!(logs_m1.contains("Ranks (before): [0, 0, 0, 0.3333333333, 0.6666666665]")); assert!(logs_m0.contains("ΔB (norm): [[(3, 0.5)], [], [(3, 0.5)], [], []]")); assert!(logs_m1.contains("ΔB (norm): [[(4, 0.5)], [], [(4, 0.5)], [], []]")); assert!(logs_m0.contains("Normalized Combined Emission: [0.25, 0, 0.25, 0.5, 0]")); @@ -631,7 +620,6 @@ fn epoch_mechanism_three_mechanisms_separate_state() { // Check major epoch indicators assert!(l0.contains("Weights (mask+norm): [[(2, 1)], [(2, 1)], [], []]")); - assert!(l0.contains("Ranks (before): [0, 0, 1, 0]")); assert!(l0.contains("ΔB (norm): [[(2, 0.5)], [(2, 0.5)], [], []]")); assert!(l0.contains("Normalized Combined Emission: [0.25, 0.25, 0.5, 0]")); @@ -640,12 +628,10 @@ fn epoch_mechanism_three_mechanisms_separate_state() { "Weights (mask+norm): [[(2, 0.5), (3, 0.5)], [(2, 0.5), (3, 0.5)], [], []]" ) ); - assert!(l1.contains("Ranks (before): [0, 0, 0.5, 0.5]")); assert!(l1.contains("ΔB (norm): [[(2, 0.5), (3, 0.5)], [(2, 0.5), (3, 0.5)], [], []]")); assert!(l1.contains("Normalized Combined Emission: [0.25, 0.25, 0.25, 0.25]")); assert!(l2.contains("Weights (mask+norm): [[(3, 1)], [(3, 1)], [], []]")); - assert!(l2.contains("Ranks (before): [0, 0, 0, 1]")); assert!(l2.contains("ΔB (norm): [[(3, 0.5)], [(3, 0.5)], [], []]")); assert!(l2.contains("Normalized Combined Emission: [0.25, 0.25, 0, 0.5]")); diff --git a/pallets/subtensor/src/tests/mechanism.rs b/pallets/subtensor/src/tests/mechanism.rs index e5c46e8722..9e6450e09c 100644 --- a/pallets/subtensor/src/tests/mechanism.rs +++ b/pallets/subtensor/src/tests/mechanism.rs @@ -742,11 +742,8 @@ fn epoch_with_mechanisms_persists_and_aggregates_all_terms() { // Fetch persisted vectors let active = Active::::get(netuid); let emission_v = Emission::::get(netuid); - let rank_v = Rank::::get(netuid); - let trust_v = Trust::::get(netuid); let cons_v = Consensus::::get(netuid); let div_v = Dividends::::get(netuid); - let prun_v = PruningScores::::get(netuid); let vtrust_v = ValidatorTrust::::get(netuid); let vperm_v = ValidatorPermit::::get(netuid); @@ -777,15 +774,8 @@ fn 
epoch_with_mechanisms_persists_and_aggregates_all_terms() { assert_abs_diff_eq!(u64::from(emission_v[uid]), exp_em, epsilon = 1); // u16 terms - assert_abs_diff_eq!(rank_v[uid], wu16(t0.rank, t1.rank), epsilon = 1); - assert_abs_diff_eq!(trust_v[uid], wu16(t0.trust, t1.trust), epsilon = 1); assert_abs_diff_eq!(cons_v[uid], wu16(t0.consensus, t1.consensus), epsilon = 1); assert_abs_diff_eq!(div_v[uid], wu16(t0.dividend, t1.dividend), epsilon = 1); - assert_abs_diff_eq!( - prun_v[uid], - wu16(t0.pruning_score, t1.pruning_score), - epsilon = 1 - ); assert_abs_diff_eq!( vtrust_v[uid], wu16(t0.validator_trust, t1.validator_trust), @@ -855,7 +845,6 @@ fn neuron_dereg_cleans_weights_across_subids() { AlphaCurrency::from(3u64), ], ); - Trust::::insert(netuid, vec![11u16, 99u16, 33u16]); Consensus::::insert(netuid, vec![21u16, 88u16, 44u16]); Dividends::::insert(netuid, vec![7u16, 77u16, 17u16]); @@ -884,9 +873,6 @@ fn neuron_dereg_cleans_weights_across_subids() { assert_eq!(e[1], 0u64.into()); assert_eq!(e[2], 3u64.into()); - let t = Trust::::get(netuid); - assert_eq!(t, vec![11, 0, 33]); - let c = Consensus::::get(netuid); assert_eq!(c, vec![21, 0, 44]); @@ -921,7 +907,6 @@ fn clear_neuron_handles_absent_rows_gracefully() { // Minimal vectors with non-zero at index 0 (we will clear UID=0) Emission::::insert(netuid, vec![AlphaCurrency::from(5u64)]); - Trust::::insert(netuid, vec![5u16]); Consensus::::insert(netuid, vec![6u16]); Dividends::::insert(netuid, vec![7u16]); @@ -929,12 +914,12 @@ fn clear_neuron_handles_absent_rows_gracefully() { let neuron_uid: u16 = 0; SubtensorModule::clear_neuron(netuid, neuron_uid); - // All zeroed at index 0 + // Emission/Consensus/Dividends zeroed at index 0 assert_eq!( Emission::::get(netuid), vec![AlphaCurrency::from(0u64)] ); - assert_eq!(Trust::::get(netuid), vec![0u16]); + assert_eq!(Consensus::::get(netuid), vec![0u16]); assert_eq!(Dividends::::get(netuid), vec![0u16]); }); diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 8d439e4e8f..1d78b4dffd 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -2396,6 +2396,121 @@ fn test_migrate_remove_tao_dividends() { ); } +#[test] +fn test_migrate_clear_rank_trust_pruning_maps_removes_entries() { + new_test_ext(1).execute_with(|| { + // ------------------------------ + // 0) Constants + // ------------------------------ + const MIG_NAME: &[u8] = b"clear_rank_trust_pruning_maps"; + let empty: Vec = EmptyU16Vec::::get(); + + // ------------------------------ + // 1) Pre-state: seed using the correct key type (NetUid) + // ------------------------------ + let n0: NetUid = 0u16.into(); + let n1: NetUid = 1u16.into(); + let n2: NetUid = 42u16.into(); + + // Rank: n0 non-empty, n1 explicitly empty, n2 absent + Rank::::insert(n0, vec![10, 20, 30]); + Rank::::insert(n1, Vec::::new()); + + // Trust: n0 non-empty, n2 non-empty + Trust::::insert(n0, vec![7]); + Trust::::insert(n2, vec![1, 2, 3]); + + // PruningScores: n0 non-empty, n1 empty, n2 non-empty + PruningScores::::insert(n0, vec![5, 5, 5]); + PruningScores::::insert(n1, Vec::::new()); + PruningScores::::insert(n2, vec![9]); + + // Sanity: preconditions (keys should exist where inserted) + assert!(Rank::::contains_key(n0)); + assert!(Rank::::contains_key(n1)); + assert!(!Rank::::contains_key(n2)); + + assert!(Trust::::contains_key(n0)); + assert!(!Trust::::contains_key(n1)); + assert!(Trust::::contains_key(n2)); + + assert!(PruningScores::::contains_key(n0)); + 
assert!(PruningScores::::contains_key(n1)); + assert!(PruningScores::::contains_key(n2)); + + assert!( + !HasMigrationRun::::get(MIG_NAME.to_vec()), + "migration flag should be false before run" + ); + + // ------------------------------ + // 2) Run migration + // ------------------------------ + let w = crate::migrations::migrate_clear_rank_trust_pruning_maps::migrate_clear_rank_trust_pruning_maps::(); + assert!(!w.is_zero(), "weight must be non-zero"); + + // ------------------------------ + // 3) Verify: all entries removed (no keys present) + // ------------------------------ + assert!( + HasMigrationRun::::get(MIG_NAME.to_vec()), + "migration flag not set" + ); + + // Rank: all removed + assert!( + !Rank::::contains_key(n0), + "Rank[n0] should be removed" + ); + assert!( + !Rank::::contains_key(n1), + "Rank[n1] should be removed" + ); + assert!( + !Rank::::contains_key(n2), + "Rank[n2] should remain absent" + ); + // ValueQuery still returns empty default + assert_eq!(Rank::::get(n0), empty); + assert_eq!(Rank::::get(n1), empty); + assert_eq!(Rank::::get(n2), empty); + + // Trust: all removed + assert!( + !Trust::::contains_key(n0), + "Trust[n0] should be removed" + ); + assert!( + !Trust::::contains_key(n1), + "Trust[n1] should remain absent" + ); + assert!( + !Trust::::contains_key(n2), + "Trust[n2] should be removed" + ); + assert_eq!(Trust::::get(n0), empty); + assert_eq!(Trust::::get(n1), empty); + assert_eq!(Trust::::get(n2), empty); + + // PruningScores: all removed + assert!( + !PruningScores::::contains_key(n0), + "PruningScores[n0] should be removed" + ); + assert!( + !PruningScores::::contains_key(n1), + "PruningScores[n1] should be removed" + ); + assert!( + !PruningScores::::contains_key(n2), + "PruningScores[n2] should be removed" + ); + assert_eq!(PruningScores::::get(n0), empty); + assert_eq!(PruningScores::::get(n1), empty); + assert_eq!(PruningScores::::get(n2), empty); + + }); +} fn do_setup_unactive_sn() -> (Vec, Vec) { // Register some subnets let netuid0 = add_dynamic_network_without_emission_block(&U256::from(0), &U256::from(0)); diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 0fa27a99d6..8214d58be0 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -106,6 +106,7 @@ fn dissolve_single_alpha_out_staker_gets_all_tao() { let pot: u64 = 99_999; SubnetTAO::::insert(net, TaoCurrency::from(pot)); SubtensorModule::set_subnet_locked_balance(net, 0.into()); + TotalHotkeyAlpha::::insert(s_hot, net, AlphaCurrency::from(5_000u64)); // Cold-key balance before let before = SubtensorModule::get_coldkey_balance(&s_cold); @@ -142,6 +143,9 @@ fn dissolve_two_stakers_pro_rata_distribution() { Alpha::::insert((s1_hot, s1_cold, net), U64F64::from_num(a1)); Alpha::::insert((s2_hot, s2_cold, net), U64F64::from_num(a2)); + TotalHotkeyAlpha::::insert(s1_hot, net, AlphaCurrency::from(a1 as u64)); + TotalHotkeyAlpha::::insert(s2_hot, net, AlphaCurrency::from(a2 as u64)); + let pot: u64 = 10_000; SubnetTAO::::insert(net, TaoCurrency::from(pot)); SubtensorModule::set_subnet_locked_balance(net, 5_000.into()); // owner refund path present; emission = 0 @@ -364,6 +368,10 @@ fn dissolve_clears_all_per_subnet_storages() { SubnetTaoProvided::::insert(net, TaoCurrency::from(1)); SubnetAlphaInProvided::::insert(net, AlphaCurrency::from(1)); + // TAO Flow + SubnetTaoFlow::::insert(net, 0i64); + SubnetEmaTaoFlow::::insert(net, (0u64, substrate_fixed::types::I64F64::from_num(0))); + // Subnet locks 
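The TotalHotkeyAlpha inserts added to these dissolve tests mirror the remove_stake.rs change earlier in this diff: stakers are now discovered by first listing hotkeys that have a TotalHotkeyAlpha entry for the dissolving subnet and then iterating Alpha only under those hotkey prefixes, so tests that seed Alpha directly must also seed TotalHotkeyAlpha. A self-contained sketch of that discovery step, with u64 stand-ins for account ids and amounts:

// Illustrative stand-in for iterating TotalHotkeyAlpha (hotkey, netuid, total).
fn hotkeys_in_subnet(entries: &[(u64, u16, u64)], netuid: u16) -> Vec<u64> {
    entries
        .iter()
        .filter(|(_, this_netuid, _)| *this_netuid == netuid)
        .map(|(hot, _, _)| *hot)
        .collect()
}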
TransferToggle::::insert(net, true); SubnetLocked::::insert(net, TaoCurrency::from(1)); @@ -496,6 +504,10 @@ fn dissolve_clears_all_per_subnet_storages() { assert!(!SubnetTaoInEmission::::contains_key(net)); assert!(!SubnetVolume::::contains_key(net)); + // TAO Flow + assert!(!SubnetTaoFlow::::contains_key(net)); + assert!(!SubnetEmaTaoFlow::::contains_key(net)); + // These are now REMOVED assert!(!SubnetAlphaIn::::contains_key(net)); assert!(!SubnetAlphaOut::::contains_key(net)); @@ -627,6 +639,7 @@ fn dissolve_alpha_out_but_zero_tao_no_rewards() { SubnetTAO::::insert(net, TaoCurrency::from(0)); // zero TAO SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); Emission::::insert(net, Vec::::new()); + TotalHotkeyAlpha::::insert(sh, net, AlphaCurrency::from(1_000u64)); let before = SubtensorModule::get_coldkey_balance(&sc); assert_ok!(SubtensorModule::do_dissolve_network(net)); @@ -672,6 +685,9 @@ fn dissolve_rounding_remainder_distribution() { SubnetTAO::::insert(net, TaoCurrency::from(1)); // TAO pot = 1 SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); + TotalHotkeyAlpha::::insert(s1h, net, AlphaCurrency::from(3u64)); + TotalHotkeyAlpha::::insert(s2h, net, AlphaCurrency::from(2u64)); + // Cold-key balances before let c1_before = SubtensorModule::get_coldkey_balance(&s1c); let c2_before = SubtensorModule::get_coldkey_balance(&s2c); @@ -1922,8 +1938,8 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( // Capture **pair‑level** α snapshot per net (pre‑LP). for ((hot, cold, net), amt) in Alpha::::iter() { - if let Some(&ni) = net_index.get(&net) { - if lp_sets_per_net[ni].contains(&cold) { + if let Some(&ni) = net_index.get(&net) + && lp_sets_per_net[ni].contains(&cold) { let a: u128 = amt.saturating_to_num(); if a > 0 { alpha_pairs_per_net @@ -1932,7 +1948,6 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( .push(((hot, cold), a)); } } - } } // ──────────────────────────────────────────────────────────────────── diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs index 173a03aea1..32a95c700d 100644 --- a/pallets/subtensor/src/tests/recycle_alpha.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -1,6 +1,7 @@ use approx::assert_abs_diff_eq; use frame_support::{assert_noop, assert_ok, traits::Currency}; use sp_core::U256; +use substrate_fixed::types::U64F64; use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT}; use super::mock; @@ -543,3 +544,77 @@ fn test_burn_errors() { ); }); } + +#[test] +fn test_recycle_precision_loss() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let netuid = add_dynamic_network(&hotkey, &coldkey); + + Balances::make_free_balance_be(&coldkey, 1_000_000_000); + // sanity check + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake.into(), netuid); + + // amount to recycle + let recycle_amount = AlphaCurrency::from(stake / 2); + + // Modify the alpha pool denominator so it's low-precision + let denominator = U64F64::from_num(0.00000001); + TotalHotkeyShares::::insert(hotkey, netuid, denominator); + Alpha::::insert((&hotkey, &coldkey, netuid), denominator); + + // recycle, expect error due to precision loss + assert_noop!( + SubtensorModule::recycle_alpha( + RuntimeOrigin::signed(coldkey), + 
hotkey, + recycle_amount, + netuid + ), + Error::::PrecisionLoss + ); + }); +} + +#[test] +fn test_burn_precision_loss() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let netuid = add_dynamic_network(&hotkey, &coldkey); + + Balances::make_free_balance_be(&coldkey, 1_000_000_000); + // sanity check + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // add stake to coldkey-hotkey pair so we can recycle it + let stake = 200_000; + increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake.into(), netuid); + + // amount to recycle + let burn_amount = AlphaCurrency::from(stake / 2); + + // Modify the alpha pool denominator so it's low-precision + let denominator = U64F64::from_num(0.00000001); + TotalHotkeyShares::::insert(hotkey, netuid, denominator); + Alpha::::insert((&hotkey, &coldkey, netuid), denominator); + + // burn, expect error due to precision loss + assert_noop!( + SubtensorModule::burn_alpha( + RuntimeOrigin::signed(coldkey), + hotkey, + burn_amount, + netuid + ), + Error::::PrecisionLoss + ); + }); +} diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 48e887d606..c82e173907 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -607,6 +607,7 @@ fn test_burn_adjustment() { }); } +#[allow(clippy::indexing_slicing)] #[test] fn test_burn_registration_pruning_scenarios() { new_test_ext(1).execute_with(|| { @@ -620,6 +621,9 @@ fn test_burn_registration_pruning_scenarios() { const IS_IMMUNE: bool = true; const NOT_IMMUNE: bool = false; + // --- Neutralize the safety floor for this test. + SubtensorModule::set_min_non_immune_uids(netuid, 0); + // Initial setup SubtensorModule::set_burn(netuid, burn_cost.into()); SubtensorModule::set_max_allowed_uids(netuid, max_allowed_uids); @@ -634,7 +638,7 @@ fn test_burn_registration_pruning_scenarios() { let mint_balance = burn_cost * max_allowed_uids as u64 + 1_000_000_000; SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, mint_balance); - // Register first half of neurons + // Register first half of neurons (uids: 0,1,2); all will be immune initially. for i in 0..3 { assert_ok!(SubtensorModule::burned_register( <::RuntimeOrigin>::signed(coldkey_account_id), @@ -644,51 +648,49 @@ fn test_burn_registration_pruning_scenarios() { step_block(1); } - // Note: pruning score is set to u16::MAX after getting neuron to prune - - // 1. Test if all immune neurons + // 1) All immune neurons assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 0), IS_IMMUNE); assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 1), IS_IMMUNE); assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 2), IS_IMMUNE); - SubtensorModule::set_pruning_score_for_uid(netuid, 0, 100); - SubtensorModule::set_pruning_score_for_uid(netuid, 1, 75); - SubtensorModule::set_pruning_score_for_uid(netuid, 2, 50); - - // The immune neuron with the lowest score should be pruned - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 2); - - // 2. Test tie-breaking for immune neurons - SubtensorModule::set_pruning_score_for_uid(netuid, 1, 50); - SubtensorModule::set_pruning_score_for_uid(netuid, 2, 50); + // Drive selection with emissions: lowest emission is pruned (among immune if all immune). 
+ // Set: uid0=100, uid1=75, uid2=50 -> expect uid2 + Emission::::mutate(netuid, |v| { + v[0] = 100u64.into(); + v[1] = 75u64.into(); + v[2] = 50u64.into(); + }); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(2)); - // Should get the oldest neuron (i.e., neuron that was registered first) - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); + // 2) Tie-breaking for immune neurons: uid1=50, uid2=50 -> earliest registration among {1,2} is uid1 + Emission::::mutate(netuid, |v| { + v[1] = 50u64.into(); + v[2] = 50u64.into(); + }); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(1)); - // 3. Test if no immune neurons + // 3) Make all three non-immune step_block(immunity_period); - - // ensure all neurons are non-immune assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 0), NOT_IMMUNE); assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 1), NOT_IMMUNE); assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 2), NOT_IMMUNE); - SubtensorModule::set_pruning_score_for_uid(netuid, 0, 100); - SubtensorModule::set_pruning_score_for_uid(netuid, 1, 50); - SubtensorModule::set_pruning_score_for_uid(netuid, 2, 75); - - // The non-immune neuron with the lowest score should be pruned - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); - - // 4. Test tie-breaking for non-immune neurons - SubtensorModule::set_pruning_score_for_uid(netuid, 1, 50); - SubtensorModule::set_pruning_score_for_uid(netuid, 2, 50); + // Among non-immune, choose lowest emission: set uid0=100, uid1=50, uid2=75 -> expect uid1 + Emission::::mutate(netuid, |v| { + v[0] = 100u64.into(); + v[1] = 50u64.into(); + v[2] = 75u64.into(); + }); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(1)); - // Should get the oldest non-immune neuron - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); + // 4) Non-immune tie-breaking: uid1=50, uid2=50 -> earliest registration among {1,2} is uid1 + Emission::::mutate(netuid, |v| { + v[1] = 50u64.into(); + v[2] = 50u64.into(); + }); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(1)); - // 5. 
Test mixed immunity - // Register second batch of neurons (these will be non-immune) + // 5) Mixed immunity: register another 3 immune neurons (uids: 3,4,5) for i in 3..6 { assert_ok!(SubtensorModule::burned_register( <::RuntimeOrigin>::signed(coldkey_account_id), @@ -698,31 +700,37 @@ fn test_burn_registration_pruning_scenarios() { step_block(1); } - // Ensure all new neurons are immune + // Ensure new neurons are immune assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 3), IS_IMMUNE); assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 4), IS_IMMUNE); assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 5), IS_IMMUNE); - // Set pruning scores for all neurons - SubtensorModule::set_pruning_score_for_uid(netuid, 0, 75); // non-immune - SubtensorModule::set_pruning_score_for_uid(netuid, 1, 50); // non-immune - SubtensorModule::set_pruning_score_for_uid(netuid, 2, 60); // non-immune - SubtensorModule::set_pruning_score_for_uid(netuid, 3, 40); // immune - SubtensorModule::set_pruning_score_for_uid(netuid, 4, 55); // immune - SubtensorModule::set_pruning_score_for_uid(netuid, 5, 45); // immune - - // The non-immune neuron with the lowest score should be pruned - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); - - // If we remove the lowest non-immune neuron, it should choose the next lowest non-immune - SubtensorModule::set_pruning_score_for_uid(netuid, 1, u16::MAX); - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 2); - - // If we make all non-immune neurons have high scores, it should choose the oldest non-immune neuron - SubtensorModule::set_pruning_score_for_uid(netuid, 0, u16::MAX); - SubtensorModule::set_pruning_score_for_uid(netuid, 1, u16::MAX); - SubtensorModule::set_pruning_score_for_uid(netuid, 2, u16::MAX); - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 0); + // Set emissions: + // non-immune (0..2): [75, 50, 60] -> lowest among non-immune is uid1 + // immune (3..5): [40, 55, 45] -> ignored while a non-immune exists + Emission::::mutate(netuid, |v| { + v[0] = 75u64.into(); + v[1] = 50u64.into(); + v[2] = 60u64.into(); + v[3] = 40u64.into(); + v[4] = 55u64.into(); + v[5] = 45u64.into(); + }); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(1)); + + // Remove lowest non-immune by making uid1 emission very high -> next lowest non-immune is uid2 + Emission::::mutate(netuid, |v| { + v[1] = 10_000u64.into(); + }); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(2)); + + // If all non-immune are equally high, choose the oldest non-immune -> uid0 + Emission::::mutate(netuid, |v| { + v[0] = 10_000u64.into(); + v[1] = 10_000u64.into(); + v[2] = 10_000u64.into(); + }); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(0)); }); } @@ -1286,53 +1294,76 @@ fn test_registration_failed_no_signature() { }); } +#[allow(clippy::indexing_slicing)] #[test] fn test_registration_get_uid_to_prune_all_in_immunity_period() { new_test_ext(1).execute_with(|| { System::set_block_number(0); let netuid = NetUid::from(1); add_network(netuid, 1, 0); - log::info!("add network"); + + // Neutralize safety floor and owner-immortality for deterministic selection + SubtensorModule::set_min_non_immune_uids(netuid, 0); + ImmuneOwnerUidsLimit::::insert(netuid, 0); + // Make sure the subnet owner is not one of the test coldkeys + SubnetOwner::::insert(netuid, U256::from(999_999u64)); + register_ok_neuron(netuid, U256::from(0), U256::from(0), 39420842); register_ok_neuron(netuid, U256::from(1), U256::from(1), 12412392); - 
SubtensorModule::set_pruning_score_for_uid(netuid, 0, 100); - SubtensorModule::set_pruning_score_for_uid(netuid, 1, 110); + SubtensorModule::set_immunity_period(netuid, 2); - assert_eq!(SubtensorModule::get_pruning_score_for_uid(netuid, 0), 100); - assert_eq!(SubtensorModule::get_pruning_score_for_uid(netuid, 1), 110); assert_eq!(SubtensorModule::get_immunity_period(netuid), 2); assert_eq!(SubtensorModule::get_current_block_as_u64(), 0); assert_eq!( SubtensorModule::get_neuron_block_at_registration(netuid, 0), 0 ); - assert_eq!(SubtensorModule::get_neuron_to_prune(NetUid::ROOT), 0); + + // Both immune; prune lowest emission among immune (uid0=100, uid1=110 => uid0) + Emission::::mutate(netuid, |v| { + v[0] = 100u64.into(); + v[1] = 110u64.into(); + }); + + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(0)); }); } +#[allow(clippy::indexing_slicing)] #[test] fn test_registration_get_uid_to_prune_none_in_immunity_period() { new_test_ext(1).execute_with(|| { System::set_block_number(0); let netuid = NetUid::from(1); add_network(netuid, 1, 0); - log::info!("add network"); + + // Neutralize safety floor and owner-immortality for deterministic selection + SubtensorModule::set_min_non_immune_uids(netuid, 0); + ImmuneOwnerUidsLimit::::insert(netuid, 0); + SubnetOwner::::insert(netuid, U256::from(999_999u64)); + register_ok_neuron(netuid, U256::from(0), U256::from(0), 39420842); register_ok_neuron(netuid, U256::from(1), U256::from(1), 12412392); - SubtensorModule::set_pruning_score_for_uid(netuid, 0, 100); - SubtensorModule::set_pruning_score_for_uid(netuid, 1, 110); + SubtensorModule::set_immunity_period(netuid, 2); - assert_eq!(SubtensorModule::get_pruning_score_for_uid(netuid, 0), 100); - assert_eq!(SubtensorModule::get_pruning_score_for_uid(netuid, 1), 110); assert_eq!(SubtensorModule::get_immunity_period(netuid), 2); assert_eq!(SubtensorModule::get_current_block_as_u64(), 0); assert_eq!( SubtensorModule::get_neuron_block_at_registration(netuid, 0), 0 ); + + // Advance beyond immunity -> both non-immune step_block(3); assert_eq!(SubtensorModule::get_current_block_as_u64(), 3); - assert_eq!(SubtensorModule::get_neuron_to_prune(NetUid::ROOT), 0); + + // Among non-immune, lowest emission pruned: uid0=100, uid1=110 -> expect uid0 + Emission::::mutate(netuid, |v| { + v[0] = 100u64.into(); + v[1] = 110u64.into(); + }); + + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(0)); }); } @@ -1341,11 +1372,9 @@ fn test_registration_get_uid_to_prune_none_in_immunity_period() { fn test_registration_get_uid_to_prune_owner_immortality() { new_test_ext(1).execute_with(|| { [ - // Burn key limit to 1 - testing the limits - // Other owner's hotkey is pruned because there's only 1 immune key and - // pruning score of owner key is lower + // Limit = 1: only the earliest owner hotkey is immortal -> prune the other owner hotkey (uid 1) (1, 1), - // Burn key limit to 2 - both owner keys are immune + // Limit = 2: both owner hotkeys are immortal -> prune the non-owner (uid 2) (2, 2), ] .iter() @@ -1362,6 +1391,7 @@ fn test_registration_get_uid_to_prune_owner_immortality() { let non_owner_hk = U256::from(3); let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + // Make sure registration blocks are set BlockAtRegistration::::insert(netuid, 1, 1); BlockAtRegistration::::insert(netuid, 2, 2); Uids::::insert(netuid, other_owner_hk, 1); @@ -1371,14 +1401,25 @@ fn test_registration_get_uid_to_prune_owner_immortality() { ImmunityPeriod::::insert(netuid, 1); 
SubnetworkN::::insert(netuid, 3); - step_block(10); + // Neutralize safety floor for this test + SubtensorModule::set_min_non_immune_uids(netuid, 0); + step_block(10); // all non-immune + + // Configure the number of immortal owner UIDs ImmuneOwnerUidsLimit::::insert(netuid, *limit); - // Set lower pruning score to sn owner keys - PruningScores::::insert(netuid, vec![0, 0, 1]); + // Drive selection by emissions (lowest first) + // uid0=0, uid1=0, uid2=1 + Emission::::insert( + netuid, + vec![AlphaCurrency::from(0), 0u64.into(), 1u64.into()], + ); - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), *uid_to_prune); + assert_eq!( + SubtensorModule::get_neuron_to_prune(netuid), + Some(*uid_to_prune) + ); }); }); } @@ -1411,90 +1452,23 @@ fn test_registration_get_uid_to_prune_owner_immortality_all_immune() { ImmunityPeriod::::insert(netuid, 100); SubnetworkN::::insert(netuid, 3); - step_block(20); - - ImmuneOwnerUidsLimit::::insert(netuid, limit); - - // Set lower pruning score to sn owner keys - PruningScores::::insert(netuid, vec![0, 0, 1]); + // Neutralize safety floor for this test + SubtensorModule::set_min_non_immune_uids(netuid, 0); - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), uid_to_prune); - }); -} + step_block(20); // all still immune -#[test] -fn test_registration_pruning() { - new_test_ext(1).execute_with(|| { - let netuid = NetUid::from(1); - let block_number: u64 = 0; - let tempo: u16 = 13; - let hotkey_account_id = U256::from(1); - let coldkey_account_id = U256::from(667); - let (nonce0, work0): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - block_number, - 3942084, - &hotkey_account_id, - ); - - //add network - add_network(netuid, tempo, 0); + ImmuneOwnerUidsLimit::::insert(netuid, limit); - assert_ok!(SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id), - netuid, - block_number, - nonce0, - work0, - hotkey_account_id, - coldkey_account_id - )); - // - let neuron_uid = - SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id).unwrap(); - SubtensorModule::set_pruning_score_for_uid(netuid, neuron_uid, 2); - // - let hotkey_account_id1 = U256::from(2); - let coldkey_account_id1 = U256::from(668); - let (nonce1, work1): (u64, Vec) = SubtensorModule::create_work_for_block_number( + // Lowest emission among non-immortal candidates -> uid2 + Emission::::insert( netuid, - block_number, - 11231312312, - &hotkey_account_id1, + vec![AlphaCurrency::from(0), 0u64.into(), 1u64.into()], ); - assert_ok!(SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id1), - netuid, - block_number, - nonce1, - work1, - hotkey_account_id1, - coldkey_account_id1 - )); - // - let neuron_uid1 = - SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id1).unwrap(); - SubtensorModule::set_pruning_score_for_uid(netuid, neuron_uid1, 3); - // - let hotkey_account_id2 = U256::from(3); - let coldkey_account_id2 = U256::from(669); - let (nonce2, work2): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - block_number, - 212312414, - &hotkey_account_id2, + assert_eq!( + SubtensorModule::get_neuron_to_prune(netuid), + Some(uid_to_prune) ); - - assert_ok!(SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id2), - netuid, - block_number, - nonce2, - work2, - hotkey_account_id2, - coldkey_account_id2 - )); }); } @@ -2188,6 +2162,130 @@ fn test_last_update_correctness() { }); } +#[allow(clippy::indexing_slicing)] +#[test] +fn test_registration_pruning() { + 
new_test_ext(1).execute_with(|| { + // --- Setup a simple non-root subnet. + let netuid = NetUid::from(5); + add_network(netuid, 10_000, 0); + + // No owner-based immortality: we want to test time-based immunity only. + ImmuneOwnerUidsLimit::::insert(netuid, 0); + + // Allow registrations freely. + MaxRegistrationsPerBlock::::insert(netuid, 1024); + SubtensorModule::set_target_registrations_per_interval(netuid, u16::MAX); + + // Cap the subnet at 3 UIDs so the 4th registration *must* prune. + SubtensorModule::set_max_allowed_uids(netuid, 3); + + // --- Register three neurons (uids 0, 1, 2). + let coldkeys = [U256::from(20_001), U256::from(20_002), U256::from(20_003)]; + let hotkeys = [U256::from(30_001), U256::from(30_002), U256::from(30_003)]; + + for i in 0..3 { + register_ok_neuron(netuid, hotkeys[i], coldkeys[i], 0); + } + + // Sanity: ensure we got sequential UIDs. + let uid0 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkeys[0]).unwrap(); + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkeys[1]).unwrap(); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkeys[2]).unwrap(); + + assert_eq!(uid0, 0); + assert_eq!(uid1, 1); + assert_eq!(uid2, 2); + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 3); + + // --- Craft immunity and tie‑breaking conditions. + + // Fixed "current" block. + let now: u64 = 1_000; + frame_system::Pallet::::set_block_number(now); + + // Immunity lasts 100 blocks. + SubtensorModule::set_immunity_period(netuid, 100); + + // Registration blocks: + // - uid0: now - 150 -> non‑immune + // - uid1: now - 200 -> non‑immune (older than uid0) + // - uid2: now - 10 -> immune + BlockAtRegistration::::insert(netuid, uid0, now - 150); + BlockAtRegistration::::insert(netuid, uid1, now - 200); + BlockAtRegistration::::insert(netuid, uid2, now - 10); + + // Check immunity flags: the 3rd neuron is immune, the first two are not. + assert!(!SubtensorModule::get_neuron_is_immune(netuid, uid0)); + assert!(!SubtensorModule::get_neuron_is_immune(netuid, uid1)); + assert!(SubtensorModule::get_neuron_is_immune(netuid, uid2)); + + // Emissions: + // - uid0: 10 + // - uid1: 10 (same emission as uid0) + // - uid2: 1 (better emission, but immune) + // + // Among *non‑immune* neurons, emission ties -> break on reg_block: + // uid1 registered earlier (now-200 < now-150), so uid1 should be pruned. + // The immune uid2 should **not** be chosen even though it has lower emission. + Emission::::mutate(netuid, |v| { + v[uid0 as usize] = 10u64.into(); + v[uid1 as usize] = 10u64.into(); + v[uid2 as usize] = 1u64.into(); + }); + + // Allow pruning of any non‑immune UID (no safety floor). + SubtensorModule::set_min_non_immune_uids(netuid, 0); + + // Check that pruning decision respects: + // 1. Prefer non‑immune over immune. + // 2. Then lowest emission. + // 3. Then earliest registration block. + // 4. Then uid (not needed here). + assert_eq!( + SubtensorModule::get_neuron_to_prune(netuid), + Some(uid1), + "Expected pruning to choose the oldest non‑immune neuron \ + when emissions tie, even if an immune neuron has lower emission" + ); + + // --- Now actually perform a registration that forces pruning. + + let new_hotkey = U256::from(40_000); + let new_coldkey = U256::from(50_000); + + // This should internally call do_burned_registration -> register_neuron, + // which must reuse the UID returned by get_neuron_to_prune (uid1). + register_ok_neuron(netuid, new_hotkey, new_coldkey, 0); + + // Still capped at 3 UIDs. 
+ assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 3); + + // Old uid1 hotkey should be gone. + assert!( + SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkeys[1]).is_err(), + "Hotkey for pruned UID should no longer be registered" + ); + + // New hotkey should reuse uid1 (the pruned slot). + let new_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &new_hotkey).unwrap(); + assert_eq!( + new_uid, uid1, + "New registration should reuse the UID selected by get_neuron_to_prune" + ); + + // The other two original neurons (uid0 and uid2) must remain registered. + assert_eq!( + SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkeys[0]).unwrap(), + uid0 + ); + assert_eq!( + SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkeys[2]).unwrap(), + uid2 + ); + }); +} + // #[ignore] // #[test] // fn test_hotkey_swap_ok() { diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index df4f8f29ec..f533fb4aac 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -441,6 +441,7 @@ fn test_replace_neuron_subnet_owner_not_replaced_if_in_sn_owner_hotkey_map() { }); } +#[allow(clippy::indexing_slicing)] #[test] fn test_get_neuron_to_prune_owner_not_pruned() { new_test_ext(1).execute_with(|| { @@ -453,6 +454,12 @@ fn test_get_neuron_to_prune_owner_not_pruned() { SubtensorModule::set_target_registrations_per_interval(netuid, 100); SubnetOwner::::insert(netuid, owner_coldkey); + // Ensure owner's hotkey is counted as immortal in this test + ImmuneOwnerUidsLimit::::insert(netuid, 1); + + // Neutralize safety floor for this test + SubtensorModule::set_min_non_immune_uids(netuid, 0); + let owner_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &owner_hotkey) .expect("Owner neuron should already be registered by add_dynamic_network"); @@ -470,34 +477,25 @@ fn test_get_neuron_to_prune_owner_not_pruned() { let uid_2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &additional_hotkey_2) .expect("Should be registered"); - SubtensorModule::set_pruning_score_for_uid(netuid, owner_uid, 0); - SubtensorModule::set_pruning_score_for_uid(netuid, uid_1, 1); - SubtensorModule::set_pruning_score_for_uid(netuid, uid_2, 2); + // Set emissions; owner has the lowest but is immortal, so choose uid_1 + Emission::::mutate(netuid, |v| { + v[owner_uid as usize] = 0u64.into(); + v[uid_1 as usize] = 1u64.into(); + v[uid_2 as usize] = 2u64.into(); + }); let pruned_uid = SubtensorModule::get_neuron_to_prune(netuid); - // - The pruned UID must be `uid_1` (score=1). - // - The owner's UID remains unpruned. - assert_eq!( - pruned_uid, uid_1, - "Should prune the neuron with pruning score=1, not the owner (score=0)." - ); - - let pruned_score = SubtensorModule::get_pruning_score_for_uid(netuid, uid_1); + // Expect to prune uid_1; owner's UID is skipped as immortal. assert_eq!( - pruned_score, - u16::MAX, - "Pruned neuron's score should be set to u16::MAX" - ); - - let owner_score = SubtensorModule::get_pruning_score_for_uid(netuid, owner_uid); - assert_eq!( - owner_score, 0, - "Owner's pruning score remains 0, indicating it was skipped" + pruned_uid, + Some(uid_1), + "Should prune the neuron with the lowest emission among non-immortal candidates." 
); }); } +#[allow(clippy::indexing_slicing)] #[test] fn test_get_neuron_to_prune_owner_pruned_if_not_in_sn_owner_hotkey_map() { new_test_ext(1).execute_with(|| { @@ -511,10 +509,16 @@ fn test_get_neuron_to_prune_owner_pruned_if_not_in_sn_owner_hotkey_map() { SubtensorModule::set_target_registrations_per_interval(netuid, 100); SubnetOwner::::insert(netuid, owner_coldkey); + // Make only one owner hotkey immortal at a time + ImmuneOwnerUidsLimit::::insert(netuid, 1); + + // Neutralize safety floor for this test + SubtensorModule::set_min_non_immune_uids(netuid, 0); + let owner_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &owner_hotkey) .expect("Owner neuron should already be registered by add_dynamic_network"); - // Register another hotkey for the owner + // Register another hotkey for the owner (same coldkey) register_ok_neuron(netuid, other_owner_hotkey, owner_coldkey, 0); let other_owner_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &other_owner_hotkey) @@ -534,29 +538,121 @@ fn test_get_neuron_to_prune_owner_pruned_if_not_in_sn_owner_hotkey_map() { let uid_3 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &additional_hotkey_2) .expect("Should be registered"); - SubtensorModule::set_pruning_score_for_uid(netuid, owner_uid, 0); - // Other owner key has pruning score not worse than the owner's first hotkey, but worse than the additional hotkeys - SubtensorModule::set_pruning_score_for_uid(netuid, other_owner_uid, 1); - SubtensorModule::set_pruning_score_for_uid(netuid, uid_2, 2); - SubtensorModule::set_pruning_score_for_uid(netuid, uid_3, 3); + // Case 1: With ImmuneOwnerUidsLimit = 1, the default SubnetOwnerHotkey is `owner_hotkey`. + // That makes `owner_uid` immortal and leaves `other_owner_uid` pruneable. + // Emissions: owner=0 (immortal), other_owner=1, uid_2=2, uid_3=3 -> expect other_owner_uid + Emission::::mutate(netuid, |v| { + v[owner_uid as usize] = 0u64.into(); + v[other_owner_uid as usize] = 1u64.into(); + v[uid_2 as usize] = 2u64.into(); + v[uid_3 as usize] = 3u64.into(); + }); let pruned_uid = SubtensorModule::get_neuron_to_prune(netuid); - assert_eq!(pruned_uid, other_owner_uid, "Should prune the owner"); + assert_eq!(pruned_uid, Some(other_owner_uid), "Should prune the owner"); - // Set the owner's other hotkey as the SubnetOwnerHotkey + // Case 2: Make the other owner's hotkey the SubnetOwnerHotkey, so it becomes the prioritized + // immortal one; now `owner_uid` is not immortal and has the lowest emission -> prune it. SubnetOwnerHotkey::::insert(netuid, other_owner_hotkey); - // Reset pruning scores - SubtensorModule::set_pruning_score_for_uid(netuid, owner_uid, 0); - SubtensorModule::set_pruning_score_for_uid(netuid, other_owner_uid, 1); - SubtensorModule::set_pruning_score_for_uid(netuid, uid_2, 2); - SubtensorModule::set_pruning_score_for_uid(netuid, uid_3, 3); - + // Emissions remain the same; `owner_uid` now becomes the lowest non-immortal candidate. 
let pruned_uid = SubtensorModule::get_neuron_to_prune(netuid); - assert_eq!( - pruned_uid, owner_uid, + pruned_uid, + Some(owner_uid), "Should prune the owner, not the top-stake owner hotkey and not the additional hotkeys" ); }); } + +#[test] +fn test_prune_respects_min_non_immune_floor_prefers_immune() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(2); + add_network(netuid, 10_000, 0); + ImmuneOwnerUidsLimit::::insert(netuid, 0); + + MaxRegistrationsPerBlock::::insert(netuid, 1024); + SubtensorModule::set_target_registrations_per_interval(netuid, u16::MAX); + + let immunity_period: u64 = 1000; + SubtensorModule::set_immunity_period(netuid, immunity_period as u16); + + // Register three neurons, each in its own block so the per‑block counter resets. + for i in 0..3 { + register_ok_neuron(netuid, U256::from(10_000 + i), U256::from(20_000 + i), 0); + step_block(1); + } + + // Jump block height forward past immunity for the first 3, without iterating 1000 blocks. + let target = frame_system::Pallet::::block_number() + immunity_period + 5; + frame_system::Pallet::::set_block_number(target - 1); + step_block(1); + + // Register a 4th neuron now — it will be immune. + register_ok_neuron(netuid, U256::from(99_999), U256::from(88_888), 0); + + SubtensorModule::set_min_non_immune_uids(netuid, 3); + + // With floor in place (3 non‑immune + 1 immune), we must prune the immune candidate (uid = 3). + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(3)); + }); +} + +#[test] +fn test_prune_tie_breakers_non_immune_emission_block_uid() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(3); + add_network(netuid, 1, 0); + + ImmuneOwnerUidsLimit::::insert(netuid, 0); + SubtensorModule::set_immunity_period(netuid, 0); + + // Register 3 neurons; registration blocks ascend (0,1,2). + for i in 0..3 { + register_ok_neuron(netuid, U256::from(30_000 + i), U256::from(40_000 + i), 0); + step_block(1); + } + + // Allow pruning of non-immune. + SubtensorModule::set_min_non_immune_uids(netuid, 0); + + // Equalize emissions across all 3. + Emission::::mutate(netuid, |v| { + for e in v.iter_mut() { + *e = 10u64.into(); + } + }); + + // Since emission ties, the earliest registration (uid=0) should be pruned. 
+ assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), Some(0)); + }); +} + +#[test] +fn test_prune_all_owner_immortal_returns_none() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(4); + + let owner_ck = U256::from(7777); + let owner_hk_0 = U256::from(9001); + let owner_hk_1 = U256::from(9002); + let owner_hk_2 = U256::from(9003); + + add_network(netuid, 1, 0); + SubnetOwner::::insert(netuid, owner_ck); + + register_ok_neuron(netuid, owner_hk_0, owner_ck, 0); + register_ok_neuron(netuid, owner_hk_1, owner_ck, 0); + register_ok_neuron(netuid, owner_hk_2, owner_ck, 0); + + Owner::::insert(owner_hk_0, owner_ck); + Owner::::insert(owner_hk_1, owner_ck); + Owner::::insert(owner_hk_2, owner_ck); + OwnedHotkeys::::insert(owner_ck, vec![owner_hk_0, owner_hk_1, owner_hk_2]); + + ImmuneOwnerUidsLimit::::insert(netuid, 10); + + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), None); + }); +} diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 21d37984e4..20ace5ee0d 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -4383,8 +4383,8 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { // Attempt unauthorized reveal let unauthorized_hotkey = hotkeys[0]; let target_hotkey = hotkeys[1]; - if let Some(commits) = commit_info_map.get(&target_hotkey) { - if let Some((_commit_hash, salt, uids, values, version_key)) = commits.first() { + if let Some(commits) = commit_info_map.get(&target_hotkey) + && let Some((_commit_hash, salt, uids, values, version_key)) = commits.first() { assert_err!( SubtensorModule::reveal_weights( RuntimeOrigin::signed(unauthorized_hotkey), @@ -4397,7 +4397,6 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { Error::::InvalidRevealCommitHashNotMatch ); } - } let non_committing_hotkey: ::AccountId = U256::from(9999); assert_err!( diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 70012fd44a..10fc0535f0 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -210,27 +210,6 @@ impl Pallet { *updated_active = active; Active::::insert(netuid, updated_active_vec); } - pub fn set_pruning_score_for_uid(netuid: NetUid, uid: u16, pruning_score: u16) { - log::debug!("netuid = {netuid:?}"); - log::debug!( - "SubnetworkN::::get( netuid ) = {:?}", - SubnetworkN::::get(netuid) - ); - log::debug!("uid = {uid:?}"); - if uid < SubnetworkN::::get(netuid) { - PruningScores::::mutate(netuid, |v| { - if let Some(s) = v.get_mut(uid as usize) { - *s = pruning_score; - } - }); - } else { - log::error!( - "set_pruning_score_for_uid: uid >= SubnetworkN::::get(netuid): {:?} >= {:?}", - uid, - SubnetworkN::::get(netuid) - ); - } - } pub fn set_validator_permit_for_uid(netuid: NetUid, uid: u16, validator_permit: bool) { let mut updated_validator_permits = Self::get_validator_permit(netuid); let Some(updated_validator_permit) = updated_validator_permits.get_mut(uid as usize) else { @@ -325,6 +304,16 @@ impl Pallet { pub fn get_neuron_block_at_registration(netuid: NetUid, neuron_uid: u16) -> u64 { BlockAtRegistration::::get(netuid, neuron_uid) } + /// Returns the minimum number of non-immortal & non-immune UIDs that must remain in a subnet. + pub fn get_min_non_immune_uids(netuid: NetUid) -> u16 { + MinNonImmuneUids::::get(netuid) + } + + /// Sets the minimum number of non-immortal & non-immune UIDs that must remain in a subnet. 
+ pub fn set_min_non_immune_uids(netuid: NetUid, min: u16) { + MinNonImmuneUids::::insert(netuid, min); + Self::deposit_event(Event::MinNonImmuneUidsSet(netuid, min)); + } // ======================== // ===== Take checks ====== diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 9c27b82a02..6ec02879bf 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -261,12 +261,12 @@ impl Pallet { // Should persist changes // Check if reserves are overused - if let Ok(ref swap_result) = result { - if reserve < swap_result.amount_paid_out { - return TransactionOutcome::Commit(Err( - Error::::InsufficientLiquidity.into() - )); - } + if let Ok(ref swap_result) = result + && reserve < swap_result.amount_paid_out + { + return TransactionOutcome::Commit(Err( + Error::::InsufficientLiquidity.into() + )); } TransactionOutcome::Commit(result) diff --git a/pallets/utility/src/lib.rs b/pallets/utility/src/lib.rs index c54413c2ea..d9f9bca281 100644 --- a/pallets/utility/src/lib.rs +++ b/pallets/utility/src/lib.rs @@ -145,8 +145,7 @@ pub mod pallet { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; let call_size = (core::mem::size_of::<::RuntimeCall>() as u32) .div_ceil(CALL_ALIGN) - .checked_mul(CALL_ALIGN) - .unwrap_or(u32::MAX); + .saturating_mul(CALL_ALIGN); // The margin to take into account vec doubling capacity. let margin_factor = 3; @@ -636,7 +635,8 @@ pub mod pallet { } /// A pallet identifier. These are per pallet and should be stored in a registry somewhere. -#[freeze_struct("7e600c53ace0630a")] +#[allow(unused)] +#[freeze_struct("8b0fb6b91f673972")] #[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] struct IndexedUtilityPalletId(u16); diff --git a/pallets/utility/src/tests.rs b/pallets/utility/src/tests.rs index 8b0a9274c2..03b4acbbf2 100644 --- a/pallets/utility/src/tests.rs +++ b/pallets/utility/src/tests.rs @@ -133,12 +133,11 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } -const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; parameter_types! 
{ pub const MultisigDepositBase: u64 = 1; pub const MultisigDepositFactor: u64 = 1; pub const MaxSignatories: u32 = 3; - pub const MotionDuration: BlockNumber = MOTION_DURATION_IN_BLOCKS; + pub const MotionDuration: BlockNumber = 3; pub const MaxProposals: u32 = 100; pub const MaxMembers: u32 = 100; pub MaxProposalWeight: Weight = BlockWeights::get().max_block.saturating_div(2); diff --git a/precompiles/src/proxy.rs b/precompiles/src/proxy.rs index 1399177766..c8396be264 100644 --- a/precompiles/src/proxy.rs +++ b/precompiles/src/proxy.rs @@ -9,12 +9,14 @@ use frame_system::RawOrigin; use pallet_evm::{AddressMapping, PrecompileHandle}; use pallet_subtensor_proxy as pallet_proxy; use precompile_utils::EvmResult; -use sp_core::H256; +use sp_core::{H256, U256}; use sp_runtime::{ codec::DecodeLimit, traits::{Dispatchable, StaticLookup}, }; use sp_std::boxed::Box; +use sp_std::convert::{TryFrom, TryInto}; +use sp_std::vec; use sp_std::vec::Vec; use subtensor_runtime_common::ProxyType; pub struct ProxyPrecompile(PhantomData); @@ -239,4 +241,30 @@ where handle.try_dispatch_runtime_call::(call, RawOrigin::Signed(account_id)) } + + #[precompile::public("getProxies(bytes32)")] + #[precompile::view] + pub fn get_proxies( + _handle: &mut impl PrecompileHandle, + account_id: H256, + ) -> EvmResult> { + let account_id = R::AccountId::from(account_id.0.into()); + + let proxies = pallet_proxy::pallet::Pallet::::proxies(account_id); + let mut result: Vec<(H256, U256, U256)> = vec![]; + for proxy in proxies.0 { + let delegate: [u8; 32] = proxy.delegate.into(); + let proxy_type: u8 = proxy.proxy_type.into(); + let delay: u32 = proxy + .delay + .try_into() + .map_err(|_| PrecompileFailure::Error { + exit_status: ExitError::Other("Invalid delay".into()), + })?; + + result.push((delegate.into(), proxy_type.into(), delay.into())); + } + + Ok(result) + } } diff --git a/precompiles/src/solidity/proxy.abi b/precompiles/src/solidity/proxy.abi new file mode 100644 index 0000000000..f557b54606 --- /dev/null +++ b/precompiles/src/solidity/proxy.abi @@ -0,0 +1,160 @@ +[ + { + "type": "function", + "name": "createPureProxy", + "inputs": [ + { + "name": "proxy_type", + "type": "uint8", + "internalType": "uint8" + }, + { + "name": "delay", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "index", + "type": "uint16", + "internalType": "uint16" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "proxyCall", + "inputs": [ + { + "name": "real", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "force_proxy_type", + "type": "uint8[]", + "internalType": "uint8[]" + }, + { + "name": "call", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "killPureProxy", + "inputs": [ + { + "name": "spawner", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "proxy_type", + "type": "uint8", + "internalType": "uint8" + }, + { + "name": "index", + "type": "uint16", + "internalType": "uint16" + }, + { + "name": "height", + "type": "uint16", + "internalType": "uint16" + }, + { + "name": "ext_index", + "type": "uint32", + "internalType": "uint32" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "addProxy", + "inputs": [ + { + "name": "delegate", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "proxy_type", + "type": "uint8", + "internalType": "uint8" + }, + { + "name": "delay", + 
"type": "uint32", + "internalType": "uint32" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "removeProxy", + "inputs": [ + { + "name": "delegate", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "proxy_type", + "type": "uint8", + "internalType": "uint8" + }, + { + "name": "delay", + "type": "uint32", + "internalType": "uint32" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "removeProxies", + "inputs": [], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "pokeDeposit", + "inputs": [], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "getProxies", + "inputs": [ + { + "name": "account", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32[]", + "internalType": "bytes32[]" + } + ], + "stateMutability": "view" + } +] diff --git a/precompiles/src/solidity/proxy.sol b/precompiles/src/solidity/proxy.sol index b0e03031bf..dbbee1d3f1 100644 --- a/precompiles/src/solidity/proxy.sol +++ b/precompiles/src/solidity/proxy.sol @@ -5,26 +5,48 @@ address constant IPROXY_ADDRESS = 0x000000000000000000000000000000000000080b; interface IProxy { function createPureProxy( - uint8 proxy_type, - uint32 delay, - uint16 index + uint8 proxy_type, + uint32 delay, + uint16 index ) external; - function proxyCall(bytes32 real, uint8[] memory force_proxy_type, bytes memory call) external; + function proxyCall( + bytes32 real, + uint8[] memory force_proxy_type, + bytes memory call + ) external; function killPureProxy( - bytes32 spawner, - uint8 proxy_type, - uint16 index, - uint16 height, - uint32 ext_index + bytes32 spawner, + uint8 proxy_type, + uint16 index, + uint16 height, + uint32 ext_index ) external; - function addProxy(bytes32 delegate, uint8 proxy_type, uint32 delay) external; + function addProxy( + bytes32 delegate, + uint8 proxy_type, + uint32 delay + ) external; - function removeProxy(bytes32 delegate, uint8 proxy_type, uint32 delay) external; + function removeProxy( + bytes32 delegate, + uint8 proxy_type, + uint32 delay + ) external; function removeProxies() external; function pokeDeposit() external; + + struct ProxyInfo { + bytes32 delegate; + uint256 proxy_type; + uint256 delay; + } + + function getProxies( + bytes32 account + ) external view returns (ProxyInfo[] memory); } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 9686a224b5..52746675f9 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -12,6 +12,7 @@ use core::num::NonZeroU64; pub mod check_nonce; mod migrations; +pub mod sudo_wrapper; pub mod transaction_payment_wrapper; extern crate alloc; @@ -60,7 +61,9 @@ use sp_runtime::{ AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, UniqueSaturatedInto, Verify, }, - transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, + transaction_validity::{ + TransactionPriority, TransactionSource, TransactionValidity, TransactionValidityError, + }, }; use sp_std::cmp::Ordering; use sp_std::prelude::*; @@ -173,6 +176,7 @@ impl frame_system::offchain::CreateSignedTransaction ChargeTransactionPaymentWrapper::new( pallet_transaction_payment::ChargeTransactionPayment::::from(0), ), + SudoTransactionExtension::::new(), pallet_subtensor::transaction_extension::SubtensorTransactionExtension::::new( ), pallet_drand::drand_priority::DrandPriority::::new(), 
@@ -237,7 +241,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 362, + spec_version: 365, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -1158,6 +1162,7 @@ impl pallet_subtensor_swap::Config for Runtime { type WeightInfo = pallet_subtensor_swap::weights::DefaultWeight; } +use crate::sudo_wrapper::SudoTransactionExtension; use crate::transaction_payment_wrapper::ChargeTransactionPaymentWrapper; use sp_runtime::BoundedVec; @@ -1221,6 +1226,10 @@ impl> FindAuthor for FindAuthorTruncated { } const BLOCK_GAS_LIMIT: u64 = 75_000_000; +pub const NORMAL_DISPATCH_BASE_PRIORITY: TransactionPriority = 1; +pub const OPERATIONAL_DISPATCH_PRIORITY: TransactionPriority = 10_000_000_000; +const EVM_TRANSACTION_BASE_PRIORITY: TransactionPriority = NORMAL_DISPATCH_BASE_PRIORITY; +const EVM_LOG_TARGET: &str = "runtime::ethereum"; /// `WeightPerGas` is an approximate ratio of the amount of Weight per Gas. /// @@ -1384,6 +1393,35 @@ impl fp_rpc::ConvertTransaction<::Extrinsic> for Transac } } +fn adjust_evm_priority_and_warn( + validity: &mut Option, + priority_fee: Option, + info: &H160, +) { + if let Some(Ok(valid_transaction)) = validity.as_mut() { + let original_priority = valid_transaction.priority; + valid_transaction.priority = EVM_TRANSACTION_BASE_PRIORITY; + + let has_priority_fee = priority_fee.is_some_and(|fee| !fee.is_zero()); + if has_priority_fee { + log::warn!( + target: EVM_LOG_TARGET, + "Priority fee/tip from {:?} (max_priority_fee_per_gas: {:?}) is ignored for transaction ordering", + info, + priority_fee.unwrap_or_default(), + ); + } else if original_priority > EVM_TRANSACTION_BASE_PRIORITY { + log::warn!( + target: EVM_LOG_TARGET, + "EVM transaction priority from {:?} reduced from {} to {}; priority tips are ignored for ordering", + info, + original_priority, + EVM_TRANSACTION_BASE_PRIORITY, + ); + } + } +} + impl fp_self_contained::SelfContainedCall for RuntimeCall { type SignedInfo = H160; @@ -1408,7 +1446,21 @@ impl fp_self_contained::SelfContainedCall for RuntimeCall { len: usize, ) -> Option { match self { - RuntimeCall::Ethereum(call) => call.validate_self_contained(info, dispatch_info, len), + RuntimeCall::Ethereum(call) => { + let priority_fee = match call { + pallet_ethereum::Call::transact { transaction } => match transaction { + EthereumTransaction::EIP1559(tx) => Some(tx.max_priority_fee_per_gas), + EthereumTransaction::EIP7702(tx) => Some(tx.max_priority_fee_per_gas), + _ => None, + }, + _ => None, + }; + + let mut validity = call.validate_self_contained(info, dispatch_info, len); + adjust_evm_priority_and_warn(&mut validity, priority_fee, info); + + validity + } _ => None, } } @@ -1609,6 +1661,7 @@ pub type TransactionExtensions = ( check_nonce::CheckNonce, frame_system::CheckWeight, ChargeTransactionPaymentWrapper, + SudoTransactionExtension, pallet_subtensor::transaction_extension::SubtensorTransactionExtension, pallet_drand::drand_priority::DrandPriority, frame_metadata_hash_extension::CheckMetadataHash, @@ -2618,6 +2671,52 @@ fn test_into_substrate_balance_zero_value() { assert_eq!(result, Some(expected_substrate_balance)); } +#[test] +fn evm_priority_overrides_tip_to_base() { + let mut validity: Option = + Some(Ok(sp_runtime::transaction_validity::ValidTransaction { + priority: 99, + requires: vec![], + provides: vec![], 
+ longevity: sp_runtime::transaction_validity::TransactionLongevity::MAX, + propagate: true, + })); + + let signer = H160::repeat_byte(1); + adjust_evm_priority_and_warn(&mut validity, Some(U256::from(10)), &signer); + + let adjusted_priority = validity + .as_ref() + .and_then(|v| v.as_ref().ok()) + .map(|v| v.priority); + + assert_eq!(adjusted_priority, Some(EVM_TRANSACTION_BASE_PRIORITY)); +} + +#[test] +fn evm_priority_cannot_overtake_unstake() { + // Unstake is a normal-class extrinsic (priority = NORMAL_DISPATCH_BASE_PRIORITY). + let unstake_priority: TransactionPriority = NORMAL_DISPATCH_BASE_PRIORITY; + let evm_priority: TransactionPriority = EVM_TRANSACTION_BASE_PRIORITY; + + // Clamp guarantees the EVM tx is never above the unstake priority. + assert!(evm_priority <= unstake_priority); + + // If both arrive with equal priority, arrival order keeps unstake first. + let mut queue: Vec<(&str, TransactionPriority, usize)> = vec![ + ("unstake", unstake_priority, 0), // arrives first + ("evm", evm_priority, 1), // arrives later + ]; + + queue.sort_by(|a, b| { + b.1.cmp(&a.1) // higher priority first + .then_with(|| a.2.cmp(&b.2)) // earlier arrival first when equal + }); + + let first = queue.first().map(|entry| entry.0); + assert_eq!(first, Some("unstake")); +} + #[test] fn test_into_evm_balance_valid() { // Valid conversion from Substrate to EVM diff --git a/runtime/src/sudo_wrapper.rs b/runtime/src/sudo_wrapper.rs new file mode 100644 index 0000000000..154fbcb89d --- /dev/null +++ b/runtime/src/sudo_wrapper.rs @@ -0,0 +1,84 @@ +use codec::{Decode, DecodeWithMemTracking, Encode}; +use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; +use frame_support::traits::IsSubType; +use frame_system::Config; +use pallet_sudo::Call as SudoCall; +use scale_info::TypeInfo; +use sp_runtime::impl_tx_ext_default; +use sp_runtime::traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, Implication, TransactionExtension, + ValidateResult, +}; +use sp_runtime::transaction_validity::{InvalidTransaction, TransactionSource}; +use sp_std::marker::PhantomData; +use subtensor_macros::freeze_struct; + +#[freeze_struct("99dce71278b36b44")] +#[derive(Default, Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)] +pub struct SudoTransactionExtension(pub PhantomData); + +impl sp_std::fmt::Debug for SudoTransactionExtension { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "SudoTransactionExtension",) + } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { + Ok(()) + } +} + +impl SudoTransactionExtension { + pub fn new() -> Self { + Self(Default::default()) + } +} + +impl + TransactionExtension<::RuntimeCall> for SudoTransactionExtension +where + ::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, + ::RuntimeCall: IsSubType>, +{ + const IDENTIFIER: &'static str = "SudoTransactionExtension"; + + type Implicit = (); + type Val = (); + type Pre = (); + + impl_tx_ext_default!(::RuntimeCall; weight prepare); + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &::RuntimeCall, + _info: &DispatchInfoOf<::RuntimeCall>, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Implication, + _source: TransactionSource, + ) -> ValidateResult::RuntimeCall> { + // Ensure the transaction is signed, else we just skip the extension. 
+ let Some(who) = origin.as_system_origin_signer() else { + return Ok((Default::default(), (), origin)); + }; + + // Check validity of the signer for sudo call + if let Some(_sudo_call) = IsSubType::>::is_sub_type(call) { + let sudo_key = pallet_sudo::pallet::Key::::get(); + + // No sudo key configured → reject + let Some(expected_who) = sudo_key else { + return Err(InvalidTransaction::BadSigner.into()); + }; + + // Signer does not match the sudo key → reject + if *who != expected_who { + return Err(InvalidTransaction::BadSigner.into()); + } + } + + Ok((Default::default(), (), origin)) + } +} diff --git a/runtime/src/transaction_payment_wrapper.rs b/runtime/src/transaction_payment_wrapper.rs index f299c52497..96d7f3609b 100644 --- a/runtime/src/transaction_payment_wrapper.rs +++ b/runtime/src/transaction_payment_wrapper.rs @@ -1,4 +1,4 @@ -use crate::Weight; +use crate::{NORMAL_DISPATCH_BASE_PRIORITY, OPERATIONAL_DISPATCH_PRIORITY, Weight}; use codec::{Decode, DecodeWithMemTracking, Encode}; use frame_election_provider_support::private::sp_arithmetic::traits::SaturatedConversion; use frame_support::dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}; @@ -77,19 +77,12 @@ where match inner_validate { Ok((mut valid_transaction, val, origin)) => { let overridden_priority = { - match info.class { - DispatchClass::Normal => 1u64, - DispatchClass::Mandatory => { - // Mandatory extrinsics should be prohibited (e.g. by the [`CheckWeight`] - // extensions), but just to be safe let's return the same priority as `Normal` here. - 1u64 - } - DispatchClass::Operational => { - // System calls - 10_000_000_000u64 - } - } - .saturated_into::() + let base: TransactionPriority = match info.class { + DispatchClass::Normal => NORMAL_DISPATCH_BASE_PRIORITY, + DispatchClass::Mandatory => NORMAL_DISPATCH_BASE_PRIORITY, + DispatchClass::Operational => OPERATIONAL_DISPATCH_PRIORITY, + }; + base.saturated_into::() }; valid_transaction.priority = overridden_priority; diff --git a/runtime/tests/sudo_wrapper.rs b/runtime/tests/sudo_wrapper.rs new file mode 100644 index 0000000000..bdcd17bd6e --- /dev/null +++ b/runtime/tests/sudo_wrapper.rs @@ -0,0 +1,112 @@ +#![allow(clippy::unwrap_used)] + +use frame_support::assert_ok; +use frame_support::dispatch::GetDispatchInfo; +use node_subtensor_runtime::{ + BuildStorage, Runtime, RuntimeCall, RuntimeGenesisConfig, RuntimeOrigin, System, SystemCall, + sudo_wrapper, +}; +use sp_runtime::traits::{TransactionExtension, TxBaseImplication, ValidateResult}; +use sp_runtime::transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidityError, +}; +use subtensor_runtime_common::AccountId; + +const SUDO_ACCOUNT: [u8; 32] = [1_u8; 32]; +const OTHER_ACCOUNT: [u8; 32] = [3_u8; 32]; + +fn new_test_ext() -> sp_io::TestExternalities { + let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { + sudo: pallet_sudo::GenesisConfig { key: None }, + ..Default::default() + } + .build_storage() + .unwrap() + .into(); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +fn call_remark() -> RuntimeCall { + let remark = vec![1, 2, 3]; + RuntimeCall::System(SystemCall::remark { remark }) +} + +fn sudo_extrinsic(inner: RuntimeCall) -> RuntimeCall { + RuntimeCall::Sudo(pallet_sudo::Call::sudo { + call: Box::new(inner), + }) +} + +fn validate_ext(origin: RuntimeOrigin, call: &RuntimeCall) -> ValidateResult<(), RuntimeCall> { + let ext = sudo_wrapper::SudoTransactionExtension::::new(); + + ext.validate( + origin, + call, + &call.get_dispatch_info(), + 0, + (), + 
&TxBaseImplication(()), + TransactionSource::External, + ) +} +#[test] +fn sudo_signed_by_correct_key_is_valid() { + new_test_ext().execute_with(|| { + let sudo_key = AccountId::from(SUDO_ACCOUNT); + pallet_sudo::Key::::put(sudo_key.clone()); + let sudo_call = sudo_extrinsic(call_remark()); + + // Signed origin with correct sudo key + let origin = RuntimeOrigin::signed(sudo_key); + let res = validate_ext(origin, &sudo_call); + assert_ok!(res); + }); +} + +#[test] +fn sudo_signed_by_wrong_account_is_rejected() { + new_test_ext().execute_with(|| { + let sudo_key = AccountId::from(SUDO_ACCOUNT); + // Set sudo key in storage + pallet_sudo::Key::::put(sudo_key.clone()); + let sudo_call = sudo_extrinsic(call_remark()); + // Wrong signer + let origin = RuntimeOrigin::signed(AccountId::from(OTHER_ACCOUNT)); + let res = validate_ext(origin, &sudo_call); + assert!(matches!( + res, + Err(TransactionValidityError::Invalid( + InvalidTransaction::BadSigner + )) + )); + }); +} + +#[test] +fn sudo_when_no_sudo_key_configured_is_rejected() { + new_test_ext().execute_with(|| { + // Remove sudo key + pallet_sudo::Key::::kill(); + let sudo_call = sudo_extrinsic(call_remark()); + let origin = RuntimeOrigin::signed(AccountId::from(SUDO_ACCOUNT)); + let res = validate_ext(origin, &sudo_call); + assert!(matches!( + res, + Err(TransactionValidityError::Invalid( + InvalidTransaction::BadSigner + )) + )); + }); +} + +#[test] +fn non_sudo_extrinsic_does_not_trigger_filter() { + new_test_ext().execute_with(|| { + let origin = RuntimeOrigin::signed(AccountId::from(OTHER_ACCOUNT)); + let call = call_remark(); + let res = validate_ext(origin, &call); + assert!(res.is_ok()); + }); +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml index bfb378c468..55003ecad6 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.88" # rustc 1.88.0 (6b00bc388 2025-06-23) +channel = "1.89" # rustc 1.89.0 (29483883e 2025-08-04) components = [ "cargo", "clippy", diff --git a/scripts/fix_rust.sh b/scripts/fix_rust.sh index 9d2af2904b..08e983a432 100755 --- a/scripts/fix_rust.sh +++ b/scripts/fix_rust.sh @@ -11,12 +11,12 @@ commit_if_changes() { fi } -# Step 1: Run cargo check and commit changes to Cargo.lock if any. +# Step 1: Run cargo check and commit changes to Cargo.lock if any cargo check --workspace commit_if_changes "commit Cargo.lock" # Step 2: Run cargo clippy with fixes and commit changes if any. -cargo clippy --fix --workspace --all-features +cargo clippy --fix --workspace --all-features --all-targets commit_if_changes "cargo clippy" # Step 3: Run cargo fix and commit changes if any. @@ -24,5 +24,10 @@ cargo fix --workspace --all-features --all-targets commit_if_changes "cargo fix" # Step 4: Run cargo fmt and commit changes if any. -cargo fmt +cargo fmt --all commit_if_changes "cargo fmt" + +if command -v zepter >/dev/null 2>&1; then + echo "zepter detected, running 'zepter run check'..." + zepter run check +fi \ No newline at end of file diff --git a/support/linting/src/require_freeze_struct.rs b/support/linting/src/require_freeze_struct.rs index b697c5b824..288f2a2b1a 100644 --- a/support/linting/src/require_freeze_struct.rs +++ b/support/linting/src/require_freeze_struct.rs @@ -54,14 +54,14 @@ fn is_freeze_struct(attr: &Attribute) -> bool { } fn is_derive_encode_or_decode(attr: &Attribute) -> bool { - if let Meta::List(MetaList { path, tokens, .. 
}) = &attr.meta { - if path.is_ident("derive") { - let nested: Punctuated = parse_quote!(#tokens); - return nested.iter().any(|nested| { - nested.segments.iter().any(|seg| seg.ident == "Encode") - || nested.segments.iter().any(|seg| seg.ident == "Decode") - }); - } + if let Meta::List(MetaList { path, tokens, .. }) = &attr.meta + && path.is_ident("derive") + { + let nested: Punctuated = parse_quote!(#tokens); + return nested.iter().any(|nested| { + nested.segments.iter().any(|seg| seg.ident == "Encode") + || nested.segments.iter().any(|seg| seg.ident == "Decode") + }); } false }
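A minimal sketch, not part of the patch and not the pallet's actual code: the rewritten registration/uids tests above drive pruning through Emission instead of PruningScores, and get_neuron_to_prune now returns Option<u16>. The ordering those tests assert — owner-immortal UIDs are never pruned; otherwise prefer non-immune over immune, then lowest emission, then earliest registration block, then lowest uid; None when every UID is immortal — can be modelled in plain Rust as below. The names Neuron and select_uid_to_prune are hypothetical, and the MinNonImmuneUids safety floor (which the floor test shows can force an immune candidate to be chosen) is deliberately omitted.

// Plain-Rust model of the selection order encoded by the updated tests.
// Not the pallet implementation; names and fields are illustrative only.

#[derive(Clone, Copy)]
struct Neuron {
    uid: u16,
    emission: u64,
    registered_at: u64,
    immune: bool,   // still inside the subnet's immunity period
    immortal: bool, // owner hotkey covered by ImmuneOwnerUidsLimit
}

fn select_uid_to_prune(neurons: &[Neuron]) -> Option<u16> {
    neurons
        .iter()
        .filter(|n| !n.immortal) // immortal owner UIDs are never candidates
        // bool orders false < true, so non-immune sorts ahead of immune;
        // then lowest emission, then earliest registration, then lowest uid.
        .min_by_key(|n| (n.immune, n.emission, n.registered_at, n.uid))
        .map(|n| n.uid)
}

fn main() {
    // Mirrors the tie-break case in test_registration_pruning: equal emissions
    // among non-immune neurons, so the earliest-registered one (uid 1) is
    // selected even though the immune uid 2 has lower emission.
    let neurons = [
        Neuron { uid: 0, emission: 10, registered_at: 850, immune: false, immortal: false },
        Neuron { uid: 1, emission: 10, registered_at: 800, immune: false, immortal: false },
        Neuron { uid: 2, emission: 1,  registered_at: 990, immune: true,  immortal: false },
    ];
    assert_eq!(select_uid_to_prune(&neurons), Some(1));

    // Mirrors test_prune_all_owner_immortal_returns_none: with every UID
    // owner-immortal, no pruning candidate remains.
    let all_immortal = [
        Neuron { uid: 0, emission: 0, registered_at: 0, immune: true, immortal: true },
    ];
    assert_eq!(select_uid_to_prune(&all_immortal), None);
}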