Skip to content

Commit 29cf55f

Browse files
fix yuma3 inactive neurons bonds computation
1 parent 2257f94 commit 29cf55f

File tree

4 files changed

+156
-5
lines changed

4 files changed

+156
-5
lines changed

pallets/subtensor/src/epoch/math.rs

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -565,6 +565,22 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec<I32F32>]) {
565565
});
566566
}
567567

568+
// Apply row mask to sparse matrix, mask=true will set the values on that row to 0
569+
#[allow(dead_code)]
570+
pub fn inplace_mask_rows_sparse(mask: &[bool], sparse_matrix: &mut [Vec<(u16, I32F32)>]) {
571+
assert_eq!(sparse_matrix.len(), mask.len());
572+
sparse_matrix
573+
.iter_mut()
574+
.zip(mask)
575+
.for_each(|(sparse_row, mask_row)| {
576+
if *mask_row {
577+
sparse_row.iter_mut().for_each(|(_j, value)| {
578+
*value = I32F32::saturating_from_num(0);
579+
});
580+
}
581+
});
582+
}
583+
568584
// Apply column mask to matrix, mask=true will mask out, i.e. set to 0.
569585
// Assumes each column has the same length.
570586
#[allow(dead_code)]

pallets/subtensor/src/epoch/run_epoch.rs

Lines changed: 18 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,7 @@ impl<T: Config> Pallet<T> {
212212
// Calculate weights for bonds, apply bonds penalty to weights.
213213
// bonds_penalty = 0: weights_for_bonds = weights.clone()
214214
// bonds_penalty = 1: weights_for_bonds = clipped_weights.clone()
215-
let weights_for_bonds: Vec<Vec<I32F32>> =
215+
let mut weights_for_bonds: Vec<Vec<I32F32>> =
216216
interpolate(&weights, &clipped_weights, bonds_penalty);
217217

218218
let mut dividends: Vec<I32F32>;
@@ -223,6 +223,13 @@ impl<T: Config> Pallet<T> {
223223
inplace_mask_cols(&recently_registered, &mut bonds); // mask outdated bonds
224224
log::trace!("B: {:?}", &bonds);
225225

226+
// Inactive neurons bonds are computed assuming 0 weights.
227+
inplace_mask_rows(&inactive, &mut weights_for_bonds);
228+
log::trace!(
229+
"Weights for bonds (active neurons): {:?}",
230+
&weights_for_bonds
231+
);
232+
226233
// Compute the Exponential Moving Average (EMA) of bonds.
227234
ema_bonds = Self::compute_bonds(netuid, &weights_for_bonds, &bonds, &consensus);
228235
log::trace!("emaB: {:?}", &ema_bonds);
@@ -628,7 +635,7 @@ impl<T: Config> Pallet<T> {
628635
// Calculate weights for bonds, apply bonds penalty to weights.
629636
// bonds_penalty = 0: weights_for_bonds = weights.clone()
630637
// bonds_penalty = 1: weights_for_bonds = clipped_weights.clone()
631-
let weights_for_bonds: Vec<Vec<(u16, I32F32)>> =
638+
let mut weights_for_bonds: Vec<Vec<(u16, I32F32)>> =
632639
interpolate_sparse(&weights, &clipped_weights, n, bonds_penalty);
633640

634641
let mut dividends: Vec<I32F32>;
@@ -650,8 +657,16 @@ impl<T: Config> Pallet<T> {
650657
);
651658
log::trace!("Bonds: (mask) {:?}", &bonds);
652659

653-
// Compute the Exponential Moving Average (EMA) of bonds.
660+
// Inactive neurons bonds are computed assuming 0 weights.
661+
// For this it is necessary to keep (index, 0) entries in the sparse matrix.
654662
log::trace!("weights_for_bonds: {:?}", &weights_for_bonds);
663+
inplace_mask_rows_sparse(&inactive, &mut weights_for_bonds);
664+
log::trace!(
665+
"Weights for bonds (active neurons): {:?}",
666+
&weights_for_bonds
667+
);
668+
669+
// Compute the Exponential Moving Average (EMA) of bonds.
655670
ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus);
656671
log::trace!("emaB: {:?}", &ema_bonds);
657672

pallets/subtensor/src/tests/epoch.rs

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2756,6 +2756,75 @@ fn set_yuma_3_weights(netuid: NetUid, weights: Vec<Vec<u16>>, indices: Vec<u16>)
27562756
}
27572757
}
27582758

2759+
#[test]
2760+
fn test_yuma_3_inactive_bonds() {
2761+
// Test how bonds change over epochs for active vs inactive validators
2762+
for sparse in [true, false].iter() {
2763+
new_test_ext(1).execute_with(|| {
2764+
let n: u16 = 4; // 2 validators, 2 servers
2765+
let netuid = NetUid::from(1);
2766+
let max_stake: u64 = 8;
2767+
let stakes: Vec<u64> = vec![5, 5, 0, 0];
2768+
let weights_to_set: Vec<u16> = vec![u16::MAX, 0];
2769+
let miner_indices: Vec<u16> = vec![2, 3];
2770+
2771+
setup_yuma_3_scenario(netuid, n, *sparse, max_stake, stakes);
2772+
2773+
// at epoch 4 validator will go inactive if weights not set
2774+
SubtensorModule::set_activity_cutoff(netuid, 3);
2775+
2776+
// set initial weights
2777+
set_yuma_3_weights(
2778+
netuid,
2779+
vec![weights_to_set.clone(); 2],
2780+
miner_indices.clone(),
2781+
);
2782+
2783+
let all_targets_bonds = [
2784+
vec![vec![0.101319, 0.0000], vec![0.101319, 0.0000]],
2785+
vec![vec![0.192370, 0.0000], vec![0.192370, 0.0000]],
2786+
vec![vec![0.274204, 0.0000], vec![0.274204, 0.0000]],
2787+
vec![vec![0.241580, 0.0000], vec![0.347737, 0.0000]],
2788+
vec![vec![0.214023, 0.0000], vec![0.413824, 0.0000]],
2789+
vec![vec![0.293659, 0.0000], vec![0.473212, 0.0000]],
2790+
vec![vec![0.365224, 0.0000], vec![0.526588, 0.0000]],
2791+
vec![vec![0.429541, 0.0000], vec![0.574547, 0.0000]],
2792+
];
2793+
2794+
for (epoch, target_bonds) in all_targets_bonds.iter().enumerate() {
2795+
if epoch == 2 {
2796+
// Set weight only on validator 1 and let the other become inactive
2797+
assert_ok!(SubtensorModule::set_weights(
2798+
RuntimeOrigin::signed(U256::from(1)),
2799+
netuid,
2800+
miner_indices.clone(),
2801+
weights_to_set.clone(),
2802+
0
2803+
));
2804+
}
2805+
if epoch == 5 {
2806+
// all 2 validators are active again
2807+
set_yuma_3_weights(
2808+
netuid,
2809+
vec![weights_to_set.clone(); 2],
2810+
miner_indices.clone(),
2811+
);
2812+
}
2813+
run_epoch(netuid, *sparse);
2814+
2815+
// Check bonds values
2816+
let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid);
2817+
for (bond, target_bond) in bonds.iter().zip(target_bonds.iter()) {
2818+
// skip the 2 validators bonds 0 values
2819+
for (b, t) in bond.iter().skip(2).zip(target_bond) {
2820+
assert_approx_eq(*b, fixed(*t), I32F32::from_num(1e-3));
2821+
}
2822+
}
2823+
}
2824+
});
2825+
}
2826+
}
2827+
27592828
#[test]
27602829
fn test_yuma_3_kappa_moves_first() {
27612830
for sparse in [true, false].iter() {

pallets/subtensor/src/tests/math.rs

Lines changed: 53 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -418,6 +418,15 @@ fn vec_to_sparse_mat_fixed(
418418
vector: &[f32],
419419
rows: usize,
420420
transpose: bool,
421+
) -> Vec<Vec<(u16, I32F32)>> {
422+
_vec_to_sparse_mat_fixed(vector, rows, transpose, true)
423+
}
424+
425+
fn _vec_to_sparse_mat_fixed(
426+
vector: &[f32],
427+
rows: usize,
428+
transpose: bool,
429+
filter_zeros: bool,
421430
) -> Vec<Vec<(u16, I32F32)>> {
422431
assert!(
423432
vector.len() % rows == 0,
@@ -430,7 +439,7 @@ fn vec_to_sparse_mat_fixed(
430439
for col in 0..cols {
431440
let mut row_vec: Vec<(u16, I32F32)> = vec![];
432441
for row in 0..rows {
433-
if vector[row * cols + col] > 0. {
442+
if !filter_zeros || vector[row * cols + col] > 0. {
434443
row_vec.push((row as u16, I32F32::from_num(vector[row * cols + col])));
435444
}
436445
}
@@ -440,7 +449,7 @@ fn vec_to_sparse_mat_fixed(
440449
for row in 0..rows {
441450
let mut row_vec: Vec<(u16, I32F32)> = vec![];
442451
for col in 0..cols {
443-
if vector[row * cols + col] > 0. {
452+
if !filter_zeros || vector[row * cols + col] > 0. {
444453
row_vec.push((col as u16, I32F32::from_num(vector[row * cols + col])));
445454
}
446455
}
@@ -1027,6 +1036,48 @@ fn test_math_inplace_mask_rows() {
10271036
);
10281037
}
10291038

1039+
#[test]
1040+
fn test_math_inplace_mask_rows_sparse() {
1041+
let input: Vec<f32> = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.];
1042+
let mask: Vec<bool> = vec![false, false, false];
1043+
let target: Vec<f32> = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.];
1044+
let mut mat = _vec_to_sparse_mat_fixed(&input, 3, false, false);
1045+
inplace_mask_rows_sparse(&mask, &mut mat);
1046+
assert_sparse_mat_compare(
1047+
&mat,
1048+
&_vec_to_sparse_mat_fixed(&target, 3, false, false),
1049+
I32F32::from_num(0),
1050+
);
1051+
let mask: Vec<bool> = vec![true, true, true];
1052+
let target: Vec<f32> = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.];
1053+
let mut mat = _vec_to_sparse_mat_fixed(&input, 3, false, false);
1054+
inplace_mask_rows_sparse(&mask, &mut mat);
1055+
assert_sparse_mat_compare(
1056+
&mat,
1057+
&_vec_to_sparse_mat_fixed(&target, 3, false, false),
1058+
I32F32::from_num(0),
1059+
);
1060+
let mask: Vec<bool> = vec![true, false, true];
1061+
let target: Vec<f32> = vec![0., 0., 0., 4., 5., 6., 0., 0., 0.];
1062+
let mut mat = _vec_to_sparse_mat_fixed(&input, 3, false, false);
1063+
inplace_mask_rows_sparse(&mask, &mut mat);
1064+
assert_sparse_mat_compare(
1065+
&mat,
1066+
&_vec_to_sparse_mat_fixed(&target, 3, false, false),
1067+
I32F32::from_num(0),
1068+
);
1069+
let input: Vec<f32> = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.];
1070+
let mut mat = _vec_to_sparse_mat_fixed(&input, 3, false, false);
1071+
let mask: Vec<bool> = vec![false, false, false];
1072+
let target: Vec<f32> = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.];
1073+
inplace_mask_rows_sparse(&mask, &mut mat);
1074+
assert_sparse_mat_compare(
1075+
&mat,
1076+
&_vec_to_sparse_mat_fixed(&target, 3, false, false),
1077+
I32F32::from_num(0),
1078+
);
1079+
}
1080+
10301081
#[test]
10311082
fn test_math_inplace_mask_diag() {
10321083
let vector: Vec<f32> = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.];

0 commit comments

Comments
 (0)