Merged
51 changes: 21 additions & 30 deletions pallets/subtensor/src/epoch/math.rs
@@ -294,7 +294,7 @@ pub fn inplace_row_normalize_64(x: &mut [Vec<I64F64>]) {
pub fn vecdiv(x: &[I32F32], y: &[I32F32]) -> Vec<I32F32> {
if x.len() != y.len() {
log::error!(
- "vecdiv input lengths are not equal: {:?} != {:?}",
+ "math error: vecdiv input lengths are not equal: {:?} != {:?}",
x.len(),
y.len()
);
@@ -487,7 +487,7 @@ pub fn inplace_col_max_upscale(x: &mut [Vec<I32F32>]) {
pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) {
if mask.len() != vector.len() {
log::error!(
- "inplace_mask_vector input lengths are not equal: {:?} != {:?}",
+ "math error: inplace_mask_vector input lengths are not equal: {:?} != {:?}",
mask.len(),
vector.len()
);
@@ -508,7 +508,7 @@ pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) {
pub fn inplace_mask_matrix(mask: &[Vec<bool>], matrix: &mut [Vec<I32F32>]) {
if mask.len() != matrix.len() {
log::error!(
- "inplace_mask_matrix input sizes are not equal: {:?} != {:?}",
+ "math error: inplace_mask_matrix input sizes are not equal: {:?} != {:?}",
mask.len(),
matrix.len()
);
@@ -538,7 +538,7 @@ pub fn inplace_mask_matrix(mask: &[Vec<bool>], matrix: &mut [Vec<I32F32>]) {
pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec<I32F32>]) {
if mask.len() != matrix.len() {
log::error!(
- "inplace_mask_rows input sizes are not equal: {:?} != {:?}",
+ "math error: inplace_mask_rows input sizes are not equal: {:?} != {:?}",
mask.len(),
matrix.len()
);
@@ -560,7 +560,7 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec<I32F32>]) {
pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec<I32F32>]) {
if mask.len() != matrix.len() {
log::error!(
- "inplace_mask_cols input sizes are not equal: {:?} != {:?}",
+ "math error: inplace_mask_cols input sizes are not equal: {:?} != {:?}",
mask.len(),
matrix.len()
);
@@ -591,7 +591,7 @@ pub fn inplace_mask_diag(matrix: &mut [Vec<I32F32>]) {
// with no action. Log error if this happens.
if matrix.len() != first_row.len() {
log::error!(
- "inplace_mask_diag: matrix.len {:?} != first_row.len {:?}",
+ "math error: inplace_mask_diag: matrix.len {:?} != first_row.len {:?}",
matrix.len(),
first_row.len()
);
@@ -641,7 +641,7 @@ pub fn inplace_mask_diag_except_index(matrix: &mut [Vec<I32F32>], except_index:
}
if matrix.len() != first_row.len() {
log::error!(
- "inplace_mask_diag input matrix is now square: {:?} != {:?}",
+ "math error: inplace_mask_diag input matrix is now square: {:?} != {:?}",
matrix.len(),
first_row.len()
);
@@ -794,7 +794,7 @@ pub fn matmul(matrix: &[Vec<I32F32>], vector: &[I32F32]) -> Vec<I32F32> {
}
if matrix.len() != vector.len() {
log::error!(
- "matmul input sizes are not equal: {:?} != {:?}",
+ "math error: matmul input sizes are not equal: {:?} != {:?}",
matrix.len(),
vector.len()
);
@@ -830,11 +830,11 @@ pub fn matmul_transpose(matrix: &[Vec<I32F32>], vector: &[I32F32]) -> Vec<I32F32
if first_row.is_empty() {
return vec![];
}
- if matrix.len() != first_row.len() {
+ if vector.len() != first_row.len() {
log::error!(
- "matmul_transpose matrix is not square: {:?} != {:?}",
- matrix.len(),
- first_row.len()
+ "math error: matmul_transpose matrix width doesn't match to vector height: {:?} != {:?}",
+ first_row.len(),
+ vector.len()
);
}

@@ -983,7 +983,7 @@ pub fn weighted_median(
let zero = I32F32::saturating_from_num(0.0);
if stake.len() != score.len() {
log::error!(
- "weighted_median stake and score have different lengths: {:?} != {:?}",
+ "math error: weighted_median stake and score have different lengths: {:?} != {:?}",
stake.len(),
score.len()
);
@@ -1082,7 +1082,7 @@ pub fn weighted_median_col(
use_stake.push(zero);
use_score.push(zero);
log::error!(
- "weighted_median_col row.len() != columns: {:?} != {:?}",
+ "math error: weighted_median_col row.len() != columns: {:?} != {:?}",
row.len(),
columns
);
@@ -1189,7 +1189,7 @@ pub fn interpolate(mat1: &[Vec<I32F32>], mat2: &[Vec<I32F32>], ratio: I32F32) ->
}
if mat1.len() != mat2.len() {
log::error!(
- "interpolate mat1.len() != mat2.len(): {:?} != {:?}",
+ "math error: interpolate mat1.len() != mat2.len(): {:?} != {:?}",
mat1.len(),
mat2.len()
);
@@ -1213,12 +1213,12 @@ pub fn interpolate(mat1: &[Vec<I32F32>], mat2: &[Vec<I32F32>], ratio: I32F32) ->

for row1 in mat1.iter() {
let (Some(row2), Some(out_row)) = (m2_it.next(), out_it.next()) else {
- log::error!("interpolate: No more rows in mat2");
+ log::error!("math error: interpolate: No more rows in mat2");
break;
};
if row1.len() != row2.len() {
log::error!(
- "interpolate row1.len() != row2.len(): {:?} != {:?}",
+ "math error: interpolate row1.len() != row2.len(): {:?} != {:?}",
row1.len(),
row2.len()
);
@@ -1259,7 +1259,7 @@ pub fn interpolate_sparse(
if mat1.len() != mat2.len() {
// In case if sizes mismatch, return clipped weights
log::error!(
- "interpolate_sparse: mat1.len() != mat2.len(): {:?} != {:?}",
+ "math error: interpolate_sparse: mat1.len() != mat2.len(): {:?} != {:?}",
mat1.len(),
mat2.len()
);
@@ -1410,7 +1410,7 @@ pub fn mat_ema_sparse(
) -> Vec<Vec<(u16, I32F32)>> {
if new.len() != old.len() {
log::error!(
- "mat_ema_sparse: new.len() == old.len(): {:?} != {:?}",
+ "math error: mat_ema_sparse: new.len() == old.len(): {:?} != {:?}",
new.len(),
old.len()
);
@@ -1469,7 +1469,7 @@ pub fn mat_ema_alpha_sparse(
// If shapes don't match, just return `new`
if new.len() != old.len() || new.len() != alpha.len() {
log::error!(
- "mat_ema_alpha_sparse shapes don't match: {:?} vs. {:?} vs. {:?}",
+ "math error: mat_ema_alpha_sparse shapes don't match: {:?} vs. {:?} vs. {:?}",
old.len(),
new.len(),
alpha.len()
@@ -1490,15 +1490,6 @@ pub fn mat_ema_alpha_sparse(
break;
};

- if new_row.len() != old_row.len() || new_row.len() != alpha_row.len() {
- log::error!(
- "mat_ema_alpha_sparse row shapes don't match: {:?} vs. {:?} vs. {:?}",
- old_row.len(),
- new_row.len(),
- alpha_row.len()
- );
- }
-
// Densified accumulator sized to alpha_row length (columns outside are ignored).
let mut decayed_values = vec![zero; alpha_row.len()];

@@ -1546,7 +1537,7 @@ pub fn mat_ema_alpha(
// If outer dimensions don't match, return bonds unchanged
if new.len() != old.len() || new.len() != alpha.len() {
log::error!(
- "mat_ema_alpha shapes don't match: {:?} vs. {:?} vs. {:?}",
+ "math error: mat_ema_alpha shapes don't match: {:?} vs. {:?} vs. {:?}",
old.len(),
new.len(),
alpha.len()
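The matmul_transpose hunk above changes the guard itself, not just the log text: the old check treated the matrix as if it had to be square (matrix.len() vs. first_row.len()), while the corrected check compares the matrix width to the input vector's length. Below is a minimal standalone sketch of that shape rule, assuming (as the corrected check implies) that matmul_transpose forms out[i] = sum_j matrix[i][j] * vector[j]; it uses plain f64 and eprintln! in place of the pallet's I32F32 and log::error!, so it illustrates the check rather than reproducing the actual implementation.

// Sketch only, not the pallet's implementation: row-wise weighted sum
// out[i] = sum_j matrix[i][j] * vector[j]. The shape requirement is
// vector.len() == row width; the matrix does not need to be square,
// which is what the corrected guard expresses.
fn matmul_transpose_sketch(matrix: &[Vec<f64>], vector: &[f64]) -> Vec<f64> {
    let Some(first_row) = matrix.first() else {
        return vec![];
    };
    if first_row.is_empty() {
        return vec![];
    }
    if vector.len() != first_row.len() {
        eprintln!(
            "math error: matrix width doesn't match vector height: {:?} != {:?}",
            first_row.len(),
            vector.len()
        );
    }
    matrix
        .iter()
        .map(|row| row.iter().zip(vector.iter()).map(|(m, v)| m * v).sum::<f64>())
        .collect()
}

For a 2x3 matrix and a length-3 vector the product is well defined, yet the old guard would still have logged an error; the new guard only fires when the vector length and the row width genuinely disagree.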
6 changes: 3 additions & 3 deletions pallets/subtensor/src/epoch/run_epoch.rs
@@ -1128,7 +1128,7 @@ impl<T: Config> Pallet<T> {
if let Some(row) = weights.get_mut(uid_i as usize) {
row.push((*uid_j, I32F32::saturating_from_num(*weight_ij)));
} else {
- log::error!("uid_i {uid_i:?} is filtered to be less than n");
+ log::error!("math error: uid_i {uid_i:?} is filtered to be less than n");
}
}
}
@@ -1391,7 +1391,7 @@ impl<T: Config> Pallet<T> {

if weights.len() != bonds.len() {
log::error!(
- "compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}",
+ "math error: compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}",
weights.len(),
bonds.len()
);
@@ -1444,7 +1444,7 @@ impl<T: Config> Pallet<T> {

if weights.len() != bonds.len() {
log::error!(
- "compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}",
+ "math error: compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}",
weights.len(),
bonds.len()
);
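Both compute_liquid_alpha_values hunks above follow the same convention as the math.rs changes: a length mismatch is reported with the uniform "math error: " prefix and execution continues with a fallback instead of panicking mid-epoch. A rough sketch of that guard-and-fallback pattern, with placeholder values (the actual liquid-alpha formula and fallback are not part of this diff):

// Sketch of the guard-and-fallback convention only; DEFAULT_ALPHA and the
// per-pair formula below are placeholders, not the pallet's actual values.
const DEFAULT_ALPHA: f64 = 0.5;

fn liquid_alpha_sketch(weights: &[f64], bonds: &[f64]) -> Vec<f64> {
    if weights.len() != bonds.len() {
        // Log with the uniform "math error: " prefix and continue with a
        // safe default instead of panicking inside the epoch.
        eprintln!(
            "math error: weights and bonds have different lengths: {:?} != {:?}",
            weights.len(),
            bonds.len()
        );
        return vec![DEFAULT_ALPHA; weights.len()];
    }
    // Placeholder combination; only the guard above mirrors the diff.
    weights
        .iter()
        .zip(bonds.iter())
        .map(|(w, b)| (w - b).abs().min(1.0))
        .collect()
}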
2 changes: 1 addition & 1 deletion runtime/src/lib.rs
@@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
// `spec_version`, and `authoring_version` are the same between Wasm and native.
// This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use
// the compatible custom types.
- spec_version: 318,
+ spec_version: 319,
impl_version: 1,
apis: RUNTIME_API_VERSIONS,
transaction_version: 1,